# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checkpoint restoring utilities."""
from absl import logging
import dill
def load_checkpoint(checkpoint_path):
  """Restores checkpoint data from `checkpoint_path`, or None if not found."""
  try:
    with open(checkpoint_path, 'rb') as checkpoint_file:
      checkpoint_data = dill.load(checkpoint_file)
      logging.info('Loaded checkpoint from %s', checkpoint_path)
      return checkpoint_data
  except FileNotFoundError:
    return None
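# A minimal usage sketch (not part of the original file). The dill-based save
# step and the paths are illustrative assumptions mirroring `load_checkpoint`.
if __name__ == '__main__':
  _state = {'step': 0, 'params': None}
  with open('/tmp/checkpoint.dill', 'wb') as _f:
    dill.dump(_state, _f)
  assert load_checkpoint('/tmp/checkpoint.dill') == _state
  assert load_checkpoint('/tmp/missing.dill') is None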
# ---- End of mmv/utils/checkpoint.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ucf101 with custom decoding params."""
import tensorflow as tf
import tensorflow_datasets as tfds
# Utility functions.
tf.compat.v1.enable_eager_execution()
_CITATION = """\
@article{DBLP:journals/corr/abs-1212-0402,
author = {Khurram Soomro and
Amir Roshan Zamir and
Mubarak Shah},
title = {{UCF101:} {A} Dataset of 101 Human Actions Classes From Videos in
The Wild},
journal = {CoRR},
volume = {abs/1212.0402},
year = {2012},
url = {http://arxiv.org/abs/1212.0402},
archivePrefix = {arXiv},
eprint = {1212.0402},
timestamp = {Mon, 13 Aug 2018 16:47:45 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1212-0402},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_LABELS_FNAME = 'video/ucf101_labels.txt'
class ModUcf101(tfds.video.Ucf101):
"""Ucf101 action recognition dataset with better quality.
"""
def _info(self):
ffmpeg_extra_args = ('-qscale:v', '2', '-r', '25', '-t', '00:00:20')
video_shape = (
None, self.builder_config.height, self.builder_config.width, 3)
labels_names_file = tfds.core.tfds_path(_LABELS_FNAME)
features = tfds.features.FeaturesDict({
'video': tfds.features.Video(video_shape,
ffmpeg_extra_args=ffmpeg_extra_args,
encoding_format='jpeg'), # pytype: disable=wrong-arg-types # gen-stub-imports
'label': tfds.features.ClassLabel(names_file=labels_names_file),
})
return tfds.core.DatasetInfo(
builder=self,
description='A 101-label video classification dataset.',
features=features,
homepage='https://www.crcv.ucf.edu/data-sets/ucf101/',
citation=_CITATION,
)
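# Hypothetical build-and-load sketch (not part of the original file). The
# builder config name 'ucf101_1_256' and the data_dir are assumptions; any
# builder config inherited from `tfds.video.Ucf101` should work.
if __name__ == '__main__':
  builder = ModUcf101(data_dir='/tmp/tfds', config='ucf101_1_256')
  builder.download_and_prepare()  # Re-encodes videos with the ffmpeg args above.
  train_ds = builder.as_dataset(split='train')
  for example in train_ds.take(1):
    print(example['video'].shape, example['label'])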
# ---- End of mmv/utils/ucf101_dataset.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for text-video-audio embeddings."""
from typing import Any, Dict, Optional
import haiku as hk
import jax
import jax.numpy as jnp
from mmv.models import normalization
from mmv.models import resnet
from mmv.models import s3d
from mmv.models import tsm_resnet
_DEFAULT_CFG_AUDTXT = {
"totxt_head_mode": "linear",
"toaud_head_mode": "linear",
"toaud_bn_after_proj": False,
"totxt_bn_after_proj": False,
"embedding_dim": 512}
_DEFAULT_CFG_VIDAUD = {
"tovid_head_mode": "linear",
"toaud_head_mode": "mlp@512",
"tovid_bn_after_proj": False,
"toaud_bn_after_proj": True,
"embedding_dim": 512}
_DEFAULT_CFG_VIDTXT = {
"tovid_head_mode": "linear",
"totxt_head_mode": "mlp@512",
"tovid_bn_after_proj": False,
"totxt_bn_after_proj": True,
"embedding_dim": 512}
_DEFAULT_CFG_BN = {"decay_rate": 0.9, "eps": 1e-5,
"create_scale": True, "create_offset": True}
def _setkey_if_not_exists(d, key, value):
if key not in d:
d[key] = value
class AudioTextVideoEmbedding(hk.Module):
"""Module to fuse audio, text and video for joint embedding learning."""
def __init__(
self,
# Language parameters.
word_embedding_matrix,
sentence_dim=2048,
# Audio parameters.
audio_backbone="resnet18",
audio_model_kwargs=None,
# Vision parameters.
visual_backbone="s3d",
vision_model_kwargs=None,
# Common parameters.
mm_embedding_graph="fac_relu",
use_xreplica_bn=True,
bn_config_proj=None,
config_video_text=None,
config_video_audio=None,
config_audio_text=None,
use_audio_text=False,
name="audio_text_video_model"):
"""Initialize the AudioTextVideoEmbedding class.
Args:
word_embedding_matrix: 2d matrix [vocab_size, embed_size] to embed words.
sentence_dim: The dimension of the sentence representation.
audio_backbone: Backbone for audio.
audio_model_kwargs: Other specific parameters to pass to the audio
module.
visual_backbone: The video backbone.
vision_model_kwargs: Other specific parameters to pass to the vision
module.
mm_embedding_graph: Embedding graph merging strategy.
Can be `shared`, `disjoint` or `fac` (fac can be followed by an
activation function name e.g. `fac_relu`).
use_xreplica_bn: Whether or not to use the cross replica batch norm.
bn_config_proj: BN config of the projection heads.
config_video_text: Config for the video and the text branches.
config_video_audio: Config for the video and the audio branches.
config_audio_text: Config for the audio and the text branches.
use_audio_text: Whether or not the audio text branch is used during
training.
name: graph name.
"""
super(AudioTextVideoEmbedding, self).__init__(name=name)
# Audio parameters.
self._audio_backbone = audio_backbone
self._audio_model_kwargs = audio_model_kwargs
# Language parameters.
self._sentence_dim = sentence_dim
self._word_embedding_matrix = word_embedding_matrix
# Vision parameters.
self._visual_backbone = visual_backbone
self._vision_model_kwargs = vision_model_kwargs
# Joint parameters.
self._use_xreplica_bn = use_xreplica_bn
if self._use_xreplica_bn:
self._normalizer_name = "cross_replica_batch_norm"
else:
self._normalizer_name = "batch_norm"
# Projection head parameters.
if config_video_text is None:
config_video_text = _DEFAULT_CFG_VIDTXT
for k, v in _DEFAULT_CFG_VIDTXT.items():
_setkey_if_not_exists(config_video_text, k, v)
self._cfg_vid_txt = config_video_text
if config_video_audio is None:
config_video_audio = _DEFAULT_CFG_VIDAUD
for k, v in _DEFAULT_CFG_VIDAUD.items():
_setkey_if_not_exists(config_video_audio, k, v)
self._cfg_vid_aud = config_video_audio
if config_audio_text is None:
config_audio_text = _DEFAULT_CFG_AUDTXT
for k, v in _DEFAULT_CFG_AUDTXT.items():
_setkey_if_not_exists(config_audio_text, k, v)
self._cfg_aud_txt = config_audio_text
self._use_audio_text = use_audio_text
self._mm_embedding_graph = mm_embedding_graph
self._use_separate_heads = (
mm_embedding_graph == "disjoint" or
mm_embedding_graph.startswith("fac"))
self._bn_config_proj = bn_config_proj or _DEFAULT_CFG_BN
def _get_pair_embedding_heads(self,
embedding_dim_1, embedding_dim_2,
mode1, mode2,
use_bn_out1, use_bn_out2,
name1, name2):
embd1_module = EmbeddingModule(
embedding_dim_1,
mode=mode1,
use_bn_out=use_bn_out1,
bn_config=self._bn_config_proj,
use_xreplica_bn=self._use_xreplica_bn,
name=name1)
if self._use_separate_heads:
embd2_module = EmbeddingModule(
embedding_dim_2,
mode=mode2,
use_bn_out=use_bn_out2,
use_xreplica_bn=self._use_xreplica_bn,
bn_config=self._bn_config_proj,
name=name2)
else:
      assert embedding_dim_1 == embedding_dim_2, (
          "Using shared heads but inconsistent embedding dims were provided.")
      assert mode1 == mode2, (
          "Using shared heads but inconsistent modes were provided.")
      assert use_bn_out1 == use_bn_out2, (
          "Using shared heads but inconsistent bn confs were provided.")
embd2_module = embd1_module
return embd1_module, embd2_module
def _activate_interaction(self, inputs, activation_fn, is_training,
activation_module=None):
"""Activation function for the interaction modules."""
if activation_fn == "relu":
inputs = jax.nn.relu(inputs)
elif activation_fn == "bnrelu":
if activation_module is None:
activation_module = normalization.get_normalize_fn(
normalizer_name=self._normalizer_name,
normalizer_kwargs=self._bn_config_proj)
inputs = activation_module(inputs, is_training=is_training)
inputs = jax.nn.relu(inputs)
else:
raise ValueError(f"{activation_fn} not supported.")
return inputs, activation_module
def __call__(self,
images,
audio_spectrogram,
word_ids,
is_training,
return_intermediate_audio=False):
"""Computes video, text and audio embeddings.
Args:
images: The videos tensor of shape [B1, T, H, W, 3] where B1 is the batch
size, T is the number of frames per clip, H the height, W the width
and 3 the rgb channels.
audio_spectrogram: The audio tensor of shape [B2, T', F] where B2 is the
batch size, T' is the number of temporal frames, F is the number of
frequency frames.
      word_ids: Indices of the words in the sentences, used to compute the word
        embeddings within the model graph. The expected shape is [B3, N] where
        B3 is the batch size and N the maximum number of words per sentence.
is_training: Whether or not to activate the graph in training mode.
return_intermediate_audio: Return audio intermediate representation.
Returns:
if return_intermediate_audio = True
audio_representation: the 4-dim audio representation taken before
averaging over spatial dims in the Resnet.
else
visual_embd: a dict containing the video embeddings in audio and text
of shape [B1, d_embd].
audio_embd: a dict containing the audio embeddings in video and text
of shape [B2, d_embd].
txt_embd: a dict containing the text embeddings in video and audio
of shape[B3, d_embd].
visual_representation: the video rep of shape [B1, d_visual].
audio_representation: the audio rep of shape [B2, d_audio].
"""
# Computes the visual representation.
video_cnn = VisualModule(backbone=self._visual_backbone,
use_xreplica_bn=self._use_xreplica_bn,
model_kwargs=self._vision_model_kwargs)
visual_representation = video_cnn(images, is_training=is_training)
# Projection heads: Video -> Text and Video -> Audio.
vid2txt_embd_module, vid2aud_embd_module = self._get_pair_embedding_heads(
embedding_dim_1=self._cfg_vid_txt["embedding_dim"],
embedding_dim_2=self._cfg_vid_aud["embedding_dim"],
mode1=self._cfg_vid_txt["totxt_head_mode"],
mode2=self._cfg_vid_aud["toaud_head_mode"],
use_bn_out1=self._cfg_vid_txt["totxt_bn_after_proj"],
use_bn_out2=self._cfg_vid_aud["toaud_bn_after_proj"],
name1="vis_embd",
name2="vid2audio_embd")
video_embd = {}
if self._mm_embedding_graph in ["shared", "disjoint"]:
video_embd["toaud"] = vid2aud_embd_module(visual_representation,
is_training=is_training)
video_embd["totxt"] = vid2txt_embd_module(visual_representation,
is_training=is_training)
elif self._mm_embedding_graph.startswith("fac"):
      # Activation function if specified in the name, e.g. fac_relu.
activation_fn = None
if len(self._mm_embedding_graph.split("_")) == 2:
activation_fn = self._mm_embedding_graph.split("_")[1]
video_embd["toaud"] = vid2aud_embd_module(visual_representation,
is_training=is_training)
fine_rep = video_embd["toaud"]
      # Optionally activate the fine-grained representation.
if activation_fn:
fine_rep, activation_module = self._activate_interaction(
inputs=fine_rep, activation_fn=activation_fn,
is_training=is_training)
video_embd["totxt"] = vid2txt_embd_module(fine_rep,
is_training=is_training)
else:
raise ValueError(
f"{self._mm_embedding_graph} is not a valid MM embedding graph.")
# Computes the audio representation.
audio_cnn = AudioModule(backbone=self._audio_backbone,
use_xreplica_bn=self._use_xreplica_bn,
model_kwargs=self._audio_model_kwargs)
if return_intermediate_audio:
return audio_cnn(audio_spectrogram,
is_training=is_training,
return_intermediate=True)
audio_representation = audio_cnn(audio_spectrogram, is_training=is_training)
# Projection heads: Audio -> Video and Audio -> Text.
aud2vid_embd_module, aud2txt_embd_module = self._get_pair_embedding_heads(
embedding_dim_1=self._cfg_vid_aud["embedding_dim"],
embedding_dim_2=self._cfg_aud_txt["embedding_dim"],
mode1=self._cfg_vid_aud["tovid_head_mode"],
mode2=self._cfg_aud_txt["totxt_head_mode"],
use_bn_out1=self._cfg_vid_aud["tovid_bn_after_proj"],
use_bn_out2=self._cfg_aud_txt["totxt_bn_after_proj"],
name1="audio_embd",
name2="audio2txt_embd")
audio_embd = {}
audio_embd["tovid"] = aud2vid_embd_module(audio_representation,
is_training=is_training)
# Computes the projection to the text domain depending on the MM graph mode.
if (self._mm_embedding_graph.startswith("fac") and
(self._use_audio_text or (not is_training))):
      # If the audio-text branch is not used during training, we only take this
      # path at eval time (is_training=False) so as not to pollute the BN stats
      # of vid2txt_embd_module with audio features during training.
fine_rep_audio = audio_embd["tovid"]
if activation_fn:
fine_rep_audio, _ = self._activate_interaction(
inputs=fine_rep_audio, activation_fn=activation_fn,
is_training=is_training, activation_module=activation_module)
audio_embd["totxt"] = vid2txt_embd_module(fine_rep_audio,
is_training=is_training)
else:
audio_embd["totxt"] = aud2txt_embd_module(audio_representation,
is_training=is_training)
# Computes the text representation.
txt_representation = TextModule(
sentence_dim=self._sentence_dim,
word_embedding_matrix=self._word_embedding_matrix)(
word_ids, is_training=is_training)
# Projection heads: Text -> Video and Text -> Audio.
txt2vid_embd_module, txt2aud_embd_module = self._get_pair_embedding_heads(
embedding_dim_1=self._cfg_vid_txt["embedding_dim"],
embedding_dim_2=self._cfg_aud_txt["embedding_dim"],
mode1=self._cfg_vid_txt["tovid_head_mode"],
mode2=self._cfg_aud_txt["toaud_head_mode"],
use_bn_out1=self._cfg_vid_txt["tovid_bn_after_proj"],
use_bn_out2=self._cfg_aud_txt["toaud_bn_after_proj"],
name1="txt_embd",
name2="txt2audio_embd")
txt_embd = {}
txt_embd["tovid"] = txt2vid_embd_module(txt_representation,
is_training=is_training)
txt_embd["toaud"] = txt2aud_embd_module(txt_representation,
is_training=is_training)
return {
"vid_embd": video_embd,
"aud_embd": audio_embd,
"txt_embd": txt_embd,
"vid_repr": visual_representation,
"aud_repr": audio_representation,
}
class EmbeddingModule(hk.Module):
"""Final Embedding module."""
def __init__(self,
embedding_dim: int,
mode: str = "linear",
use_bn_out: bool = False,
bn_config: Optional[Dict[str, Any]] = None,
use_xreplica_bn: bool = True,
name="embedding_module"):
self._embedding_dim = embedding_dim
self._use_bn_out = use_bn_out
self._mode = mode
# Set default BN config.
bn_config = bn_config or _DEFAULT_CFG_BN
if use_xreplica_bn:
normalizer_name = "cross_replica_batch_norm"
else:
normalizer_name = "batch_norm"
self._batch_norm = normalization.get_normalize_fn(
normalizer_name=normalizer_name,
normalizer_kwargs=bn_config)
super(EmbeddingModule, self).__init__(name=name)
def __call__(self, input_feature, is_training):
if self._mode == "linear":
proj = hk.Linear(self._embedding_dim, name="final_projection")
embedding = proj(input_feature)
elif self._mode.startswith("mlp"):
if "@" not in self._mode:
        raise ValueError(
            ("Please specify the inner dimensions of the MLP with the `@` "
             "symbol, e.g. mlp@512 or mlp@512@256 for a 2-layer MLP."))
inner_dims = [int(dim) for dim in self._mode.split("@")[1:]]
embedding = input_feature
for inner_dim in inner_dims:
embedding = hk.Linear(inner_dim, with_bias=True,
name="final_projection_inner")(embedding)
if not self._mode.startswith("mlp_nobn"):
embedding = self._batch_norm(embedding, is_training=is_training)
embedding = jax.nn.relu(embedding)
# Final projection.
embedding = hk.Linear(self._embedding_dim, name="final_projection",
with_bias=not self._use_bn_out)(embedding)
else:
raise NotImplementedError
if self._use_bn_out:
embedding = self._batch_norm(embedding, is_training=is_training)
return embedding
class VisualModule(hk.Module):
"""The visual module selects which CNN backbone to connect to the graph."""
def __init__(self,
use_xreplica_bn=True,
backbone="s3d",
model_kwargs=None,
name="visual_module"):
self._backbone = backbone
super(VisualModule, self).__init__(name=name)
if model_kwargs is None:
model_kwargs = {}
bn_config = model_kwargs.get("bn_config", _DEFAULT_CFG_BN)
if use_xreplica_bn:
normalizer_name = "cross_replica_batch_norm"
else:
normalizer_name = "batch_norm"
normalize_fn = normalization.get_normalize_fn(
normalizer_name=normalizer_name,
normalizer_kwargs=bn_config)
if backbone == "s3d":
self._cnn = s3d.S3D(normalize_fn=normalize_fn)
elif backbone == "resnet50tsm":
width_mult = model_kwargs.get("width_mult", 1)
self._cnn = tsm_resnet.TSMResNetV2(
normalize_fn=normalize_fn,
depth=50,
num_frames=model_kwargs["n_frames"],
width_mult=width_mult)
else:
raise NotImplementedError
def __call__(self, images, is_training):
"""Connects graph to images."""
features = self._cnn(images, is_training=is_training)
return features
class AudioModule(hk.Module):
"""The audio module selects which CNN backbone to connect to the graph."""
def __init__(self,
backbone="resnet18",
use_xreplica_bn=True,
model_kwargs=None,
name="audio_module"):
super(AudioModule, self).__init__(name=name)
model_kwargs = model_kwargs or {}
bn_config = model_kwargs.get("bn_config", _DEFAULT_CFG_BN)
backbone_to_depth = {
"resnet18": 18,
"resnet34": 34,
"resnet50": 50,
"resnet101": 101
}
assert backbone in backbone_to_depth, (
f"backbone should be in {backbone_to_depth.keys()}")
if use_xreplica_bn:
normalizer_name = "cross_replica_batch_norm"
else:
normalizer_name = "batch_norm"
self._cnn = resnet.ResNetV2(
depth=backbone_to_depth[backbone],
normalize_fn=normalization.get_normalize_fn(
normalizer_name=normalizer_name,
normalizer_kwargs=bn_config),
num_classes=None)
def __call__(self,
audio_spectrogram,
is_training,
return_intermediate=False):
"""Connects graph to audio spectrogram."""
final_endpoint = "output"
if return_intermediate:
final_endpoint = "last_conv"
return self._cnn(audio_spectrogram,
is_training=is_training,
final_endpoint=final_endpoint)
class TextModule(hk.Module):
"""Text module computes the sentences representation."""
def __init__(self,
word_embedding_matrix,
sentence_dim=1024,
name="text_module"):
"""Initialize text module.
Args:
word_embedding_matrix: 2d matrix [vocab_size, embed_size] to embed words.
sentence_dim: dimension of sentence representation.
name: module name.
"""
super(TextModule, self).__init__(name=name)
self._word_embedding_module = hk.Embed(
embedding_matrix=word_embedding_matrix)
self._conv1d_module = hk.Conv1D(sentence_dim, 1, name="text_conv1")
def __call__(self, word_ids, is_training):
"""Connects graph to sentence representation."""
word_embeddings = self._word_embedding_module(word_ids)
word_embeddings = jax.lax.stop_gradient(word_embeddings)
output = self._conv1d_module(word_embeddings)
output = jax.nn.relu(output)
output = jnp.amax(output, axis=1)
return output
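# A minimal end-to-end sketch (not part of the original file). All shapes and
# the vocabulary size are illustrative assumptions; `transform_with_state` is
# required because the batch-norm layers carry state.
if __name__ == '__main__':
  import numpy as np

  def _forward(images, audio, word_ids, is_training):
    model = AudioTextVideoEmbedding(
        word_embedding_matrix=np.zeros((100, 16), dtype=np.float32),
        use_xreplica_bn=False)  # Plain batch norm, so no pmap is needed.
    return model(images, audio, word_ids, is_training=is_training)

  forward = hk.transform_with_state(_forward)
  images = jnp.zeros((1, 4, 32, 32, 3))           # [B1, T, H, W, 3]
  audio = jnp.zeros((1, 32, 32, 1))               # Assumed [B2, T', F, 1].
  word_ids = jnp.zeros((1, 8), dtype=jnp.int32)   # [B3, N]
  rng = jax.random.PRNGKey(0)
  params, state = forward.init(rng, images, audio, word_ids, is_training=True)
  out, state = forward.apply(params, state, rng, images, audio, word_ids,
                             is_training=False)
  print(jax.tree_util.tree_map(jnp.shape, out))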
# ---- End of mmv/models/mm_embeddings.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TSM ResNet model."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
from mmv.models import tsm_resnet
class TSMResNetTest(parameterized.TestCase):
@parameterized.parameters(
('tsm_resnet_stem', (2 * 32, 56, 56, 64)),
('tsm_resnet_unit_0', (2 * 32, 56, 56, 256)),
('tsm_resnet_unit_1', (2 * 32, 28, 28, 512)),
('tsm_resnet_unit_2', (2 * 32, 14, 14, 1024)),
('tsm_resnet_unit_3', (2 * 32, 7, 7, 2048)),
('last_conv', (2 * 32, 7, 7, 2048)),
('Embeddings', (2, 2048)),
)
def test_output_dimension(self, final_endpoint, expected_shape):
input_shape = (2, 32, 224, 224, 3)
def f():
data = jnp.zeros(input_shape)
net = tsm_resnet.TSMResNetV2()
return net(data, final_endpoint=final_endpoint)
init_fn, apply_fn = hk.transform(f)
out = apply_fn(init_fn(jax.random.PRNGKey(42)), None)
self.assertEqual(out.shape, expected_shape)
def test_tpu_mode(self):
input_shape = (32 * 2, 224, 224, 3)
def f():
data = jnp.zeros(input_shape)
net = tsm_resnet.TSMResNetV2(num_frames=32)
return net(data, final_endpoint='Embeddings')
init_fn, apply_fn = hk.transform(f)
out = apply_fn(init_fn(jax.random.PRNGKey(42)), None)
self.assertEqual(out.shape, (2, 2048))
if __name__ == '__main__':
absltest.main()
# ---- End of mmv/models/tsm_resnet_test.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
#
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tsm_utils."""
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
from mmv.models import tsm_utils
class TsmUtilsTest(parameterized.TestCase):
@parameterized.parameters(
((2, 32, 224, 224, 3), 'gpu', (2 * 32, 224, 224, 3), 32),
((32, 224, 224, 3), 'tpu', (32, 224, 224, 3), None),
)
def test_prepare_inputs(self, input_shape, expected_mode, expected_shape,
expected_num_frames):
data = jnp.zeros(input_shape)
out, mode, num_frames = tsm_utils.prepare_inputs(data)
self.assertEqual(out.shape, expected_shape)
self.assertEqual(mode, expected_mode)
self.assertEqual(num_frames, expected_num_frames)
def test_prepare_outputs(self):
data = jnp.concatenate([jnp.zeros(4), jnp.ones(4)]).reshape(4, 2)
out_gpu = tsm_utils.prepare_outputs(data, 'gpu', 2)
out_tpu = tsm_utils.prepare_outputs(data, 'tpu', 2)
expected_gpu = np.concatenate([np.zeros(2), np.ones(2)]).reshape(2, 2)
expected_tpu = 0.5 * jnp.ones((2, 2))
np.testing.assert_allclose(out_gpu, expected_gpu)
np.testing.assert_allclose(out_tpu, expected_tpu)
def test_apply_tsm(self):
shape = (32, 224, 224, 16)
data = jnp.zeros(shape)
out_gpu = tsm_utils.apply_temporal_shift(data, 'gpu', 16)
out_tpu = tsm_utils.apply_temporal_shift(data, 'tpu', 16)
self.assertEqual(out_gpu.shape, shape)
self.assertEqual(out_tpu.shape, shape)
if __name__ == '__main__':
absltest.main()
# ---- End of mmv/models/tsm_utils_test.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils functions for TSM."""
from typing import Tuple
import jax
import jax.numpy as jnp
from mmv.models import types
def prepare_inputs(
inputs: types.TensorLike) -> Tuple[jnp.ndarray, str, int]:
"""Deduces input mode for TSM."""
# Deduce if we run on TPU based on input shape.
if len(inputs.shape) == 5:
# Input is given in the standard [B, T, H, W, 3] format.
tsm_mode = 'gpu'
num_frames = inputs.shape[1]
inputs = jnp.reshape(inputs, [-1] + list(inputs.shape[2:]))
else:
# Input is given in the [T * B, H, W, 3] format.
tsm_mode = 'tpu'
num_frames = None
return inputs, tsm_mode, num_frames
def prepare_outputs(outputs: types.TensorLike,
tsm_mode: str,
num_frames: int) -> jnp.ndarray:
"""Processes output of TSM by averaging representations over time axis."""
n_channels = outputs.shape[-1]
if tsm_mode == 'tpu':
outputs = jnp.reshape(outputs, [num_frames, -1, n_channels])
outputs = jnp.mean(outputs, axis=0)
elif tsm_mode == 'gpu':
outputs = jnp.reshape(outputs, [-1, num_frames, n_channels])
outputs = jnp.mean(outputs, axis=1)
else:
raise ValueError(
f'`tsm_mode` should be \'tpu\' or \'gpu\' ({tsm_mode} given)')
return outputs
def apply_temporal_shift(
x: types.TensorLike,
tsm_mode: str,
num_frames: int,
channel_shift_fraction: float = 0.125) -> jnp.ndarray:
"""Performs a temporal shift: https://arxiv.org/abs/1811.08383 with mode."""
if tsm_mode == 'tpu':
outputs = temporal_shift_tpu(x, num_frames, channel_shift_fraction)
elif tsm_mode == 'gpu':
outputs = temporal_shift_gpu(x, num_frames, channel_shift_fraction)
else:
raise ValueError(
f'`tsm_mode` should be \'tpu\' or \'gpu\' ({tsm_mode} given)')
return outputs
def temporal_shift_gpu(
x: types.TensorLike,
num_frames: int,
channel_shift_fraction: float = 0.125) -> jnp.ndarray:
"""Performs a temporal shift: https://arxiv.org/abs/1811.08383."""
# B, T, H, W, C = batch_size, num_frames, im_height, im_width, channels
# Input is (B * T, H, W, C)
orig_shp = tuple(x.shape)
reshaped_x = jnp.reshape(x, (-1, num_frames) + orig_shp[1:])
n_channels = orig_shp[-1]
n_shift = int(n_channels * channel_shift_fraction)
new_shp = tuple(reshaped_x.shape)
# shifted_backward = reshaped_x[:, 1:, :, :, -n_shift:]
shifted_backward = jax.lax.slice(
reshaped_x, (0, 1, 0, 0, new_shp[4] - n_shift),
(new_shp[0], new_shp[1], new_shp[2], new_shp[3], new_shp[4]))
shifted_backward_padding = ((0, 0), (0, 1), (0, 0), (0, 0), (0, 0))
shifted_backward = jnp.pad(shifted_backward, shifted_backward_padding)
# shifted_forward = reshaped_x[:, :-1, :, :, :n_shift]
shifted_forward = jax.lax.slice(
reshaped_x, (0, 0, 0, 0, 0),
(new_shp[0], new_shp[1] - 1, new_shp[2], new_shp[3], n_shift))
shifted_forward_padding = ((0, 0), (1, 0), (0, 0), (0, 0), (0, 0))
shifted_forward = jnp.pad(shifted_forward, shifted_forward_padding)
no_shift = reshaped_x[:, :, :, :, n_shift:-n_shift]
shifted_x = jnp.concatenate([shifted_backward, no_shift, shifted_forward],
axis=4)
return jnp.reshape(shifted_x, (-1,) + orig_shp[1:])
def temporal_shift_tpu(
x: types.TensorLike,
num_frames: int,
channel_shift_fraction: float = 0.125) -> jnp.ndarray:
"""Performs a temporal shift: https://arxiv.org/abs/1811.08383.
TPU optimized version of TSM. Reshape is avoided by having the images
reshaped in [T * B, :] so that frames corresponding to same time frame in
videos are contiguous in memory. Thanks to cr/288510308 which allows to fuse
pad->slice into convolution, we reformulate the slice pad into a pad then
slice. Finally, to avoid concatenate that prevent some fusion from happening
we simply sum masked version of the features.
Args:
x: Input expected to be [T * B, H, W, C] (where the batch has been reshaped
from a time major version of the input).
num_frames: number of frames T per video.
channel_shift_fraction: fraction of the channel to shift forward and
backward.
Returns:
The temporal shifted version of x.
"""
# B, T, H, W, C = batch_size, num_frames, im_height, im_width, channels
# Input is (T * B, H, W, C)
original_shape = list(x.shape)
batch_size = int(original_shape[0] / num_frames)
n_channels = int(original_shape[-1])
n_shift = int(n_channels * channel_shift_fraction)
# Cast to bfloat16.
x = x.astype(jnp.bfloat16)
# For the following, assume that x has 3 channels [x1, x2, x3] and n_shift=1.
# Shift backward, we first pad by zeros [x1, x2, x3, 0, 0].
orig_shp = list(x.shape)
shifted_backward_padding = ((0, batch_size, 0), (0, 0, 0), (0, 0, 0),
(0, n_channels - n_shift, 0))
x_backward_padding = jax.lax.pad(
x,
padding_value=jnp.bfloat16(0.),
padding_config=shifted_backward_padding)
# The following shift gets to [x3^+1, 0, 0] (where +1 means from the future).
shifted_backward = jax.lax.slice(x_backward_padding,
(batch_size, 0, 0, n_channels - n_shift),
(orig_shp[0] + batch_size, orig_shp[1],
orig_shp[2], 2 * n_channels - n_shift))
# Shift forward, we first pad by zeros [0, 0, x1, x2, x3].
shifted_forward_padding = ((batch_size, 0, 0), (0, 0, 0), (0, 0, 0),
(n_channels - n_shift, 0, 0))
x_forward_padding = jax.lax.pad(
x,
padding_value=jnp.bfloat16(0.),
padding_config=shifted_forward_padding)
# The following shift gets to [0, 0, x1^-1] (where -1 means from the past).
shifted_forward = jax.lax.slice(
x_forward_padding, (0, 0, 0, 0),
(orig_shp[0], orig_shp[1], orig_shp[2], n_channels))
# No shift is in the middle, this gets [0, x2, 0].
mask_noshift = (jnp.reshape((jnp.arange(n_channels) >= n_shift) &
(jnp.arange(n_channels) < n_channels - n_shift),
(1, 1, 1, -1))).astype(jnp.bfloat16)
no_shift = mask_noshift * x
# By summing everything together, we end up with [x3^+1, x2, x1^-1].
# Note: channels have been reordered but that doesn't matter for the model.
shifted_x = shifted_backward + shifted_forward + no_shift
return shifted_x.astype(jnp.float32)
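# A small self-check sketch (not part of the original file; shapes are
# illustrative). With 16 channels the default shift fraction moves 2 channels
# in each temporal direction.
if __name__ == '__main__':
  import numpy as np
  videos = np.random.randn(2, 8, 16, 16, 16).astype(np.float32)  # [B, T, H, W, C]
  frames, mode, num_frames = prepare_inputs(videos)  # [(B*T), H, W, C], 'gpu', 8
  shifted = apply_temporal_shift(frames, mode, num_frames)
  assert shifted.shape == frames.shape
  features = jnp.mean(shifted, axis=(1, 2))          # Spatial pooling -> [(B*T), C].
  pooled = prepare_outputs(features, mode, num_frames)
  assert pooled.shape == (2, 16)                     # Temporal mean -> [B, C].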
# ---- End of mmv/models/tsm_utils.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Temporal Shift Module w/ ResNet-50 and ResNet-101.
Based on:
TSM: Temporal Shift Module for Efficient Video Understanding
Ji Lin, Chuang Gan, Song Han
https://arxiv.org/pdf/1811.08383.pdf.
"""
from typing import Optional
import haiku as hk
import jax
import jax.numpy as jnp
from mmv.models import tsm_utils as tsmu
from mmv.models import types
class TSMResNetBlock(hk.Module):
"""A ResNet subblock with Temporal Channel Shifting.
Combines a typical ResNetV2 block implementation
(see https://arxiv.org/abs/1512.03385) with a pre-convolution Temporal
Shift Module (see https://arxiv.org/pdf/1811.08383.pdf) in the residual.
"""
def __init__(self,
output_channels: int,
stride: int,
use_projection: bool,
tsm_mode: str,
normalize_fn: Optional[types.NormalizeFn] = None,
channel_shift_fraction: float = 0.125,
num_frames: int = 8,
name: str = 'TSMResNetBlock'):
"""Initializes the TSMResNetBlock module.
Args:
output_channels: Number of output channels.
stride: Stride used in convolutions.
use_projection: Whether to use a projection for the shortcut.
tsm_mode: Mode for TSM ('gpu' or 'tpu').
normalize_fn: Function used for normalization.
channel_shift_fraction: The fraction of temporally shifted channels. If
`channel_shift_fraction` is 0, the block is the same as a normal ResNet
block.
      num_frames: Size of frame dimension in a single batch example.
name: The name of the module.
"""
super().__init__(name=name)
self._output_channels = output_channels
self._bottleneck_channels = output_channels // 4
self._stride = stride
self._use_projection = use_projection
self._normalize_fn = normalize_fn
self._tsm_mode = tsm_mode
self._channel_shift_fraction = channel_shift_fraction
self._num_frames = num_frames
def __call__(self,
inputs: types.TensorLike,
is_training: bool = True) -> jnp.ndarray:
"""Connects the ResNetBlock module into the graph.
Args:
      inputs: A 4-D float array of shape `[B * num_frames, H, W, C]`.
is_training: Whether to use training mode.
Returns:
A 4-D float array of shape
`[B * num_frames, new_h, new_w, output_channels]`.
"""
# ResNet V2 uses pre-activation, where the batch norm and relu are before
# convolutions, rather than after as in ResNet V1.
preact = inputs
if self._normalize_fn is not None:
preact = self._normalize_fn(preact, is_training=is_training)
preact = jax.nn.relu(preact)
if self._use_projection:
shortcut = hk.Conv2D(
output_channels=self._output_channels,
kernel_shape=1,
stride=self._stride,
with_bias=False,
padding='SAME',
name='shortcut_conv')(
preact)
else:
shortcut = inputs
# Eventually applies Temporal Shift Module.
if self._channel_shift_fraction != 0:
preact = tsmu.apply_temporal_shift(
preact, tsm_mode=self._tsm_mode, num_frames=self._num_frames,
channel_shift_fraction=self._channel_shift_fraction)
# First convolution.
residual = hk.Conv2D(
self._bottleneck_channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_0')(
preact)
# Second convolution.
if self._normalize_fn is not None:
residual = self._normalize_fn(residual, is_training=is_training)
residual = jax.nn.relu(residual)
residual = hk.Conv2D(
output_channels=self._bottleneck_channels,
kernel_shape=3,
stride=self._stride,
with_bias=False,
padding='SAME',
name='conv_1')(
residual)
# Third convolution.
if self._normalize_fn is not None:
residual = self._normalize_fn(residual, is_training=is_training)
residual = jax.nn.relu(residual)
residual = hk.Conv2D(
output_channels=self._output_channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_2')(
residual)
# NOTE: we do not use block multiplier.
output = shortcut + residual
return output
class TSMResNetUnit(hk.Module):
"""Block group for TSM ResNet."""
def __init__(self,
output_channels: int,
num_blocks: int,
stride: int,
tsm_mode: str,
num_frames: int,
normalize_fn: Optional[types.NormalizeFn] = None,
channel_shift_fraction: float = 0.125,
name: str = 'tsm_resnet_unit'):
"""Creates a TSMResNet Unit.
Args:
output_channels: Number of output channels.
num_blocks: Number of ResNet blocks in the unit.
stride: Stride of the unit.
tsm_mode: Which temporal shift module to use.
num_frames: Size of frame dimension in a single batch example.
normalize_fn: Function used for normalization.
channel_shift_fraction: The fraction of temporally shifted channels. If
`channel_shift_fraction` is 0, the block is the same as a normal ResNet
block.
name: The name of the module.
"""
super().__init__(name=name)
self._output_channels = output_channels
self._num_blocks = num_blocks
self._normalize_fn = normalize_fn
self._stride = stride
self._tsm_mode = tsm_mode
self._channel_shift_fraction = channel_shift_fraction
self._num_frames = num_frames
def __call__(self,
inputs: types.TensorLike,
is_training: bool) -> jnp.ndarray:
"""Connects the module to inputs.
Args:
inputs: A 4-D float array of shape `[B * num_frames, H, W, C]`.
is_training: Whether to use training mode.
Returns:
A 4-D float array of shape
`[B * num_frames, H // stride, W // stride, output_channels]`.
"""
net = inputs
for idx_block in range(self._num_blocks):
net = TSMResNetBlock(
self._output_channels,
stride=self._stride if idx_block == 0 else 1,
use_projection=idx_block == 0,
normalize_fn=self._normalize_fn,
tsm_mode=self._tsm_mode,
channel_shift_fraction=self._channel_shift_fraction,
num_frames=self._num_frames,
name=f'block_{idx_block}')(
net, is_training=is_training)
return net # pytype: disable=bad-return-type # jax-devicearray
class TSMResNetV2(hk.Module):
"""TSM based on ResNet V2 as described in https://arxiv.org/abs/1603.05027."""
# Endpoints of the model in order.
VALID_ENDPOINTS = (
'tsm_resnet_stem',
'tsm_resnet_unit_0',
'tsm_resnet_unit_1',
'tsm_resnet_unit_2',
'tsm_resnet_unit_3',
'last_conv',
'Embeddings',
)
def __init__(self,
normalize_fn: Optional[types.NormalizeFn] = None,
depth: int = 50,
num_frames: int = 16,
channel_shift_fraction: float = 0.125,
width_mult: int = 1,
name: str = 'TSMResNetV2'):
"""Constructs a ResNet model.
Args:
normalize_fn: Function used for normalization.
depth: Depth of the desired ResNet.
num_frames: Number of frames (used in TPU mode).
channel_shift_fraction: Fraction of channels that are temporally shifted,
if `channel_shift_fraction` is 0, a regular ResNet is returned.
      width_mult: Integer multiplier applied to the number of channels in each
        layer.
name: The name of the module.
Raises:
ValueError: If `channel_shift_fraction` or `depth` has invalid value.
"""
super().__init__(name=name)
if not 0. <= channel_shift_fraction <= 1.0:
raise ValueError(
f'channel_shift_fraction ({channel_shift_fraction})'
' has to be in [0, 1].')
self._num_frames = num_frames
self._channels = (256, 512, 1024, 2048)
self._strides = (1, 2, 2, 2)
num_blocks = {
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
200: (3, 24, 36, 3),
}
if depth not in num_blocks:
raise ValueError(
f'`depth` should be in {list(num_blocks.keys())} ({depth} given).')
self._num_blocks = num_blocks[depth]
self._width_mult = width_mult
self._channel_shift_fraction = channel_shift_fraction
self._normalize_fn = normalize_fn
def __call__(
self,
inputs: types.TensorLike,
is_training: bool = True,
final_endpoint: str = 'Embeddings') -> jnp.ndarray:
"""Connects the TSM ResNetV2 module into the graph.
Args:
      inputs: A 5-D float array of shape `[B, T, H, W, C]`, or a 4-D float
        array of shape `[T * B, H, W, C]` when frames are laid out time-major
        (TPU mode).
is_training: Whether to use training mode.
final_endpoint: Up to which endpoint to run / return.
Returns:
      Network output at location `final_endpoint`. A float array whose shape
      depends on `final_endpoint`.
Raises:
ValueError: If `final_endpoint` is not recognized.
"""
# Prepare inputs for TSM.
inputs, tsm_mode, num_frames = tsmu.prepare_inputs(inputs)
num_frames = num_frames or self._num_frames
self._final_endpoint = final_endpoint
if self._final_endpoint not in self.VALID_ENDPOINTS:
raise ValueError(f'Unknown final endpoint {self._final_endpoint}')
# Stem convolution.
end_point = 'tsm_resnet_stem'
net = hk.Conv2D(
output_channels=64 * self._width_mult,
kernel_shape=7,
stride=2,
with_bias=False,
name=end_point,
padding='SAME')(
inputs)
net = hk.MaxPool(
window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1),
padding='SAME')(
net)
if self._final_endpoint == end_point:
return net
# Residual block.
for unit_id, (channels, num_blocks, stride) in enumerate(
zip(self._channels, self._num_blocks, self._strides)):
end_point = f'tsm_resnet_unit_{unit_id}'
net = TSMResNetUnit(
output_channels=channels * self._width_mult,
num_blocks=num_blocks,
stride=stride,
normalize_fn=self._normalize_fn,
channel_shift_fraction=self._channel_shift_fraction,
num_frames=num_frames,
tsm_mode=tsm_mode,
name=end_point)(
net, is_training=is_training)
if self._final_endpoint == end_point:
return net
if self._normalize_fn is not None:
net = self._normalize_fn(net, is_training=is_training)
net = jax.nn.relu(net)
end_point = 'last_conv'
if self._final_endpoint == end_point:
return net
net = jnp.mean(net, axis=(1, 2))
# Prepare embedding outputs for TSM (temporal average of features).
net = tsmu.prepare_outputs(net, tsm_mode, num_frames)
assert self._final_endpoint == 'Embeddings'
return net
# ---- End of mmv/models/tsm_resnet.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Haiku S3D model."""
import collections
from typing import Optional, Sequence
import haiku as hk
import jax
from jax import numpy as jnp
from mmv.models import types
class _MaxPool(hk.MaxPool):
"""A `hk.MaxPool` accepting (and discarding) an `is_training` argument."""
def __call__(self,
x: types.TensorLike,
is_training: bool = True) -> jnp.ndarray:
del is_training # Unused.
return super().__call__(x)
def self_gating(inputs: types.TensorLike) -> jnp.ndarray:
"""Feature gating as used in S3D-G.
Transforms the input features by aggregating features from all spatial and
temporal locations, and applying gating conditioned on the aggregated
features. More details can be found at: https://arxiv.org/abs/1712.04851.
Args:
inputs: A 5-D float array of shape `[B, T, H, W, C]`.
Returns:
A tensor with the same shape as input_tensor.
Raises:
ValueError: If `inputs` has the wrong shape.
"""
if inputs.ndim != 5:
raise ValueError(
f'Expected an input of shape `[B, T, H, W, C]` but got {inputs.shape}.')
input_shape = inputs.shape
num_channels = input_shape[4]
spatiotemporal_average = jnp.mean(inputs, axis=(1, 2, 3))
weights = hk.Linear(num_channels, name='self_gating')(spatiotemporal_average)
weights = jax.nn.sigmoid(weights)
return jnp.multiply(weights[:, None, None, None, :], inputs)
class SUnit3D(hk.Module):
"""Base 3d Unit combining Conv3d + Batch Norm + non-linearity."""
def __init__(
self,
output_channels: int,
kernel_shape: Sequence[int] = (1, 1, 1),
stride: Sequence[int] = (1, 1, 1),
with_bias: bool = False,
separable: bool = False,
normalize_fn: Optional[types.NormalizeFn] = None,
activation_fn: Optional[types.ActivationFn] = jax.nn.relu,
self_gating_fn: Optional[types.GatingFn] = None,
name='SUnit3D'):
"""Initializes the SUnit3D module.
Args:
output_channels: Number of output channels.
kernel_shape: The shape of the kernel. A sequence of length 3.
stride: Stride for the kernel. A sequence of length 3.
with_bias: Whether to add a bias to the convolution.
separable: Whether to use separable.
normalize_fn: Function used for normalization.
activation_fn: Function used as non-linearity.
self_gating_fn: Function used for self-gating.
name: The name of the module.
Raises:
ValueError: If `kernel_shape` or `stride` has the wrong shape.
"""
super().__init__(name=name)
# Check args.
if len(kernel_shape) != 3:
raise ValueError(
'Given `kernel_shape` must have length 3 but has length '
f'{len(kernel_shape)}.')
if len(stride) != 3:
raise ValueError(
f'Given `stride` must have length 3 but has length {len(stride)}.')
self._normalize_fn = normalize_fn
self._activation_fn = activation_fn
self._self_gating_fn = self_gating_fn
k0, k1, k2 = kernel_shape
if separable and k1 != 1:
spatial_kernel_shape = [1, k1, k2]
temporal_kernel_shape = [k0, 1, 1]
s0, s1, s2 = stride
spatial_stride = [1, s1, s2]
temporal_stride = [s0, 1, 1]
self._convolutions = [
hk.Conv3D(
output_channels=output_channels,
kernel_shape=spatial_kernel_shape,
stride=spatial_stride,
padding='SAME',
with_bias=with_bias),
hk.Conv3D(
output_channels=output_channels,
kernel_shape=temporal_kernel_shape,
stride=temporal_stride,
padding='SAME',
with_bias=with_bias)
]
else:
self._convolutions = [
hk.Conv3D(
output_channels=output_channels,
kernel_shape=kernel_shape,
stride=stride,
padding='SAME',
with_bias=with_bias)]
def __call__(
self,
inputs: types.TensorLike,
is_training: bool) -> jnp.ndarray:
"""Connects the module to inputs.
Args:
inputs: A 5-D float array of shape `[B, T, H, W, C]`.
is_training: Whether to use training mode.
Returns:
A 5-D float array of shape `[B, new_t, new_h, new_w, output_channels]`.
"""
x = inputs
for conv in self._convolutions:
x = conv(x)
if self._normalize_fn is not None:
x = self._normalize_fn(x, is_training=is_training)
if self._activation_fn is not None:
x = self._activation_fn(x)
if self._self_gating_fn:
x = self._self_gating_fn(x)
return x # pytype: disable=bad-return-type # jax-devicearray
class InceptionBlockV13D(hk.Module):
"""A 3D Inception v1 block.
This allows use of separable 3D convolutions and self-gating, as described in:
Rethinking Spatiotemporal Feature Learning For Video Understanding.
Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu and Kevin Murphy.
https://arxiv.org/abs/1712.04851.
"""
def __init__(self,
output_channels: Sequence[int],
normalize_fn: Optional[types.NormalizeFn],
temporal_kernel_size: int = 3,
self_gating_fn: Optional[types.GatingFn] = None,
name: str = 'InceptionBlockV13D'):
"""Initializes the InceptionBlockV13D module.
Args:
output_channels: The size of the output channels of each block, ordered as
[Conv2d_0a_1x1, Conv2d_0a_1x1, Conv2d_0b_3x3, Conv2d_0a_1x1,
Conv2d_0b_3x3, Conv2d_0b_1x1]
normalize_fn: Function used for normalization.
temporal_kernel_size: The size of the temporal convolutional filters in
the conv3d_spatiotemporal blocks.
self_gating_fn: Function which optionally performs self-gating. If `None`,
no self-gating is applied.
name: The name of the module.
Raises:
ValueError: If `output_channels` has the wrong shape.
"""
super().__init__(name=name)
# Check args.
if len(output_channels) != 6:
raise ValueError(
'Given `output_channels` must have length 6 but has length '
f'{len(output_channels)}.')
self._output_channels = output_channels
self._normalize_fn = normalize_fn
self._temporal_kernel_size = temporal_kernel_size
if self_gating_fn is None:
self._self_gating_fn = lambda x: x
else:
self._self_gating_fn = self_gating_fn
def __call__(
self,
inputs: types.TensorLike,
is_training: bool) -> jnp.ndarray:
"""Connects the module to inputs.
Args:
inputs: A 5-D float array of shape `[B, T, H, W, C]`.
is_training: Whether to use training mode.
Returns:
A 5-D float array of shape
`[B, new_t, new_h, new_w, sum(output_channels)]`.
"""
# Branch 0
branch_0 = SUnit3D(
output_channels=self._output_channels[0],
kernel_shape=(1, 1, 1),
separable=False,
normalize_fn=self._normalize_fn,
self_gating_fn=self._self_gating_fn,
name='Branch_0_Conv2d_0a_1x1')(
inputs, is_training=is_training)
# Branch 1
branch_1 = SUnit3D(
output_channels=self._output_channels[1],
kernel_shape=(1, 1, 1),
separable=False,
normalize_fn=self._normalize_fn,
self_gating_fn=None,
name='Branch_1_Conv2d_0a_1x1')(
inputs, is_training=is_training)
branch_1 = SUnit3D(
output_channels=self._output_channels[2],
kernel_shape=(self._temporal_kernel_size, 3, 3),
separable=True,
normalize_fn=self._normalize_fn,
self_gating_fn=self._self_gating_fn,
name='Branch_1_Conv2d_0b_3x3')(
branch_1, is_training=is_training)
# Branch 2
branch_2 = SUnit3D(
output_channels=self._output_channels[3],
kernel_shape=(1, 1, 1),
separable=False,
normalize_fn=self._normalize_fn,
self_gating_fn=None,
name='Branch_2_Conv2d_0a_1x1')(
inputs, is_training=is_training)
branch_2 = SUnit3D(
output_channels=self._output_channels[4],
kernel_shape=(self._temporal_kernel_size, 3, 3),
separable=True,
normalize_fn=self._normalize_fn,
self_gating_fn=self._self_gating_fn,
name='Branch_2_Conv2d_0b_3x3')(
branch_2, is_training=is_training)
# Branch 3
branch_3 = hk.MaxPool(
window_shape=(1, 3, 3, 3, 1),
strides=(1, 1, 1, 1, 1),
padding='SAME',
name='Branch_3_MaxPool_0a_3x3')(
inputs)
branch_3 = SUnit3D(
output_channels=self._output_channels[5],
kernel_shape=(1, 1, 1),
separable=False,
normalize_fn=self._normalize_fn,
self_gating_fn=self._self_gating_fn,
name='Branch_3_Conv2d_0b_1x1')(
branch_3, is_training=is_training)
return jnp.concatenate((branch_0, branch_1, branch_2, branch_3), axis=4)
_Layer = collections.namedtuple('_Layer', ('name', 'module', 'kwargs'))
class S3D(hk.Module):
"""S3D architecture.
Any intermediary representation can be obtained by choosing one of the valid
`final_endpoint`s. The final value returned by this model (when 'Embeddings'
is used as `final_endpoint`) is a single 1-D representation for each video in
the batch. Another layer can be externally added on top of that to obtain
logits.
"""
# Endpoints of the model in order.
VALID_ENDPOINTS = (
'Conv2d_1a_7x7',
'MaxPool_2a_3x3',
'Conv2d_2b_1x1',
'Conv2d_2c_3x3',
'MaxPool_3a_3x3',
'Mixed_3b',
'Mixed_3c',
'MaxPool_4a_3x3',
'Mixed_4b',
'Mixed_4c',
'Mixed_4d',
'Mixed_4e',
'Mixed_4f',
'MaxPool_5a_2x2',
'Mixed_5b',
'Mixed_5c',
'Embeddings',
)
def __init__(self,
normalize_fn: Optional[types.NormalizeFn] = None,
first_temporal_kernel_size: int = 7,
temporal_conv_startat: Optional[str] = 'Conv2d_2c_3x3',
gating_startat: Optional[str] = 'Conv2d_2c_3x3',
name='S3D'):
"""Initializes the S3D module.
Args:
normalize_fn: Function used for normalization.
first_temporal_kernel_size: Specifies the temporal kernel size for the
first conv3d filter. A larger value slows down the model but provides
little accuracy improvement. Must be set to one of 1, 3, 5 or 7.
      temporal_conv_startat: Specifies the first conv block to use separable 3D
        convs rather than 2D convs (implemented as [1, k, k] 3D conv). This is
        used to construct the inverted pyramid models. 'Conv2d_2c_3x3' is the
        first valid block to use separable 3D convs. If `None`, no blocks use
        separable 3D convs.
      gating_startat: Specifies the first conv block to use self gating.
        'Conv2d_2c_3x3' is the first valid block to use self gating. If `None`,
        no blocks use self gating.
name: The name of the module.
Raises:
ValueError: If `temporal_conv_startat`, `gating_startat` or
`first_temporal_kernel_size` is not recognized.
"""
super().__init__(name=name)
self._first_temporal_kernel_size = first_temporal_kernel_size
self._temporal_conv_startat = temporal_conv_startat
self._gating_startat = gating_startat
self._normalize_fn = normalize_fn
if (temporal_conv_startat not in self.VALID_ENDPOINTS
and temporal_conv_startat is not None):
raise ValueError(
f'Provided `temporal_conv_startat`: {temporal_conv_startat} not '
f'valid. It must be one of: {self.VALID_ENDPOINTS}, or `None`.')
if (gating_startat not in self.VALID_ENDPOINTS
and gating_startat is not None):
raise ValueError(
f'Provided `gating_startat`: {gating_startat} not valid. '
f'It must be one of: {self.VALID_ENDPOINTS}, or `None`.')
if first_temporal_kernel_size not in [1, 3, 5, 7]:
raise ValueError('`first_temporal_kernel_size` can only be 1, 3, 5 or 7.')
def __call__(self,
inputs: types.TensorLike,
is_training: bool,
final_endpoint: str = 'Embeddings') -> jnp.ndarray:
"""Connects the model to inputs.
Args:
inputs: A 5-D float array of shape `[B, T, H, W, C]`.
is_training: Whether to use training mode.
final_endpoint: Up to which endpoint to run / return.
    Returns:
      Network output at location `final_endpoint`. A float array whose shape
      depends on `final_endpoint`.
Raises:
ValueError: If `final_endpoint` is not recognized.
"""
if final_endpoint not in self.VALID_ENDPOINTS:
raise ValueError(f'Provided final_endpoint: {final_endpoint} not valid.'
f' It must be one of: {self.VALID_ENDPOINTS}')
x = inputs
# We define layers with tuples (name, module, kwargs)
# Not all kwargs are present, as we will need to fill in certain properties
# as we move down the network.
layers = []
# The first layer is conditional on the input data shape: the channel size
# is used to identify whether the `space_to_depth` transformation has been
# applied to the input. This is used to speed up computation on TPUs.
if x.shape[-1] == 3:
layers.append(
_Layer('Conv2d_1a_7x7', SUnit3D,
dict(output_channels=64, stride=(2, 2, 2), separable=False,
kernel_shape=(self._first_temporal_kernel_size, 7, 7),
normalize_fn=self._normalize_fn)))
else:
layers.append(
_Layer('Conv2d_1a_7x7', SUnit3D,
dict(output_channels=64, kernel_shape=(2, 4, 4),
stride=(1, 1, 1), separable=False,
normalize_fn=self._normalize_fn)))
layers.extend([
_Layer('MaxPool_2a_3x3', _MaxPool,
dict(window_shape=(1, 1, 3, 3, 1), strides=(1, 1, 2, 2, 1),
padding='SAME')),
_Layer('Conv2d_2b_1x1', SUnit3D,
dict(output_channels=64, kernel_shape=(1, 1, 1),
normalize_fn=self._normalize_fn)),
_Layer('Conv2d_2c_3x3', SUnit3D,
dict(output_channels=192, separable=True,
normalize_fn=self._normalize_fn)),
_Layer('MaxPool_3a_3x3', _MaxPool,
dict(window_shape=(1, 1, 3, 3, 1), strides=(1, 1, 2, 2, 1),
padding='SAME')),
_Layer('Mixed_3b', InceptionBlockV13D,
dict(output_channels=(64, 96, 128, 16, 32, 32),
normalize_fn=self._normalize_fn)),
_Layer('Mixed_3c', InceptionBlockV13D,
dict(output_channels=(128, 128, 192, 32, 96, 64),
normalize_fn=self._normalize_fn)),
_Layer('MaxPool_4a_3x3', _MaxPool,
dict(window_shape=(1, 3, 3, 3, 1), strides=(1, 2, 2, 2, 1),
padding='SAME')),
_Layer('Mixed_4b', InceptionBlockV13D,
dict(output_channels=(192, 96, 208, 16, 48, 64),
normalize_fn=self._normalize_fn)),
_Layer('Mixed_4c', InceptionBlockV13D,
dict(output_channels=(160, 112, 224, 24, 64, 64),
normalize_fn=self._normalize_fn)),
_Layer('Mixed_4d', InceptionBlockV13D,
dict(output_channels=(128, 128, 256, 24, 64, 64),
normalize_fn=self._normalize_fn)),
_Layer('Mixed_4e', InceptionBlockV13D,
dict(output_channels=(112, 144, 288, 32, 64, 64),
normalize_fn=self._normalize_fn)),
_Layer('Mixed_4f', InceptionBlockV13D,
dict(output_channels=(256, 160, 320, 32, 128, 128),
normalize_fn=self._normalize_fn)),
_Layer('MaxPool_5a_2x2', _MaxPool,
dict(window_shape=(1, 2, 2, 2, 1), strides=(1, 2, 2, 2, 1),
padding='SAME')),
_Layer('Mixed_5b', InceptionBlockV13D,
dict(output_channels=(256, 160, 320, 32, 128, 128),
normalize_fn=self._normalize_fn)),
_Layer('Mixed_5c', InceptionBlockV13D,
dict(output_channels=(384, 192, 384, 48, 128, 128),
normalize_fn=self._normalize_fn)),
])
    # These parameters may change throughout the computation.
self_gating_fn = None
temporal_kernel_size = 1
# Iterate over layers.
for layer in layers:
      # Update the layer kwargs once the start-at layers are reached.
if layer.name == self._gating_startat:
self_gating_fn = self_gating
if layer.name == self._temporal_conv_startat:
temporal_kernel_size = 3
kwargs = layer.kwargs
if layer.module is SUnit3D:
kwargs['self_gating_fn'] = self_gating_fn
if 'kernel_shape' not in kwargs:
kwargs['kernel_shape'] = (temporal_kernel_size, 3, 3)
elif layer.module is InceptionBlockV13D:
kwargs['self_gating_fn'] = self_gating_fn
kwargs['temporal_kernel_size'] = temporal_kernel_size
module = layer.module(name=layer.name, **kwargs)
x = module(x, is_training=is_training)
if final_endpoint == layer.name:
return x
assert final_endpoint == 'Embeddings'
return jnp.mean(x, axis=(1, 2, 3))
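# A minimal usage sketch (not part of the original file; shapes are
# illustrative). Without a normalize_fn the model is stateless, so a plain
# hk.transform suffices.
if __name__ == '__main__':
  def _forward(x):
    return S3D(normalize_fn=None)(x, is_training=False)
  init_fn, apply_fn = hk.transform(_forward)
  x = jnp.zeros((1, 8, 64, 64, 3))  # [B, T, H, W, C]
  params = init_fn(jax.random.PRNGKey(0), x)
  embeddings = apply_fn(params, None, x)  # -> (1, 1024) per-video embedding.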
# ---- End of mmv/models/s3d.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Type Aliases."""
from typing import Callable, Tuple, Union
import jax
import numpy as np
import optax
TensorLike = Union[np.ndarray, jax.Array]
ActivationFn = Callable[[TensorLike], TensorLike]
GatingFn = Callable[[TensorLike], TensorLike]
NetworkFn = Callable[[TensorLike], TensorLike]
# Callable doesn't allow kwargs to be used, and we often want to
# pass in is_training=..., so ignore the arguments for the sake of pytype.
NormalizeFn = Callable[..., TensorLike]
OptState = Tuple[optax.TraceState, optax.ScaleByScheduleState, optax.ScaleState]
# ---- End of mmv/models/types.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet V2 modules.
Equivalent to hk.nets.ResNet except accepting a final_endpoint to return
intermediate activations.
"""
from typing import Optional, Sequence, Text, Type, Union
import haiku as hk
import jax
import jax.numpy as jnp
from mmv.models import types
class BottleneckBlock(hk.Module):
"""Implements a bottleneck residual block (ResNet50 and ResNet101)."""
# pylint:disable=g-bare-generic
def __init__(self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
normalize_fn: Optional[types.NormalizeFn] = None,
name: Optional[Text] = None):
super(BottleneckBlock, self).__init__(name=name)
self._channels = channels
self._stride = stride
self._use_projection = use_projection
self._normalize_fn = normalize_fn
if self._use_projection:
self._proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
with_bias=False,
padding='SAME',
name='shortcut_conv')
self._conv_0 = hk.Conv2D(
output_channels=channels // 4,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_0')
self._conv_1 = hk.Conv2D(
output_channels=channels // 4,
kernel_shape=3,
stride=stride,
with_bias=False,
padding='SAME',
name='conv_1')
self._conv_2 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_2')
def __call__(self,
inputs,
is_training):
net = inputs
shortcut = inputs
for i, conv_i in enumerate([self._conv_0, self._conv_1, self._conv_2]):
if self._normalize_fn is not None:
net = self._normalize_fn(net, is_training=is_training)
net = jax.nn.relu(net)
if i == 0 and self._use_projection:
shortcut = self._proj_conv(net)
# Now do the convs.
net = conv_i(net)
return net + shortcut
class BasicBlock(hk.Module):
"""Implements a basic residual block (ResNet18 and ResNet34)."""
# pylint:disable=g-bare-generic
def __init__(self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
normalize_fn: Optional[types.NormalizeFn] = None,
name: Optional[Text] = None):
super(BasicBlock, self).__init__(name=name)
self._channels = channels
self._stride = stride
self._use_projection = use_projection
self._normalize_fn = normalize_fn
if self._use_projection:
self._proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
with_bias=False,
padding='SAME',
name='shortcut_conv')
self._conv_0 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_0')
self._conv_1 = hk.Conv2D(
output_channels=channels,
kernel_shape=3,
stride=stride,
with_bias=False,
padding='SAME',
name='conv_1')
def __call__(self,
inputs,
is_training):
net = inputs
shortcut = inputs
for i, conv_i in enumerate([self._conv_0, self._conv_1]):
if self._normalize_fn is not None:
net = self._normalize_fn(net, is_training=is_training)
net = jax.nn.relu(net)
if i == 0 and self._use_projection:
shortcut = self._proj_conv(net)
# Now do the convs.
net = conv_i(net)
return net + shortcut
class ResNetUnit(hk.Module):
"""Unit (group of blocks) for ResNet."""
# pylint:disable=g-bare-generic
def __init__(self,
channels: int,
num_blocks: int,
stride: Union[int, Sequence[int]],
               block_module: Union[Type[BottleneckBlock], Type[BasicBlock]],
normalize_fn: Optional[types.NormalizeFn] = None,
name: Optional[Text] = None,
remat: bool = False):
super(ResNetUnit, self).__init__(name=name)
self._channels = channels
self._num_blocks = num_blocks
self._stride = stride
self._normalize_fn = normalize_fn
self._block_module = block_module
self._remat = remat
def __call__(self,
inputs,
is_training):
input_channels = inputs.shape[-1]
self._blocks = []
for id_block in range(self._num_blocks):
use_projection = id_block == 0 and self._channels != input_channels
self._blocks.append(
self._block_module(
channels=self._channels,
stride=self._stride if id_block == 0 else 1,
use_projection=use_projection,
normalize_fn=self._normalize_fn,
name='block_%d' % id_block))
net = inputs
for block in self._blocks:
if self._remat:
# Note: we can ignore cell-var-from-loop because the lambda is evaluated
# inside every iteration of the loop. This is needed to go around the
# way variables are passed to jax.remat.
net = hk.remat(lambda x: block(x, is_training=is_training))(net) # pylint: disable=cell-var-from-loop
else:
net = block(net, is_training=is_training)
return net
class ResNetV2(hk.Module):
"""ResNetV2 model."""
# Endpoints of the model in order.
VALID_ENDPOINTS = (
'resnet_stem',
'resnet_unit_0',
'resnet_unit_1',
'resnet_unit_2',
'resnet_unit_3',
'last_conv',
'output',
)
# pylint:disable=g-bare-generic
def __init__(self,
depth=50,
num_classes: Optional[int] = 1000,
width_mult: int = 1,
normalize_fn: Optional[types.NormalizeFn] = None,
name: Optional[Text] = None,
remat: bool = False):
"""Creates ResNetV2 Haiku module.
Args:
      depth: depth of the desired ResNet (18, 34, 50, 101, 152 or 200).
num_classes: (int) Number of outputs in final layer. If None will not add
a classification head and will return the output embedding.
width_mult: multiplier for channel width.
normalize_fn: normalization function, see helpers/utils.py
name: Name of the module.
remat: Whether to rematerialize intermediate activations (saves memory).
"""
super(ResNetV2, self).__init__(name=name)
self._normalize_fn = normalize_fn
self._num_classes = num_classes
self._width_mult = width_mult
self._strides = [1, 2, 2, 2]
num_blocks = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 24, 36, 3],
}
if depth not in num_blocks:
raise ValueError(
f'`depth` should be in {list(num_blocks.keys())} ({depth} given).')
self._num_blocks = num_blocks[depth]
if depth >= 50:
self._block_module = BottleneckBlock
self._channels = [256, 512, 1024, 2048]
else:
self._block_module = BasicBlock
self._channels = [64, 128, 256, 512]
self._initial_conv = hk.Conv2D(
output_channels=64 * self._width_mult,
kernel_shape=7,
stride=2,
with_bias=False,
padding='SAME',
name='initial_conv')
if remat:
self._initial_conv = hk.remat(self._initial_conv)
self._block_groups = []
for i in range(4):
self._block_groups.append(
ResNetUnit(
channels=self._channels[i] * self._width_mult,
num_blocks=self._num_blocks[i],
block_module=self._block_module,
stride=self._strides[i],
normalize_fn=self._normalize_fn,
name='block_group_%d' % i,
remat=remat))
if num_classes is not None:
self._logits_layer = hk.Linear(
output_size=num_classes, w_init=jnp.zeros, name='logits')
def __call__(self, inputs, is_training, final_endpoint='output'):
self._final_endpoint = final_endpoint
net = self._initial_conv(inputs)
net = hk.max_pool(
net, window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1),
padding='SAME')
end_point = 'resnet_stem'
if self._final_endpoint == end_point:
return net
for i_group, block_group in enumerate(self._block_groups):
net = block_group(net, is_training=is_training)
end_point = f'resnet_unit_{i_group}'
if self._final_endpoint == end_point:
return net
end_point = 'last_conv'
if self._final_endpoint == end_point:
return net
if self._normalize_fn is not None:
net = self._normalize_fn(net, is_training=is_training)
net = jax.nn.relu(net)
# The actual representation
net = jnp.mean(net, axis=[1, 2])
assert self._final_endpoint == 'output'
if self._num_classes is None:
# If num_classes was None, we just return the output
# of the last block, without fully connected layer.
return net
return self._logits_layer(net)
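# --- Editor's addition: a hedged usage sketch, not part of the original
# module. It runs a ResNet-50 trunk to an intermediate endpoint; the NHWC
# input layout is the standard hk.Conv2D assumption.
def _example_resnet_endpoint():
  import numpy as np
  model = hk.transform_with_state(
      lambda x: ResNetV2(depth=50, num_classes=None)(
          x, is_training=False, final_endpoint='resnet_unit_0'))
  inputs = np.random.normal(size=(1, 224, 224, 3)).astype(np.float32)
  params, state = model.init(jax.random.PRNGKey(0), inputs)
  features, _ = model.apply(params, state, None, inputs)
  return features  # A [1, 56, 56, 256] feature map at this endpoint.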
|
deepmind-research-master
|
mmv/models/resnet.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalize functions constructors."""
from typing import Any, Dict, Optional, Sequence, Union
import haiku as hk
from jax import numpy as jnp
from mmv.models import types
class _BatchNorm(hk.BatchNorm):
"""A `hk.BatchNorm` with adapted default arguments."""
def __init__(self,
create_scale: bool = True,
create_offset: bool = True,
decay_rate: float = 0.9,
eps: float = 1e-5,
test_local_stats: bool = False,
**kwargs):
# Check args.
if kwargs.get('cross_replica_axis', None) is not None:
raise ValueError(
'Attempting to use \'batch_norm\' normalizer, but specifying '
'`cross_replica_axis`. If you want this behavior use '
'`normalizer=\'cross_replica_batch_norm\'` directly.')
self._test_local_stats = test_local_stats
super().__init__(create_scale=create_scale,
create_offset=create_offset,
decay_rate=decay_rate,
eps=eps,
**kwargs)
def __call__(self,
x: types.TensorLike,
is_training: bool) -> jnp.ndarray:
return super().__call__(x, is_training,
test_local_stats=self._test_local_stats)
class _CrossReplicaBatchNorm(hk.BatchNorm):
"""A `hk.BatchNorm` with adapted default arguments for cross replica."""
def __init__(self,
create_scale: bool = True,
create_offset: bool = True,
decay_rate: float = 0.9,
eps: float = 1e-5,
test_local_stats: bool = False,
**kwargs):
# Check args.
if 'cross_replica_axis' in kwargs and kwargs['cross_replica_axis'] is None:
raise ValueError(
'Attempting to use \'cross_replica_batch_norm\' normalizer, but '
'specifying `cross_replica_axis` to be None. If you want this '
'behavior use `normalizer=\'batch_norm\'` directly.')
self._test_local_stats = test_local_stats
kwargs['cross_replica_axis'] = kwargs.get('cross_replica_axis', 'i')
super().__init__(create_scale=create_scale,
create_offset=create_offset,
decay_rate=decay_rate,
eps=eps,
**kwargs)
def __call__(self,
x: types.TensorLike,
is_training: bool) -> jnp.ndarray:
return super().__call__(x, is_training,
test_local_stats=self._test_local_stats)
class _LayerNorm(hk.LayerNorm):
"""A `hk.LayerNorm` accepting (and discarding) an `is_training` argument."""
def __init__(self,
axis: Union[int, Sequence[int]] = (1, 2),
create_scale: bool = True,
create_offset: bool = True,
**kwargs):
super().__init__(axis=axis,
create_scale=create_scale,
create_offset=create_offset,
**kwargs)
def __call__(self, # pytype: disable=signature-mismatch # overriding-parameter-count-checks
x: types.TensorLike,
is_training: bool) -> jnp.ndarray:
del is_training # Unused.
return super().__call__(x)
_NORMALIZER_NAME_TO_CLASS = {
'batch_norm': _BatchNorm,
'cross_replica_batch_norm': _CrossReplicaBatchNorm,
'layer_norm': _LayerNorm,
}
def get_normalize_fn(
normalizer_name: str = 'batch_norm',
normalizer_kwargs: Optional[Dict[str, Any]] = None,
) -> types.NormalizeFn:
"""Handles NormalizeFn creation.
These functions are expected to be used as part of Haiku model. On each
application of the returned normalization_fn, a new Haiku layer will be added
to the model.
Args:
normalizer_name: The name of the normalizer to be constructed.
normalizer_kwargs: The kwargs passed to the normalizer constructor.
Returns:
A `types.NormalizeFn` that when applied will create a new layer.
Raises:
ValueError: If `normalizer_name` is unknown.
"""
# Check args.
if normalizer_name not in _NORMALIZER_NAME_TO_CLASS:
raise ValueError(f'Unrecognized `normalizer_name` {normalizer_name}.')
normalizer_class = _NORMALIZER_NAME_TO_CLASS[normalizer_name]
normalizer_kwargs = normalizer_kwargs or dict()
return lambda *a, **k: normalizer_class(**normalizer_kwargs)(*a, **k) # pylint: disable=unnecessary-lambda
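# --- Editor's addition: a hedged usage sketch. The returned function must be
# applied inside an `hk.transform`-ed function; each application adds a fresh
# normalization layer to the model.
def _example_normalize_fn(x: types.TensorLike, is_training: bool):
  normalize_fn = get_normalize_fn(
      normalizer_name='layer_norm',
      normalizer_kwargs={'axis': (1, 2)})
  return normalize_fn(x, is_training=is_training)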
|
deepmind-research-master
|
mmv/models/normalization.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for s3d."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import numpy as np
from mmv.models import normalization
from mmv.models import s3d
class _CallableS3D:
"""Wrapper around S3D that take care of parameter book keeping."""
def __init__(self, *args, **kwargs):
self._model = hk.transform_with_state(
lambda *a, **k: # pylint: disable=g-long-lambda,unnecessary-lambda
s3d.S3D(
normalize_fn=normalization.get_normalize_fn(),
*args, **kwargs)(*a, **k))
self._rng = jax.random.PRNGKey(42)
self._params, self._state = None, None
def init(self, inputs, **kwargs):
self._params, self._state = self._model.init(
self._rng, inputs, is_training=True, **kwargs)
def __call__(self, inputs, **kwargs):
if self._params is None:
self.init(inputs)
output, _ = self._model.apply(
self._params, self._state, self._rng, inputs, **kwargs)
return output
class S3DTest(parameterized.TestCase):
# Testing all layers is quite slow, added in comments for completeness.
@parameterized.parameters(
# dict(endpoint='Conv2d_1a_7x7', expected_size=(2, 8, 112, 112, 64)),
# dict(endpoint='MaxPool_2a_3x3', expected_size=(2, 8, 56, 56, 64)),
# dict(endpoint='Conv2d_2b_1x1', expected_size=(2, 8, 56, 56, 64)),
# dict(endpoint='Conv2d_2c_3x3', expected_size=(2, 8, 56, 56, 192)),
# dict(endpoint='MaxPool_3a_3x3', expected_size=(2, 8, 28, 28, 192)),
# dict(endpoint='Mixed_3b', expected_size=(2, 8, 28, 28, 256)),
# dict(endpoint='Mixed_3c', expected_size=(2, 8, 28, 28, 480)),
# dict(endpoint='MaxPool_4a_3x3', expected_size=(2, 4, 14, 14, 480)),
# dict(endpoint='Mixed_4b', expected_size=(2, 4, 14, 14, 512)),
# dict(endpoint='Mixed_4c', expected_size=(2, 4, 14, 14, 512)),
# dict(endpoint='Mixed_4d', expected_size=(2, 4, 14, 14, 512)),
# dict(endpoint='Mixed_4e', expected_size=(2, 4, 14, 14, 528)),
# dict(endpoint='Mixed_4f', expected_size=(2, 4, 14, 14, 832)),
# dict(endpoint='MaxPool_5a_2x2', expected_size=(2, 2, 7, 7, 832)),
# dict(endpoint='Mixed_5b', expected_size=(2, 2, 7, 7, 832)),
# dict(endpoint='Mixed_5c', expected_size=(2, 2, 7, 7, 1024)),
dict(endpoint='Embeddings', expected_size=(2, 1024)),
)
def test_endpoint_expected_output_dimensions(self, endpoint, expected_size):
inputs = np.random.normal(size=(2, 16, 224, 224, 3))
model = _CallableS3D()
output = model(inputs, is_training=False, final_endpoint=endpoint)
self.assertSameElements(output.shape, expected_size)
def test_space_to_depth(self):
inputs = np.random.normal(size=(2, 16//2, 224//2, 224//2, 3*2*2*2))
model = _CallableS3D()
output = model(inputs, is_training=False, final_endpoint='Conv2d_1a_7x7')
self.assertSameElements(output.shape, (2, 8, 112, 112, 64))
if __name__ == '__main__':
absltest.main()
|
deepmind-research-master
|
mmv/models/s3d_test.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tensorflow_gan as tfgan
import tensorflow_hub as hub
def fid(generated_sentences, real_sentences):
"""Compute FID rn sentences using pretrained universal sentence encoder.
Args:
generated_sentences: list of N strings.
real_sentences: list of N strings.
Returns:
Frechet distance between activations.
"""
embed = hub.Module("https://tfhub.dev/google/universal-sentence-encoder/2")
real_embed = embed(real_sentences)
generated_embed = embed(generated_sentences)
distance = tfgan.eval.frechet_classifier_distance_from_activations(
real_embed, generated_embed)
# Restrict the thread pool size to prevent excessive CPU usage.
config = tf.ConfigProto()
config.intra_op_parallelism_threads = 16
config.inter_op_parallelism_threads = 16
with tf.Session(config=config) as session:
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
distance_np = session.run(distance)
return distance_np
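# --- Editor's addition: a hedged usage sketch, not part of the original
# module. The universal sentence encoder is fetched from TF Hub on first use.
def _example_fid():
  generated = ["the cat sat on the mat", "a quick brown fox"]
  real = ["the dog sat on the rug", "a slow white hare"]
  return fid(generated_sentences=generated, real_sentences=real)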
|
deepmind-research-master
|
scratchgan/eval_metrics.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
deepmind-research-master
|
scratchgan/__init__.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generators for text data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from scratchgan import utils
class LSTMGen(snt.AbstractModule):
"""A multi-layer LSTM language model.
Uses tied input/output embedding weights.
"""
def __init__(self,
vocab_size,
feature_sizes,
max_sequence_length,
batch_size,
use_layer_norm,
trainable_embedding_size,
input_dropout,
output_dropout,
pad_token,
embedding_source=None,
vocab_file=None,
name='lstm_gen'):
super(LSTMGen, self).__init__(name=name)
self._feature_sizes = feature_sizes
self._max_sequence_length = max_sequence_length
self._vocab_size = vocab_size
self._batch_size = batch_size
self._use_layer_norm = use_layer_norm
self._trainable_embedding_size = trainable_embedding_size
self._embedding_source = embedding_source
self._vocab_file = vocab_file
self._input_dropout = input_dropout
self._output_dropout = output_dropout
self._pad_token = pad_token
if self._embedding_source:
assert vocab_file
def _build(self, is_training=True, temperature=1.0):
input_keep_prob = (1. - self._input_dropout) if is_training else 1.0
output_keep_prob = (1. - self._output_dropout) if is_training else 1.0
batch_size = self._batch_size
max_sequence_length = self._max_sequence_length
if self._embedding_source:
all_embeddings = utils.make_partially_trainable_embeddings(
self._vocab_file, self._embedding_source, self._vocab_size,
self._trainable_embedding_size)
else:
all_embeddings = tf.get_variable(
'trainable_embeddings',
shape=[self._vocab_size, self._trainable_embedding_size],
trainable=True)
_, self._embedding_size = all_embeddings.shape.as_list()
input_embeddings = tf.nn.dropout(all_embeddings, keep_prob=input_keep_prob)
output_embeddings = tf.nn.dropout(
all_embeddings, keep_prob=output_keep_prob)
out_bias = tf.get_variable(
'out_bias', shape=[1, self._vocab_size], dtype=tf.float32)
in_proj = tf.get_variable(
'in_proj', shape=[self._embedding_size, self._feature_sizes[0]])
# If more than 1 layer, then output has dim sum(self._feature_sizes),
# which is different from input dim == self._feature_sizes[0]
# So we need a different projection matrix for input and output.
if len(self._feature_sizes) > 1:
out_proj = tf.get_variable(
'out_proj', shape=[self._embedding_size,
sum(self._feature_sizes)])
else:
out_proj = in_proj
encoder_cells = []
for feature_size in self._feature_sizes:
encoder_cells += [
snt.LSTM(feature_size, use_layer_norm=self._use_layer_norm)
]
encoder_cell = snt.DeepRNN(encoder_cells)
state = encoder_cell.initial_state(batch_size)
# Manual unrolling.
samples_list, logits_list, logprobs_list, embeddings_list = [], [], [], []
sample = tf.tile(
tf.constant(self._pad_token, dtype=tf.int32)[None], [batch_size])
logging.info('Unrolling over %d steps.', max_sequence_length)
for _ in range(max_sequence_length):
# Input is sampled word at t-1.
embedding = tf.nn.embedding_lookup(input_embeddings, sample)
embedding.shape.assert_is_compatible_with(
[batch_size, self._embedding_size])
embedding_proj = tf.matmul(embedding, in_proj)
embedding_proj.shape.assert_is_compatible_with(
[batch_size, self._feature_sizes[0]])
outputs, state = encoder_cell(embedding_proj, state)
outputs_proj = tf.matmul(outputs, out_proj, transpose_b=True)
logits = tf.matmul(
outputs_proj, output_embeddings, transpose_b=True) + out_bias
categorical = tfp.distributions.Categorical(logits=logits/temperature)
sample = categorical.sample()
logprobs = categorical.log_prob(sample)
samples_list.append(sample)
logits_list.append(logits)
logprobs_list.append(logprobs)
embeddings_list.append(embedding)
# Create an op to retrieve embeddings for full sequence, useful for testing.
embeddings = tf.stack( # pylint: disable=unused-variable
embeddings_list,
axis=1,
name='embeddings')
sequence = tf.stack(samples_list, axis=1)
logprobs = tf.stack(logprobs_list, axis=1)
# The sequence stops after the first occurrence of a PAD token.
sequence_length = utils.get_first_occurrence_indices(
sequence, self._pad_token)
mask = utils.get_mask_past_symbol(sequence, self._pad_token)
masked_sequence = sequence * tf.cast(mask, tf.int32)
masked_logprobs = logprobs * tf.cast(mask, tf.float32)
return {
'sequence': masked_sequence,
'sequence_length': sequence_length,
'logprobs': masked_logprobs
}
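# --- Editor's addition: a hedged construction sketch using small,
# hypothetical hyperparameters; running the returned ops still requires a
# tf.Session.
def _example_lstm_gen():
  gen = LSTMGen(
      vocab_size=100,
      feature_sizes=[32],
      max_sequence_length=8,
      batch_size=4,
      use_layer_norm=False,
      trainable_embedding_size=16,
      input_dropout=0.0,
      output_dropout=0.0,
      pad_token=0)
  outputs = gen(is_training=False)
  return outputs  # outputs["sequence"]: [4, 8] int32 tensor of token ids.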
|
deepmind-research-master
|
scratchgan/generators.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for parsing text files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
from absl import logging
import numpy as np
from tensorflow.compat.v1.io import gfile
# sequences: [N, MAX_TOKENS_SEQUENCE] array of int32
# lengths: [N] array of int32, such that lengths[i] is the number of
# tokens in sequences[i, :] up to and including the first PAD token
FILENAMES = {
"emnlp2017": ("train.json", "valid.json", "test.json"),
}
# EMNLP2017 sentences have max length 50; MAX_TOKENS_SEQUENCE leaves room for
# trailing PAD tokens so that all sentences end with PAD.
MAX_TOKENS_SEQUENCE = {"emnlp2017": 52}
UNK = "<unk>"
PAD = " "
PAD_INT = 0
def tokenize(sentence):
"""Split a string into words."""
return sentence.split(" ") + [PAD]
def _build_vocab(json_data):
"""Builds full vocab from json data."""
vocab = collections.Counter()
for sentence in json_data:
tokens = tokenize(sentence["s"])
vocab.update(tokens)
for title in sentence["t"]:
title_tokens = tokenize(title)
vocab.update(title_tokens)
# Most common words first.
count_pairs = sorted(list(vocab.items()), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
words = list(words)
if UNK not in words:
words = [UNK] + words
word_to_id = dict(list(zip(words, list(range(len(words))))))
# Tokens are now sorted by frequency. There's no guarantee that `PAD` will
# end up at `PAD_INT` index. Enforce it by swapping whatever token is
# currently at the `PAD_INT` index with the `PAD` token.
word = list(word_to_id.keys())[list(word_to_id.values()).index(PAD_INT)]
word_to_id[PAD], word_to_id[word] = word_to_id[word], word_to_id[PAD]
assert word_to_id[PAD] == PAD_INT
return word_to_id
def string_sequence_to_sequence(string_sequence, word_to_id):
result = []
for word in string_sequence:
if word in word_to_id:
result.append(word_to_id[word])
else:
result.append(word_to_id[UNK])
return result
def _integerize(json_data, word_to_id, dataset):
"""Transform words into integers."""
sequences = np.full((len(json_data), MAX_TOKENS_SEQUENCE[dataset]),
word_to_id[PAD], np.int32)
sequence_lengths = np.zeros(shape=(len(json_data)), dtype=np.int32)
for i, sentence in enumerate(json_data):
sequence_i = string_sequence_to_sequence(
tokenize(sentence["s"]), word_to_id)
sequence_lengths[i] = len(sequence_i)
sequences[i, :sequence_lengths[i]] = np.array(sequence_i)
return {
"sequences": sequences,
"sequence_lengths": sequence_lengths,
}
def get_raw_data(data_path, dataset, truncate_vocab=20000):
"""Load raw data from data directory "data_path".
Reads text files, converts strings to integer ids,
and performs mini-batching of the inputs.
Args:
data_path: string path to the directory where simple-examples.tgz has been
extracted.
dataset: one of ["emnlp2017"]
truncate_vocab: int, number of words to keep in the vocabulary.
Returns:
tuple (train_data, valid_data, vocabulary) where each of the data
objects can be passed to iterator.
Raises:
ValueError: dataset not in ["emnlp2017"].
"""
if dataset not in FILENAMES:
raise ValueError("Invalid dataset {}. Valid datasets: {}".format(
dataset, list(FILENAMES.keys())))
train_file, valid_file, _ = FILENAMES[dataset]
train_path = os.path.join(data_path, train_file)
valid_path = os.path.join(data_path, valid_file)
with gfile.GFile(train_path, "r") as json_file:
json_data_train = json.load(json_file)
with gfile.GFile(valid_path, "r") as json_file:
json_data_valid = json.load(json_file)
word_to_id = _build_vocab(json_data_train)
logging.info("Full vocab length: %d", len(word_to_id))
# Assume the vocab is sorted by frequency.
word_to_id_truncated = {
k: v for k, v in word_to_id.items() if v < truncate_vocab
}
logging.info("Truncated vocab length: %d", len(word_to_id_truncated))
train_data = _integerize(json_data_train, word_to_id_truncated, dataset)
valid_data = _integerize(json_data_valid, word_to_id_truncated, dataset)
return train_data, valid_data, word_to_id_truncated
def iterator(raw_data, batch_size, random=False):
"""Looping iterators on the raw data."""
sequences = raw_data["sequences"]
sequence_lengths = raw_data["sequence_lengths"]
num_examples = sequences.shape[0]
indice_range = np.arange(num_examples)
if random:
while True:
indices = np.random.choice(indice_range, size=batch_size, replace=True)
yield {
"sequence": sequences[indices, :],
"sequence_length": sequence_lengths[indices],
}
else:
start = 0
while True:
sequence = sequences[start:(start + batch_size), :]
sequence_length = sequence_lengths[start:(start + batch_size)]
start += batch_size
if start + batch_size > num_examples:
start = (start + batch_size) % num_examples
yield {
"sequence": sequence,
"sequence_length": sequence_length,
}
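# --- Editor's addition: a hedged usage sketch; the data path is hypothetical
# and must contain train.json and valid.json in EMNLP2017 format.
def _example_reader_usage(data_path="/tmp/emnlp2017"):
  train_data, _, vocab = get_raw_data(data_path, dataset="emnlp2017")
  batches = iterator(train_data, batch_size=4, random=True)
  batch = next(batches)
  # batch["sequence"] has shape [4, MAX_TOKENS_SEQUENCE["emnlp2017"]].
  return batch, len(vocab)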
|
deepmind-research-master
|
scratchgan/reader.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training script for ScratchGAN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.io import gfile
from scratchgan import discriminator_nets
from scratchgan import eval_metrics
from scratchgan import generators
from scratchgan import losses
from scratchgan import reader
from scratchgan import utils
flags.DEFINE_string("dataset", "emnlp2017", "Dataset.")
flags.DEFINE_integer("batch_size", 512, "Batch size")
flags.DEFINE_string("gen_type", "lstm", "Generator type.")
flags.DEFINE_string("disc_type", "lstm", "Discriminator type.")
flags.DEFINE_string("disc_loss_type", "ce", "Loss type.")
flags.DEFINE_integer("gen_feature_size", 512, "Generator feature size.")
flags.DEFINE_integer("disc_feature_size", 512, "Discriminator feature size.")
flags.DEFINE_integer("num_layers_gen", 2, "Number of generator layers.")
flags.DEFINE_integer("num_layers_disc", 1, "Number of discriminator layers.")
flags.DEFINE_bool("layer_norm_gen", False, "Layer norm generator.")
flags.DEFINE_bool("layer_norm_disc", True, "Layer norm discriminator.")
flags.DEFINE_float("gen_input_dropout", 0.0, "Input dropout generator.")
flags.DEFINE_float("gen_output_dropout", 0.0, "Input dropout discriminator.")
flags.DEFINE_float("l2_gen", 0.0, "L2 regularization generator.")
flags.DEFINE_float("l2_disc", 1e-6, "L2 regularization discriminator.")
flags.DEFINE_float("disc_dropout", 0.1, "Dropout discriminator")
flags.DEFINE_integer("trainable_embedding_size", 64,
"Size of trainable embedding.")
flags.DEFINE_bool("use_pretrained_embedding", True, "Use pretrained embedding.")
flags.DEFINE_integer("num_steps", int(200 * 1000), "Number of training steps.")
flags.DEFINE_integer("num_disc_updates", 1, "Number of discriminator updates.")
flags.DEFINE_integer("num_gen_updates", 1, "Number of generator updates.")
flags.DEFINE_string("data_dir", "/tmp/emnlp2017", "Directory where data is.")
flags.DEFINE_float("gen_lr", 9.59e-5, "Learning rate generator.")
flags.DEFINE_float("disc_lr", 9.38e-3, "Learning rate discriminator.")
flags.DEFINE_float("gen_beta1", 0.5, "Beta1 for generator.")
flags.DEFINE_float("disc_beta1", 0.5, "Beta1 for discriminator.")
flags.DEFINE_float("gamma", 0.23, "Discount factor.")
flags.DEFINE_float("baseline_decay", 0.08, "Baseline decay rate.")
flags.DEFINE_string("mode", "train", "train or evaluate_pair.")
flags.DEFINE_string("checkpoint_dir", "/tmp/emnlp2017/checkpoints/",
"Directory for checkpoints.")
flags.DEFINE_integer("export_every", 1000, "Frequency of checkpoint exports.")
flags.DEFINE_integer("num_examples_for_eval", int(1e4),
"Number of examples for evaluation")
EVALUATOR_SLEEP_PERIOD = 60 # Seconds evaluator sleeps if nothing to do.
def main(_):
config = flags.FLAGS
gfile.makedirs(config.checkpoint_dir)
if config.mode == "train":
train(config)
elif config.mode == "evaluate_pair":
while True:
checkpoint_path = utils.maybe_pick_models_to_evaluate(
checkpoint_dir=config.checkpoint_dir)
if checkpoint_path:
evaluate_pair(
config=config,
batch_size=config.batch_size,
checkpoint_path=checkpoint_path,
data_dir=config.data_dir,
dataset=config.dataset,
num_examples_for_eval=config.num_examples_for_eval)
else:
logging.info("No models to evaluate found, sleeping for %d seconds",
EVALUATOR_SLEEP_PERIOD)
time.sleep(EVALUATOR_SLEEP_PERIOD)
else:
raise Exception(
"Unexpected mode %s, supported modes are \"train\" or \"evaluate_pair\""
% (config.mode))
def train(config):
"""Train."""
logging.info("Training.")
tf.reset_default_graph()
np.set_printoptions(precision=4)
# Get data.
raw_data = reader.get_raw_data(
data_path=config.data_dir, dataset=config.dataset)
train_data, valid_data, word_to_id = raw_data
id_to_word = {v: k for k, v in word_to_id.items()}
vocab_size = len(word_to_id)
max_length = reader.MAX_TOKENS_SEQUENCE[config.dataset]
logging.info("Vocabulary size: %d", vocab_size)
iterator = reader.iterator(raw_data=train_data, batch_size=config.batch_size)
iterator_valid = reader.iterator(
raw_data=valid_data, batch_size=config.batch_size)
real_sequence = tf.placeholder(
dtype=tf.int32,
shape=[config.batch_size, max_length],
name="real_sequence")
real_sequence_length = tf.placeholder(
dtype=tf.int32, shape=[config.batch_size], name="real_sequence_length")
first_batch_np = next(iterator)
valid_batch_np = next(iterator_valid)
test_real_batch = {k: tf.constant(v) for k, v in first_batch_np.items()}
test_fake_batch = {
"sequence":
tf.constant(
np.random.choice(
vocab_size, size=[config.batch_size,
max_length]).astype(np.int32)),
"sequence_length":
tf.constant(
np.random.choice(max_length,
size=[config.batch_size]).astype(np.int32)),
}
valid_batch = {k: tf.constant(v) for k, v in valid_batch_np.items()}
# Create generator.
if config.use_pretrained_embedding:
embedding_source = utils.get_embedding_path(config.data_dir, config.dataset)
vocab_file = "/tmp/vocab.txt"
with gfile.GFile(vocab_file, "w") as f:
for i in range(len(id_to_word)):
f.write(id_to_word[i] + "\n")
logging.info("Temporary vocab file: %s", vocab_file)
else:
embedding_source = None
vocab_file = None
gen = generators.LSTMGen(
vocab_size=vocab_size,
feature_sizes=[config.gen_feature_size] * config.num_layers_gen,
max_sequence_length=reader.MAX_TOKENS_SEQUENCE[config.dataset],
batch_size=config.batch_size,
use_layer_norm=config.layer_norm_gen,
trainable_embedding_size=config.trainable_embedding_size,
input_dropout=config.gen_input_dropout,
output_dropout=config.gen_output_dropout,
pad_token=reader.PAD_INT,
embedding_source=embedding_source,
vocab_file=vocab_file,
)
gen_outputs = gen()
# Create discriminator.
disc = discriminator_nets.LSTMEmbedDiscNet(
vocab_size=vocab_size,
feature_sizes=[config.disc_feature_size] * config.num_layers_disc,
trainable_embedding_size=config.trainable_embedding_size,
embedding_source=embedding_source,
use_layer_norm=config.layer_norm_disc,
pad_token=reader.PAD_INT,
vocab_file=vocab_file,
dropout=config.disc_dropout,
)
disc_logits_real = disc(
sequence=real_sequence, sequence_length=real_sequence_length)
disc_logits_fake = disc(
sequence=gen_outputs["sequence"],
sequence_length=gen_outputs["sequence_length"])
# Loss of the discriminator.
if config.disc_loss_type == "ce":
targets_real = tf.ones(
[config.batch_size, reader.MAX_TOKENS_SEQUENCE[config.dataset]])
targets_fake = tf.zeros(
[config.batch_size, reader.MAX_TOKENS_SEQUENCE[config.dataset]])
loss_real = losses.sequential_cross_entropy_loss(disc_logits_real,
targets_real)
loss_fake = losses.sequential_cross_entropy_loss(disc_logits_fake,
targets_fake)
disc_loss = 0.5 * loss_real + 0.5 * loss_fake
# Loss of the generator.
gen_loss, cumulative_rewards, baseline = losses.reinforce_loss(
disc_logits=disc_logits_fake,
gen_logprobs=gen_outputs["logprobs"],
gamma=config.gamma,
decay=config.baseline_decay)
# Optimizers
disc_optimizer = tf.train.AdamOptimizer(
learning_rate=config.disc_lr, beta1=config.disc_beta1)
gen_optimizer = tf.train.AdamOptimizer(
learning_rate=config.gen_lr, beta1=config.gen_beta1)
# Get losses and variables.
disc_vars = disc.get_all_variables()
gen_vars = gen.get_all_variables()
l2_disc = tf.reduce_sum(tf.add_n([tf.nn.l2_loss(v) for v in disc_vars]))
l2_gen = tf.reduce_sum(tf.add_n([tf.nn.l2_loss(v) for v in gen_vars]))
scalar_disc_loss = tf.reduce_mean(disc_loss) + config.l2_disc * l2_disc
scalar_gen_loss = tf.reduce_mean(gen_loss) + config.l2_gen * l2_gen
# Update ops.
global_step = tf.train.get_or_create_global_step()
disc_update = disc_optimizer.minimize(
scalar_disc_loss, var_list=disc_vars, global_step=global_step)
gen_update = gen_optimizer.minimize(
scalar_gen_loss, var_list=gen_vars, global_step=global_step)
# Saver.
saver = tf.train.Saver()
# Metrics
test_disc_logits_real = disc(**test_real_batch)
test_disc_logits_fake = disc(**test_fake_batch)
valid_disc_logits = disc(**valid_batch)
disc_predictions_real = tf.nn.sigmoid(disc_logits_real)
disc_predictions_fake = tf.nn.sigmoid(disc_logits_fake)
valid_disc_predictions = tf.reduce_mean(
tf.nn.sigmoid(valid_disc_logits), axis=0)
test_disc_predictions_real = tf.reduce_mean(
tf.nn.sigmoid(test_disc_logits_real), axis=0)
test_disc_predictions_fake = tf.reduce_mean(
tf.nn.sigmoid(test_disc_logits_fake), axis=0)
# Only log results for the first element of the batch.
metrics = {
"scalar_gen_loss": scalar_gen_loss,
"scalar_disc_loss": scalar_disc_loss,
"disc_predictions_real": tf.reduce_mean(disc_predictions_real),
"disc_predictions_fake": tf.reduce_mean(disc_predictions_fake),
"test_disc_predictions_real": tf.reduce_mean(test_disc_predictions_real),
"test_disc_predictions_fake": tf.reduce_mean(test_disc_predictions_fake),
"valid_disc_predictions": tf.reduce_mean(valid_disc_predictions),
"cumulative_rewards": tf.reduce_mean(cumulative_rewards),
"baseline": tf.reduce_mean(baseline),
}
# Training.
logging.info("Starting training")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
latest_ckpt = tf.train.latest_checkpoint(config.checkpoint_dir)
if latest_ckpt:
saver.restore(sess, latest_ckpt)
for step in range(config.num_steps):
real_data_np = next(iterator)
train_feed = {
real_sequence: real_data_np["sequence"],
real_sequence_length: real_data_np["sequence_length"],
}
# Update generator and discriminator.
for _ in range(config.num_disc_updates):
sess.run(disc_update, feed_dict=train_feed)
for _ in range(config.num_gen_updates):
sess.run(gen_update, feed_dict=train_feed)
# Reporting
if step % config.export_every == 0:
gen_sequence_np, metrics_np = sess.run(
[gen_outputs["sequence"], metrics], feed_dict=train_feed)
metrics_np["gen_sentence"] = utils.sequence_to_sentence(
gen_sequence_np[0, :], id_to_word)
saver.save(
sess,
save_path=config.checkpoint_dir + "scratchgan",
global_step=global_step)
metrics_np["model_path"] = tf.train.latest_checkpoint(
config.checkpoint_dir)
logging.info(metrics_np)
# After training, export models.
saver.save(
sess,
save_path=config.checkpoint_dir + "scratchgan",
global_step=global_step)
logging.info("Saved final model at %s.",
tf.train.latest_checkpoint(config.checkpoint_dir))
def evaluate_pair(config, batch_size, checkpoint_path, data_dir, dataset,
num_examples_for_eval):
"""Evaluates a pair generator discriminator.
This function loads a discriminator from disk, a generator, and evaluates the
discriminator against the generator.
It returns the mean probability of the discriminator against several batches,
and the FID of the generator against the validation data.
It also writes evaluation samples to disk.
Args:
config: dict, the config file.
batch_size: int, size of the batch.
checkpoint_path: string, full path to the TF checkpoint on disk.
data_dir: string, path to a directory containing the dataset.
dataset: string, "emnlp2017", to select the right dataset.
num_examples_for_eval: int, number of examples for evaluation.
"""
tf.reset_default_graph()
logging.info("Evaluating checkpoint %s.", checkpoint_path)
# Build graph.
train_data, valid_data, word_to_id = reader.get_raw_data(
data_dir, dataset=dataset)
id_to_word = {v: k for k, v in word_to_id.items()}
vocab_size = len(word_to_id)
train_iterator = reader.iterator(raw_data=train_data, batch_size=batch_size)
valid_iterator = reader.iterator(raw_data=valid_data, batch_size=batch_size)
train_sequence = tf.placeholder(
dtype=tf.int32,
shape=[batch_size, reader.MAX_TOKENS_SEQUENCE[dataset]],
name="train_sequence")
train_sequence_length = tf.placeholder(
dtype=tf.int32, shape=[batch_size], name="train_sequence_length")
valid_sequence = tf.placeholder(
dtype=tf.int32,
shape=[batch_size, reader.MAX_TOKENS_SEQUENCE[dataset]],
name="valid_sequence")
valid_sequence_length = tf.placeholder(
dtype=tf.int32, shape=[batch_size], name="valid_sequence_length")
disc_inputs_train = {
"sequence": train_sequence,
"sequence_length": train_sequence_length,
}
disc_inputs_valid = {
"sequence": valid_sequence,
"sequence_length": valid_sequence_length,
}
if config.use_pretrained_embedding:
embedding_source = utils.get_embedding_path(config.data_dir, config.dataset)
vocab_file = "/tmp/vocab.txt"
with gfile.GFile(vocab_file, "w") as f:
for i in range(len(id_to_word)):
f.write(id_to_word[i] + "\n")
logging.info("Temporary vocab file: %s", vocab_file)
else:
embedding_source = None
vocab_file = None
gen = generators.LSTMGen(
vocab_size=vocab_size,
feature_sizes=[config.gen_feature_size] * config.num_layers_gen,
max_sequence_length=reader.MAX_TOKENS_SEQUENCE[config.dataset],
batch_size=config.batch_size,
use_layer_norm=config.layer_norm_gen,
trainable_embedding_size=config.trainable_embedding_size,
input_dropout=config.gen_input_dropout,
output_dropout=config.gen_output_dropout,
pad_token=reader.PAD_INT,
embedding_source=embedding_source,
vocab_file=vocab_file,
)
gen_outputs = gen()
disc = discriminator_nets.LSTMEmbedDiscNet(
vocab_size=vocab_size,
feature_sizes=[config.disc_feature_size] * config.num_layers_disc,
trainable_embedding_size=config.trainable_embedding_size,
embedding_source=embedding_source,
use_layer_norm=config.layer_norm_disc,
pad_token=reader.PAD_INT,
vocab_file=vocab_file,
dropout=config.disc_dropout,
)
disc_inputs = {
"sequence": gen_outputs["sequence"],
"sequence_length": gen_outputs["sequence_length"],
}
gen_logits = disc(**disc_inputs)
train_logits = disc(**disc_inputs_train)
valid_logits = disc(**disc_inputs_valid)
# Saver.
saver = tf.train.Saver()
# Reduce over time and batch.
train_probs = tf.reduce_mean(tf.nn.sigmoid(train_logits))
valid_probs = tf.reduce_mean(tf.nn.sigmoid(valid_logits))
gen_probs = tf.reduce_mean(tf.nn.sigmoid(gen_logits))
outputs = {
"train_probs": train_probs,
"valid_probs": valid_probs,
"gen_probs": gen_probs,
"gen_sequences": gen_outputs["sequence"],
"valid_sequences": valid_sequence
}
# Get average discriminator score and store generated sequences.
all_valid_sentences = []
all_gen_sentences = []
all_gen_sequences = []
mean_train_prob = 0.0
mean_valid_prob = 0.0
mean_gen_prob = 0.0
logging.info("Graph constructed, generating batches.")
num_batches = num_examples_for_eval // batch_size + 1
  # Restrict the thread pool size to prevent excessive CPU usage.
tf_config = tf.ConfigProto()
tf_config.intra_op_parallelism_threads = 16
tf_config.inter_op_parallelism_threads = 16
with tf.Session(config=tf_config) as sess:
# Restore variables from checkpoints.
logging.info("Restoring variables.")
saver.restore(sess, checkpoint_path)
for i in range(num_batches):
logging.info("Batch %d / %d", i, num_batches)
train_data_np = next(train_iterator)
valid_data_np = next(valid_iterator)
feed_dict = {
train_sequence: train_data_np["sequence"],
train_sequence_length: train_data_np["sequence_length"],
valid_sequence: valid_data_np["sequence"],
valid_sequence_length: valid_data_np["sequence_length"],
}
outputs_np = sess.run(outputs, feed_dict=feed_dict)
all_gen_sequences.extend(outputs_np["gen_sequences"])
gen_sentences = utils.batch_sequences_to_sentences(
outputs_np["gen_sequences"], id_to_word)
valid_sentences = utils.batch_sequences_to_sentences(
outputs_np["valid_sequences"], id_to_word)
all_valid_sentences.extend(valid_sentences)
all_gen_sentences.extend(gen_sentences)
      # Each *_probs value is already a within-batch mean, so average over
      # the number of batches rather than the batch size.
      mean_train_prob += outputs_np["train_probs"] / num_batches
      mean_valid_prob += outputs_np["valid_probs"] / num_batches
      mean_gen_prob += outputs_np["gen_probs"] / num_batches
logging.info("Evaluating FID.")
# Compute FID
fid = eval_metrics.fid(
generated_sentences=all_gen_sentences[:num_examples_for_eval],
real_sentences=all_valid_sentences[:num_examples_for_eval])
utils.write_eval_results(config.checkpoint_dir, all_gen_sentences,
os.path.basename(checkpoint_path), mean_train_prob,
mean_valid_prob, mean_gen_prob, fid)
if __name__ == "__main__":
app.run(main)
|
deepmind-research-master
|
scratchgan/experiment.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.io import gfile
from scratchgan import reader
EVAL_FILENAME = "evaluated_checkpoints.csv"
GLOVE_DIM = 300
GLOVE_STD = 0.3836 # Standard dev. of GloVe embeddings.
def _get_embedding_initializer(vocab_file, embedding_source, vocab_size):
"""Loads pretrained embeddings from a file in GloVe format."""
with gfile.GFile(embedding_source, "r") as f:
embedding_lines = f.readlines()
  # First line contains the vocab size and the embedding dim.
_, embedding_dim = list(map(int, embedding_lines[0].split()))
# Get the tokens as strings.
tokens = [line.split()[0] for line in embedding_lines[1:]]
# Get the actual embedding matrix.
unsorted_emb = np.array(
[[float(x) for x in line.split()[1:]] for line in embedding_lines[1:]])
# Get the expected vocab order.
with gfile.GFile(vocab_file, "r") as f:
tokens_order = [l.strip() for l in f.readlines()]
assert vocab_size == len(tokens_order)
  # Put the embeddings in the expected vocab order.
sorted_emb = np.zeros((vocab_size, embedding_dim))
for i, token in enumerate(tokens_order):
if token in tokens:
sorted_emb[i, :] = unsorted_emb[tokens.index(token), :]
else: # If we don't have a pretrained embedding, initialize randomly.
sorted_emb[i, :] = np.random.normal(
loc=0.0, scale=GLOVE_STD, size=(GLOVE_DIM,))
return sorted_emb.astype(np.float32)
def append_position_signal(embeddings, position_dim=8):
"""Append position signal. See get_position_signal."""
batch_size, sequence_length, embedding_dim = embeddings.get_shape().as_list()
positions = get_position_signal(sequence_length, position_dim)
# Append to embeddings.
position_inputs = tf.tile(positions[None, :, :], [batch_size, 1, 1])
embeddings_pos = tf.concat([embeddings, position_inputs], axis=2)
embeddings_pos.shape.assert_is_compatible_with(
[batch_size, sequence_length, embedding_dim + position_dim])
return embeddings_pos
def get_position_signal(sequence_length, position_dim=8):
"""Return fixed position signal as sine waves.
  Sine wave periods are geometrically spaced so that the shortest is 2 and
  the longest is the sequence length. That way the longest-period wave is
  monotonous over the whole sequence length.
Sine waves are also shifted so that they don't all start with the same
value.
We don't use learned positional embeddings because these embeddings are
projected linearly along with the original embeddings, and the projection is
learned.
Args:
    sequence_length: int, T, length of the sequence.
position_dim: int, P, number of sine waves.
Returns:
A [T, P] tensor, position embeddings.
"""
# Compute the frequencies.
periods = tf.exp(
tf.lin_space(
tf.log(2.0), tf.log(tf.to_float(sequence_length)), position_dim))
frequencies = 1.0 / periods # Shape [T, P].
# Compute the sine waves.
xs = frequencies[None, :] * tf.to_float(tf.range(sequence_length)[:, None])
shifts = tf.lin_space(0.0, 2.0, position_dim)[None, :] # [1, P]
positions = tf.math.cos(math.pi * (xs + shifts)) # [T, P]
positions.shape.assert_is_compatible_with([sequence_length, position_dim])
return positions
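def _example_position_signal():
  # Editor's addition: a hedged sketch evaluating a [6, 4] position signal.
  with tf.Session() as sess:
    return sess.run(get_position_signal(sequence_length=6, position_dim=4))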
def get_mask_by_length(lengths, max_length):
"""Returns a mask where x[i , j] = (j < lengths[i]).
Args:
lengths: [B] tensor of int32 such that 0 <= lengths[i] <= max_length.
max_length: scalar tensor of int32.
Returns:
[B, max_length] tensor of booleans such that x[i, j] is True
if and only if j < lengths[i].
"""
batch_size = lengths.get_shape().as_list()[0]
indices = tf.range(start=0, limit=max_length)
all_indices = tf.tile(indices[None, :], [batch_size, 1])
all_lengths = tf.tile(lengths[:, None], [1, max_length])
mask = (all_indices < all_lengths)
mask_boolean = tf.cast(mask, tf.bool)
return mask_boolean
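def _example_mask_by_length():
  # Editor's addition: a hedged worked example. Lengths [1, 3] with
  # max_length 4 yield [[True, False, False, False],
  #                     [True, True, True, False]].
  lengths = tf.constant([1, 3], dtype=tf.int32)
  with tf.Session() as sess:
    return sess.run(get_mask_by_length(lengths, max_length=4))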
def get_mask_past_symbol(reference, symbol, optimize_for_tpu=False):
"""For each row, mask is True before and at the first occurrence of symbol."""
batch_size, max_length = reference.get_shape().as_list()
symbol = tf.convert_to_tensor(symbol)
symbol.shape.assert_is_compatible_with([])
first_indices = get_first_occurrence_indices(reference, symbol,
optimize_for_tpu)
first_indices.shape.assert_is_compatible_with([batch_size])
keep_lengths = tf.minimum(first_indices, max_length)
mask = get_mask_by_length(keep_lengths, max_length)
mask.shape.assert_is_compatible_with([batch_size, max_length])
mask.set_shape([batch_size, max_length])
return mask
def get_first_occurrence_indices(reference, symbol, optimize_for_tpu=False):
"""For each row in reference, get index after the first occurrence of symbol.
If symbol is not present on a row, return reference.shape[1] instead.
Args:
reference: [B, T] tensor of elements of the same type as symbol.
    symbol: int or [] scalar tensor of the same dtype as reference.
optimize_for_tpu: bool, whether to use a TPU-capable variant.
Returns:
    A [B] tensor of tf.int32 where x[i] is such that
    reference[i, x[i]-1] == symbol, and reference[i, j] != symbol
    for j < x[i]-1. If symbol is not present on row i then x[i] = T.
"""
if optimize_for_tpu:
# Run code which can be compiled on TPU.
    # Transpose reference to [T, B].
reference = tf.transpose(reference, [1, 0])
range_tensor = tf.range(reference.shape.as_list()[0])
indexes = tf.stack([range_tensor] * reference.shape.as_list()[1], 1)
symbol = tf.stack([symbol] * reference.shape.as_list()[1], 0)
initial_indices = tf.constant(
reference.shape.as_list()[0],
shape=[reference.shape.as_list()[1]],
dtype=tf.int32)
# We want a function which moves backwards.
def fn(current_index, elems):
ref, ind = elems
return tf.where(tf.equal(ref, symbol), ind + 1, current_index)
min_indexes = tf.scan(
fn, (reference, indexes),
initializer=initial_indices,
parallel_iterations=1,
reverse=True)
return min_indexes[0]
batch_size, max_length = reference.get_shape().as_list()
symbol = tf.convert_to_tensor(symbol)
symbol.shape.assert_is_compatible_with([])
# Add symbol at the end of each row, to make sure tf.where works.
tensor = tf.concat(
[reference, tf.tile(symbol[None, None], [batch_size, 1])], axis=1)
index_all_occurrences = tf.where(tf.equal(tensor, symbol))
index_all_occurrences = tf.cast(index_all_occurrences, tf.int32)
# `index_all_occurrences` is a [N, 2] tensor with coordinates of all positions
# of `symbol` in `tensor`. So N will be >= batch size since there can be
# several `symbol` in one row of tensor. We need to take only the position
# of the first occurrence for each row. `segment_min` does that, taking the
# lowest column index for each row index.
index_first_occurrences = tf.segment_min(index_all_occurrences[:, 1],
index_all_occurrences[:, 0])
index_first_occurrences.set_shape([batch_size])
index_first_occurrences = tf.minimum(index_first_occurrences + 1, max_length)
return index_first_occurrences
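def _example_first_occurrence():
  # Editor's addition: a hedged worked example. With symbol 0, row one has
  # its first 0 at column 2, so the result is 3; row two has no 0, so the
  # result is T == 4.
  reference = tf.constant([[5, 7, 0, 0],
                           [1, 2, 3, 4]], dtype=tf.int32)
  with tf.Session() as sess:
    return sess.run(get_first_occurrence_indices(reference, symbol=0))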
def sequence_to_sentence(sequence, id_to_word):
"""Turn a sequence into a sentence , inverse of sentence_to_sequence."""
words = []
for token_index in sequence:
if token_index in id_to_word:
words.append(id_to_word[token_index])
else:
words.append(reader.UNK)
return " ".join(words)
def batch_sequences_to_sentences(sequences, id_to_word):
return [sequence_to_sentence(sequence, id_to_word) for sequence in sequences]
def write_eval_results(checkpoint_dir, all_gen_sentences, checkpoint_name,
mean_train_prob, mean_valid_prob, mean_gen_prob, fid):
"""Write evaluation results to disk."""
to_write = ",".join(
map(str, [
checkpoint_name, mean_train_prob, mean_valid_prob, mean_gen_prob, fid
]))
eval_filepath = os.path.join(checkpoint_dir, EVAL_FILENAME)
previous_eval_content = ""
if gfile.exists(eval_filepath):
with gfile.GFile(eval_filepath, "r") as f:
previous_eval_content = f.read()
with gfile.GFile(eval_filepath, "w") as f:
f.write(previous_eval_content + to_write + "\n")
with gfile.GFile(
os.path.join(checkpoint_dir, checkpoint_name + "_sentences.txt"),
"w") as f:
f.write("\n".join(all_gen_sentences))
def maybe_pick_models_to_evaluate(checkpoint_dir):
"""Pick a checkpoint to evaluate that has not been evaluated already."""
logging.info("Picking checkpoint to evaluate from %s.", checkpoint_dir)
filenames = gfile.listdir(checkpoint_dir)
filenames = [f[:-5] for f in filenames if f[-5:] == ".meta"]
logging.info("Found existing checkpoints: %s", filenames)
evaluated_filenames = []
if gfile.exists(os.path.join(checkpoint_dir, EVAL_FILENAME)):
with gfile.GFile(os.path.join(checkpoint_dir, EVAL_FILENAME), "r") as f:
evaluated_filenames = [l.strip().split(",")[0] for l in f.readlines()]
logging.info("Found already evaluated checkpoints: %s", evaluated_filenames)
checkpoints_to_evaluate = [
f for f in filenames if f not in evaluated_filenames
]
logging.info("Remaining potential checkpoints: %s", checkpoints_to_evaluate)
if checkpoints_to_evaluate:
return os.path.join(checkpoint_dir, checkpoints_to_evaluate[0])
else:
return None
def get_embedding_path(data_dir, dataset):
"""By convention, this is where we store the embedding."""
return os.path.join(data_dir, "glove_%s.txt" % dataset)
def make_partially_trainable_embeddings(vocab_file, embedding_source,
vocab_size, trainable_embedding_size):
"""Makes embedding matrix with pretrained GloVe [1] part and trainable part.
[1] Pennington, J., Socher, R., & Manning, C. (2014, October). Glove: Global
vectors for word representation. In Proceedings of the 2014 conference on
empirical methods in natural language processing (EMNLP) (pp. 1532-1543).
Args:
vocab_file: vocabulary file.
embedding_source: path to the actual embeddings.
vocab_size: number of words in vocabulary.
trainable_embedding_size: size of the trainable part of the embeddings.
Returns:
A matrix of partially pretrained embeddings.
"""
# Our embeddings have 2 parts: a pre-trained, frozen, GloVe part,
# and a trainable, randomly initialized part.
# The standard deviation of the GloVe part is used to initialize
# the trainable part, so that both part have roughly the same distribution.
#
  # Let g_ij be the j-th coordinate of the GloVe embedding of the i-th word,
  # so that 0 <= i < |vocab| and 0 <= j < 300.
  # Then mean_ij (g_ij - mean_kl g_kl)^2 = (0.3836)^2.
#
# In reality g_ij follows a truncated normal distribution
# min(max(N(0, s), -4.2), 4.2) but we approximate it by N(0, 0.3836).
embedding_initializer = _get_embedding_initializer(
vocab_file=vocab_file,
embedding_source=embedding_source,
vocab_size=vocab_size)
pretrained_embedding = tf.get_variable(
"pretrained_embedding",
initializer=embedding_initializer,
dtype=tf.float32)
trainable_embedding = tf.get_variable(
"trainable_embedding",
shape=[vocab_size, trainable_embedding_size],
initializer=tf.initializers.random_normal(mean=0.0, stddev=GLOVE_STD))
# We just concatenate embeddings, they will pass through a projection
# matrix afterwards.
embedding = tf.concat([pretrained_embedding, trainable_embedding], axis=1)
return embedding
|
deepmind-research-master
|
scratchgan/utils.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses for sequential GANs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
def sequential_cross_entropy_loss(logits, expected):
"""The cross entropy loss for binary classification.
Used to train the discriminator when not using WGAN loss.
  Assumes `logits` is the logit (log-odds) of classifying as 1 (real).
Args:
logits: a `tf.Tensor`, the model produced logits, shape [batch_size,
sequence_length].
expected: a `tf.Tensor`, the expected output, shape [batch_size,
sequence_length].
Returns:
    A [batch_size, sequence_length] `tf.Tensor` with the per-timestep cross
    entropy loss on the given inputs.
"""
batch_size, sequence_length = logits.shape.as_list()
expected = tf.cast(expected, tf.float32)
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=expected, logits=logits)
return tf.reshape(ce, [batch_size, sequence_length])
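# A minimal sketch (not part of the original file) of the loss above on toy
# values: with zero logits and all-"real" labels, each entry of the returned
# tensor is -log(0.5) ~= 0.693.
def _example_sequential_cross_entropy_loss():
  logits = tf.zeros([4, 10])   # Uniform predictions for 4 sequences.
  expected = tf.ones([4, 10])  # Every timestep labeled as real.
  return sequential_cross_entropy_loss(logits, expected)  # Shape [4, 10].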
def reinforce_loss(disc_logits, gen_logprobs, gamma, decay):
"""The REINFORCE loss.
Args:
disc_logits: float tensor, shape [batch_size, sequence_length].
gen_logprobs: float32 tensor, shape [batch_size, sequence_length]
gamma: a float, discount factor for cumulative reward.
decay: a float, decay rate for the EWMA baseline of REINFORCE.
  Returns:
    A tuple (loss, cumulative_rewards, ewma_reward):
      loss: float tensor, shape [batch_size, sequence_length], the REINFORCE
        loss for each timestep.
      cumulative_rewards: float tensor, shape [batch_size, sequence_length].
      ewma_reward: scalar float tensor, the updated EWMA reward baseline.
"""
# Assume 1 logit for each timestep.
batch_size, sequence_length = disc_logits.shape.as_list()
gen_logprobs.shape.assert_is_compatible_with([batch_size, sequence_length])
disc_predictions = tf.nn.sigmoid(disc_logits)
# MaskGAN uses log(D), but this is more stable empirically.
rewards = 2.0 * disc_predictions - 1
# Compute cumulative rewards.
rewards_list = tf.unstack(rewards, axis=1)
cumulative_rewards = []
for t in range(sequence_length):
cum_value = tf.zeros(shape=[batch_size])
for s in range(t, sequence_length):
cum_value += np.power(gamma, (s - t)) * rewards_list[s]
cumulative_rewards.append(cum_value)
cumulative_rewards = tf.stack(cumulative_rewards, axis=1)
cumulative_rewards.shape.assert_is_compatible_with(
[batch_size, sequence_length])
with tf.variable_scope("reinforce", reuse=tf.AUTO_REUSE):
ewma_reward = tf.get_variable("ewma_reward", initializer=0.0)
mean_reward = tf.reduce_mean(cumulative_rewards)
new_ewma_reward = decay * ewma_reward + (1.0 - decay) * mean_reward
update_op = tf.assign(ewma_reward, new_ewma_reward)
# REINFORCE
with tf.control_dependencies([update_op]):
advantage = cumulative_rewards - ewma_reward
loss = -tf.stop_gradient(advantage) * gen_logprobs
loss.shape.assert_is_compatible_with([batch_size, sequence_length])
return loss, cumulative_rewards, ewma_reward
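# A minimal graph-mode sketch (not part of the original file) wiring toy
# tensors through `reinforce_loss`. With zero discriminator logits, the
# per-step rewards 2 * sigmoid(0) - 1 are all zero, so the cumulative
# rewards and the loss are zero as well.
def _example_reinforce_loss():
  disc_logits = tf.zeros([4, 10])
  gen_logprobs = tf.fill([4, 10], -2.3)  # Toy log-probabilities.
  loss, cumulative_rewards, ewma_reward = reinforce_loss(
      disc_logits, gen_logprobs, gamma=0.9, decay=0.95)
  return loss, cumulative_rewards, ewma_reward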
|
deepmind-research-master
|
scratchgan/losses.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discriminator networks for text data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
import tensorflow.compat.v1 as tf
from scratchgan import utils
class LSTMEmbedDiscNet(snt.AbstractModule):
"""An LSTM discriminator that operates on word indexes."""
def __init__(self,
feature_sizes,
vocab_size,
use_layer_norm,
trainable_embedding_size,
dropout,
pad_token,
embedding_source=None,
vocab_file=None,
name='LSTMEmbedDiscNet'):
super(LSTMEmbedDiscNet, self).__init__(name=name)
self._feature_sizes = feature_sizes
self._vocab_size = vocab_size
self._use_layer_norm = use_layer_norm
self._trainable_embedding_size = trainable_embedding_size
self._embedding_source = embedding_source
self._vocab_file = vocab_file
self._dropout = dropout
self._pad_token = pad_token
if self._embedding_source:
assert vocab_file
def _build(self, sequence, sequence_length, is_training=True):
"""Connect to the graph.
Args:
sequence: A [batch_size, max_sequence_length] tensor of int. For example
the indices of words as sampled by the generator.
sequence_length: A [batch_size] tensor of int. Length of the sequence.
is_training: Boolean, False to disable dropout.
Returns:
      A [batch_size, max_sequence_length, feature_size] tensor of floats. For
      each sequence in the batch, the features should (hopefully) make it
      possible to distinguish whether the value at each timestep is real or
      generated.
"""
batch_size, max_sequence_length = sequence.shape.as_list()
keep_prob = (1.0 - self._dropout) if is_training else 1.0
if self._embedding_source:
all_embeddings = utils.make_partially_trainable_embeddings(
self._vocab_file, self._embedding_source, self._vocab_size,
self._trainable_embedding_size)
else:
all_embeddings = tf.get_variable(
'trainable_embedding',
shape=[self._vocab_size, self._trainable_embedding_size],
trainable=True)
_, self._embedding_size = all_embeddings.shape.as_list()
input_embeddings = tf.nn.dropout(all_embeddings, keep_prob=keep_prob)
embeddings = tf.nn.embedding_lookup(input_embeddings, sequence)
embeddings.shape.assert_is_compatible_with(
[batch_size, max_sequence_length, self._embedding_size])
position_dim = 8
embeddings_pos = utils.append_position_signal(embeddings, position_dim)
embeddings_pos = tf.reshape(
embeddings_pos,
[batch_size * max_sequence_length, self._embedding_size + position_dim])
lstm_inputs = snt.Linear(self._feature_sizes[0])(embeddings_pos)
lstm_inputs = tf.reshape(
lstm_inputs, [batch_size, max_sequence_length, self._feature_sizes[0]])
lstm_inputs.shape.assert_is_compatible_with(
[batch_size, max_sequence_length, self._feature_sizes[0]])
encoder_cells = []
for feature_size in self._feature_sizes:
encoder_cells += [
snt.LSTM(feature_size, use_layer_norm=self._use_layer_norm)
]
encoder_cell = snt.DeepRNN(encoder_cells)
initial_state = encoder_cell.initial_state(batch_size)
hidden_states, _ = tf.nn.dynamic_rnn(
cell=encoder_cell,
inputs=lstm_inputs,
sequence_length=sequence_length,
initial_state=initial_state,
swap_memory=True)
hidden_states.shape.assert_is_compatible_with(
[batch_size, max_sequence_length,
sum(self._feature_sizes)])
logits = snt.BatchApply(snt.Linear(1))(hidden_states)
logits.shape.assert_is_compatible_with([batch_size, max_sequence_length, 1])
logits_flat = tf.reshape(logits, [batch_size, max_sequence_length])
# Mask past first PAD symbol
#
    # Note that we still rely on tf.nn.dynamic_rnn taking into account the
    # sequence_length properly, because otherwise the logits at a given
    # timestep will depend on the inputs for all other timesteps, including
    # the ones that should be masked.
mask = utils.get_mask_past_symbol(sequence, self._pad_token)
masked_logits_flat = logits_flat * tf.cast(mask, tf.float32)
return masked_logits_flat
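# A minimal graph-mode sketch (not part of the original file) showing the
# expected shapes; the hyperparameters below are illustrative assumptions.
def _example_lstm_embed_disc_net():
  disc = LSTMEmbedDiscNet(
      feature_sizes=[128, 128],
      vocab_size=1000,
      use_layer_norm=True,
      trainable_embedding_size=64,
      dropout=0.1,
      pad_token=0)
  sequence = tf.ones([4, 20], dtype=tf.int32)  # Toy word indices.
  sequence_length = tf.fill([4], 20)
  logits = disc(sequence, sequence_length, is_training=True)
  # logits has shape [batch_size, max_sequence_length] = [4, 20].
  return logits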
|
deepmind-research-master
|
scratchgan/discriminator_nets.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for reading open sourced Learning Complex Physics data."""
import functools
import numpy as np
import tensorflow.compat.v1 as tf
# Create a description of the features.
_FEATURE_DESCRIPTION = {
'position': tf.io.VarLenFeature(tf.string),
}
_FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT = _FEATURE_DESCRIPTION.copy()
_FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT['step_context'] = tf.io.VarLenFeature(
tf.string)
_FEATURE_DTYPES = {
'position': {
'in': np.float32,
'out': tf.float32
},
'step_context': {
'in': np.float32,
'out': tf.float32
}
}
_CONTEXT_FEATURES = {
'key': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'particle_type': tf.io.VarLenFeature(tf.string)
}
def convert_to_tensor(x, encoded_dtype):
if len(x) == 1:
out = np.frombuffer(x[0].numpy(), dtype=encoded_dtype)
else:
out = []
for el in x:
out.append(np.frombuffer(el.numpy(), dtype=encoded_dtype))
out = tf.convert_to_tensor(np.array(out))
return out
def parse_serialized_simulation_example(example_proto, metadata):
"""Parses a serialized simulation tf.SequenceExample.
Args:
example_proto: A string encoding of the tf.SequenceExample proto.
metadata: A dict of metadata for the dataset.
Returns:
context: A dict, with features that do not vary over the trajectory.
parsed_features: A dict of tf.Tensors representing the parsed examples
across time, where axis zero is the time axis.
"""
if 'context_mean' in metadata:
feature_description = _FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT
else:
feature_description = _FEATURE_DESCRIPTION
context, parsed_features = tf.io.parse_single_sequence_example(
example_proto,
context_features=_CONTEXT_FEATURES,
sequence_features=feature_description)
for feature_key, item in parsed_features.items():
convert_fn = functools.partial(
convert_to_tensor, encoded_dtype=_FEATURE_DTYPES[feature_key]['in'])
parsed_features[feature_key] = tf.py_function(
convert_fn, inp=[item.values], Tout=_FEATURE_DTYPES[feature_key]['out'])
# There is an extra frame at the beginning so we can calculate pos change
# for all frames used in the paper.
position_shape = [metadata['sequence_length'] + 1, -1, metadata['dim']]
# Reshape positions to correct dim:
parsed_features['position'] = tf.reshape(parsed_features['position'],
position_shape)
# Set correct shapes of the remaining tensors.
sequence_length = metadata['sequence_length'] + 1
if 'context_mean' in metadata:
context_feat_len = len(metadata['context_mean'])
parsed_features['step_context'] = tf.reshape(
parsed_features['step_context'],
[sequence_length, context_feat_len])
# Decode particle type explicitly
context['particle_type'] = tf.py_function(
functools.partial(convert_fn, encoded_dtype=np.int64),
inp=[context['particle_type'].values],
Tout=[tf.int64])
context['particle_type'] = tf.reshape(context['particle_type'], [-1])
return context, parsed_features
def split_trajectory(context, features, window_length=7):
"""Splits trajectory into sliding windows."""
# Our strategy is to make sure all the leading dimensions are the same size,
# then we can use from_tensor_slices.
trajectory_length = features['position'].get_shape().as_list()[0]
  # We then stack `window_length` positions per window, so the number of
  # windows will be `trajectory_length - window_length + 1` (the +1 makes
  # sure we get the last window).
input_trajectory_length = trajectory_length - window_length + 1
model_input_features = {}
# Prepare the context features per step.
model_input_features['particle_type'] = tf.tile(
tf.expand_dims(context['particle_type'], axis=0),
[input_trajectory_length, 1])
if 'step_context' in features:
global_stack = []
for idx in range(input_trajectory_length):
global_stack.append(features['step_context'][idx:idx + window_length])
model_input_features['step_context'] = tf.stack(global_stack)
pos_stack = []
for idx in range(input_trajectory_length):
pos_stack.append(features['position'][idx:idx + window_length])
# Get the corresponding positions
model_input_features['position'] = tf.stack(pos_stack)
return tf.data.Dataset.from_tensor_slices(model_input_features)
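# A minimal sketch (not part of the original file) of the windowing above on
# a toy trajectory: 10 steps, 5 particles, 2 dimensions.
def _example_split_trajectory():
  context = {'particle_type': tf.zeros([5], dtype=tf.int64)}
  features = {'position': tf.zeros([10, 5, 2])}
  ds = split_trajectory(context, features, window_length=7)
  # Yields 10 - 7 + 1 = 4 windows; each element has 'position' of shape
  # [7, 5, 2] and 'particle_type' of shape [5].
  return ds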
|
deepmind-research-master
|
learning_to_simulate/reading_utils.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simple matplotlib rendering of a rollout prediction against ground truth.
Usage (from parent directory):
`python -m learning_to_simulate.render_rollout --rollout_path={OUTPUT_PATH}/rollout_test_1.pkl`
Where {OUTPUT_PATH} is the output path passed to `train.py` in "eval_rollout"
mode.
It may require installing Tkinter with `sudo apt-get install python3.7-tk`.
""" # pylint: disable=line-too-long
import pickle
from absl import app
from absl import flags
from matplotlib import animation
import matplotlib.pyplot as plt
import numpy as np
flags.DEFINE_string("rollout_path", None, help="Path to rollout pickle file")
flags.DEFINE_integer("step_stride", 3, help="Stride of steps to skip.")
flags.DEFINE_boolean("block_on_show", True, help="For test purposes.")
FLAGS = flags.FLAGS
TYPE_TO_COLOR = {
3: "black", # Boundary particles.
0: "green", # Rigid solids.
7: "magenta", # Goop.
6: "gold", # Sand.
5: "blue", # Water.
}
def main(unused_argv):
if not FLAGS.rollout_path:
raise ValueError("A `rollout_path` must be passed.")
with open(FLAGS.rollout_path, "rb") as file:
rollout_data = pickle.load(file)
fig, axes = plt.subplots(1, 2, figsize=(10, 5))
plot_info = []
for ax_i, (label, rollout_field) in enumerate(
[("Ground truth", "ground_truth_rollout"),
("Prediction", "predicted_rollout")]):
# Append the initial positions to get the full trajectory.
trajectory = np.concatenate([
rollout_data["initial_positions"],
rollout_data[rollout_field]], axis=0)
ax = axes[ax_i]
ax.set_title(label)
bounds = rollout_data["metadata"]["bounds"]
ax.set_xlim(bounds[0][0], bounds[0][1])
ax.set_ylim(bounds[1][0], bounds[1][1])
ax.set_xticks([])
ax.set_yticks([])
ax.set_aspect(1.)
points = {
particle_type: ax.plot([], [], "o", ms=2, color=color)[0]
for particle_type, color in TYPE_TO_COLOR.items()}
plot_info.append((ax, trajectory, points))
num_steps = trajectory.shape[0]
def update(step_i):
outputs = []
for _, trajectory, points in plot_info:
for particle_type, line in points.items():
mask = rollout_data["particle_types"] == particle_type
line.set_data(trajectory[step_i, mask, 0],
trajectory[step_i, mask, 1])
outputs.append(line)
return outputs
unused_animation = animation.FuncAnimation(
fig, update,
frames=np.arange(0, num_steps, FLAGS.step_stride), interval=10)
plt.show(block=FLAGS.block_on_show)
if __name__ == "__main__":
app.run(main)
|
deepmind-research-master
|
learning_to_simulate/render_rollout.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Full model implementation accompanying ICML 2020 submission.
"Learning to Simulate Complex Physics with Graph Networks"
Alvaro Sanchez-Gonzalez*, Jonathan Godwin*, Tobias Pfaff*, Rex Ying,
Jure Leskovec, Peter W. Battaglia
https://arxiv.org/abs/2002.09405
"""
import graph_nets as gn
import sonnet as snt
import tensorflow.compat.v1 as tf
from learning_to_simulate import connectivity_utils
from learning_to_simulate import graph_network
STD_EPSILON = 1e-8
class LearnedSimulator(snt.AbstractModule):
"""Learned simulator from https://arxiv.org/pdf/2002.09405.pdf."""
def __init__(
self,
num_dimensions,
connectivity_radius,
graph_network_kwargs,
boundaries,
normalization_stats,
num_particle_types,
particle_type_embedding_size,
name="LearnedSimulator"):
"""Inits the model.
Args:
num_dimensions: Dimensionality of the problem.
connectivity_radius: Scalar with the radius of connectivity.
graph_network_kwargs: Keyword arguments to pass to the learned part
of the graph network `model.EncodeProcessDecode`.
      boundaries: List of 2-tuples, containing the lower and upper boundaries
        of the cuboid containing the particles along each dimension, matching
        the dimensionality of the problem.
normalization_stats: Dictionary with statistics with keys "acceleration"
and "velocity", containing a named tuple for each with mean and std
fields, matching the dimensionality of the problem.
num_particle_types: Number of different particle types.
particle_type_embedding_size: Embedding size for the particle type.
name: Name of the Sonnet module.
"""
super().__init__(name=name)
self._connectivity_radius = connectivity_radius
self._num_particle_types = num_particle_types
self._boundaries = boundaries
self._normalization_stats = normalization_stats
with self._enter_variable_scope():
self._graph_network = graph_network.EncodeProcessDecode(
output_size=num_dimensions, **graph_network_kwargs)
if self._num_particle_types > 1:
self._particle_type_embedding = tf.get_variable(
"particle_embedding",
[self._num_particle_types, particle_type_embedding_size],
trainable=True, use_resource=True)
def _build(self, position_sequence, n_particles_per_example,
global_context=None, particle_types=None):
"""Produces a model step, outputting the next position for each particle.
Args:
position_sequence: Sequence of positions for each node in the batch,
with shape [num_particles_in_batch, sequence_length, num_dimensions]
n_particles_per_example: Number of particles for each graph in the batch
with shape [batch_size]
global_context: Tensor of shape [batch_size, context_size], with global
context.
particle_types: Integer tensor of shape [num_particles_in_batch] with
the integer types of the particles, from 0 to `num_particle_types - 1`.
If None, we assume all particles are the same type.
Returns:
Next position with shape [num_particles_in_batch, num_dimensions] for one
step into the future from the input sequence.
"""
input_graphs_tuple = self._encoder_preprocessor(
position_sequence, n_particles_per_example, global_context,
particle_types)
normalized_acceleration = self._graph_network(input_graphs_tuple)
next_position = self._decoder_postprocessor(
normalized_acceleration, position_sequence)
return next_position
def _encoder_preprocessor(
self, position_sequence, n_node, global_context, particle_types):
# Extract important features from the position_sequence.
most_recent_position = position_sequence[:, -1]
velocity_sequence = time_diff(position_sequence) # Finite-difference.
# Get connectivity of the graph.
(senders, receivers, n_edge
) = connectivity_utils.compute_connectivity_for_batch_pyfunc(
most_recent_position, n_node, self._connectivity_radius)
# Collect node features.
node_features = []
    # Normalized velocity sequence, merging spatial and time axes.
velocity_stats = self._normalization_stats["velocity"]
normalized_velocity_sequence = (
velocity_sequence - velocity_stats.mean) / velocity_stats.std
flat_velocity_sequence = snt.MergeDims(start=1, size=2)(
normalized_velocity_sequence)
node_features.append(flat_velocity_sequence)
    # Normalized clipped distances to lower and upper boundaries.
    # `boundaries` is an array of shape [num_dimensions, 2], where the second
    # axis provides the lower/upper boundaries.
boundaries = tf.constant(self._boundaries, dtype=tf.float32)
distance_to_lower_boundary = (
most_recent_position - tf.expand_dims(boundaries[:, 0], 0))
distance_to_upper_boundary = (
tf.expand_dims(boundaries[:, 1], 0) - most_recent_position)
distance_to_boundaries = tf.concat(
[distance_to_lower_boundary, distance_to_upper_boundary], axis=1)
normalized_clipped_distance_to_boundaries = tf.clip_by_value(
distance_to_boundaries / self._connectivity_radius, -1., 1.)
node_features.append(normalized_clipped_distance_to_boundaries)
# Particle type.
if self._num_particle_types > 1:
particle_type_embeddings = tf.nn.embedding_lookup(
self._particle_type_embedding, particle_types)
node_features.append(particle_type_embeddings)
# Collect edge features.
edge_features = []
    # Relative displacements and distances, normalized to the connectivity
    # radius.
normalized_relative_displacements = (
tf.gather(most_recent_position, senders) -
tf.gather(most_recent_position, receivers)) / self._connectivity_radius
edge_features.append(normalized_relative_displacements)
normalized_relative_distances = tf.norm(
normalized_relative_displacements, axis=-1, keepdims=True)
edge_features.append(normalized_relative_distances)
# Normalize the global context.
if global_context is not None:
context_stats = self._normalization_stats["context"]
# Context in some datasets are all zero, so add an epsilon for numerical
# stability.
global_context = (global_context - context_stats.mean) / tf.math.maximum(
context_stats.std, STD_EPSILON)
return gn.graphs.GraphsTuple(
nodes=tf.concat(node_features, axis=-1),
edges=tf.concat(edge_features, axis=-1),
        globals=global_context,  # self._graph_network will append this to the nodes.
n_node=n_node,
n_edge=n_edge,
senders=senders,
receivers=receivers,
)
def _decoder_postprocessor(self, normalized_acceleration, position_sequence):
# The model produces the output in normalized space so we apply inverse
# normalization.
acceleration_stats = self._normalization_stats["acceleration"]
acceleration = (
normalized_acceleration * acceleration_stats.std
) + acceleration_stats.mean
# Use an Euler integrator to go from acceleration to position, assuming
# a dt=1 corresponding to the size of the finite difference.
most_recent_position = position_sequence[:, -1]
most_recent_velocity = most_recent_position - position_sequence[:, -2]
new_velocity = most_recent_velocity + acceleration # * dt = 1
new_position = most_recent_position + new_velocity # * dt = 1
return new_position
def get_predicted_and_target_normalized_accelerations(
self, next_position, position_sequence_noise, position_sequence,
n_particles_per_example, global_context=None, particle_types=None): # pylint: disable=g-doc-args
"""Produces normalized and predicted acceleration targets.
Args:
next_position: Tensor of shape [num_particles_in_batch, num_dimensions]
with the positions the model should output given the inputs.
position_sequence_noise: Tensor of the same shape as `position_sequence`
with the noise to apply to each particle.
      position_sequence, n_particles_per_example, global_context,
        particle_types: Inputs to the model as defined by `_build`.
Returns:
Tensors of shape [num_particles_in_batch, num_dimensions] with the
predicted and target normalized accelerations.
"""
# Add noise to the input position sequence.
noisy_position_sequence = position_sequence + position_sequence_noise
# Perform the forward pass with the noisy position sequence.
input_graphs_tuple = self._encoder_preprocessor(
noisy_position_sequence, n_particles_per_example, global_context,
particle_types)
predicted_normalized_acceleration = self._graph_network(input_graphs_tuple)
    # Calculate the target acceleration, using an `adjusted_next_position`
    # that is shifted by the noise in the last input position.
next_position_adjusted = next_position + position_sequence_noise[:, -1]
target_normalized_acceleration = self._inverse_decoder_postprocessor(
next_position_adjusted, noisy_position_sequence)
# As a result the inverted Euler update in the `_inverse_decoder` produces:
# * A target acceleration that does not explicitly correct for the noise in
# the input positions, as the `next_position_adjusted` is different
# from the true `next_position`.
# * A target acceleration that exactly corrects noise in the input velocity
# since the target next velocity calculated by the inverse Euler update
# as `next_position_adjusted - noisy_position_sequence[:,-1]`
# matches the ground truth next velocity (noise cancels out).
return predicted_normalized_acceleration, target_normalized_acceleration
def _inverse_decoder_postprocessor(self, next_position, position_sequence):
"""Inverse of `_decoder_postprocessor`."""
previous_position = position_sequence[:, -1]
previous_velocity = previous_position - position_sequence[:, -2]
next_velocity = next_position - previous_position
acceleration = next_velocity - previous_velocity
acceleration_stats = self._normalization_stats["acceleration"]
normalized_acceleration = (
acceleration - acceleration_stats.mean) / acceleration_stats.std
return normalized_acceleration
def time_diff(input_sequence):
return input_sequence[:, 1:] - input_sequence[:, :-1]
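# A minimal sketch (not part of the original file) of the finite-difference
# convention used above: velocities are differences of consecutive positions.
def _example_time_diff():
  # [num_particles=1, sequence_length=3, dim=1].
  positions = tf.constant([[[0.], [1.], [3.]]])
  return time_diff(positions)  # [[[1.], [2.]]], shape [1, 2, 1].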
|
deepmind-research-master
|
learning_to_simulate/learned_simulator.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# pylint: disable=line-too-long
"""Training script for https://arxiv.org/pdf/2002.09405.pdf.
Example usage (from parent directory):
`python -m learning_to_simulate.train --data_path={DATA_PATH} --model_path={MODEL_PATH}`
Evaluate model from checkpoint (from parent directory):
`python -m learning_to_simulate.train --data_path={DATA_PATH} --model_path={MODEL_PATH} --mode=eval`
Produce rollouts (from parent directory):
`python -m learning_to_simulate.train --data_path={DATA_PATH} --model_path={MODEL_PATH} --output_path={OUTPUT_PATH} --mode=eval_rollout`
"""
# pylint: enable=line-too-long
import collections
import functools
import json
import os
import pickle
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
import tree
from learning_to_simulate import learned_simulator
from learning_to_simulate import noise_utils
from learning_to_simulate import reading_utils
flags.DEFINE_enum(
'mode', 'train', ['train', 'eval', 'eval_rollout'],
help='Train model, one step evaluation or rollout evaluation.')
flags.DEFINE_enum('eval_split', 'test', ['train', 'valid', 'test'],
help='Split to use when running evaluation.')
flags.DEFINE_string('data_path', None, help='The dataset directory.')
flags.DEFINE_integer('batch_size', 2, help='The batch size.')
flags.DEFINE_integer('num_steps', int(2e7), help='Number of steps of training.')
flags.DEFINE_float('noise_std', 6.7e-4, help='The std deviation of the noise.')
flags.DEFINE_string('model_path', None,
help=('The path for saving checkpoints of the model. '
'Defaults to a temporary directory.'))
flags.DEFINE_string('output_path', None,
help='The path for saving outputs (e.g. rollouts).')
FLAGS = flags.FLAGS
Stats = collections.namedtuple('Stats', ['mean', 'std'])
INPUT_SEQUENCE_LENGTH = 6 # So we can calculate the last 5 velocities.
NUM_PARTICLE_TYPES = 9
KINEMATIC_PARTICLE_ID = 3
def get_kinematic_mask(particle_types):
"""Returns a boolean mask, set to true for kinematic (obstacle) particles."""
return tf.equal(particle_types, KINEMATIC_PARTICLE_ID)
def prepare_inputs(tensor_dict):
"""Prepares a single stack of inputs by calculating inputs and targets.
Computes n_particles_per_example, which is a tensor that contains information
about how to partition the axis - i.e. which nodes belong to which graph.
Adds a batch axis to `n_particles_per_example` and `step_context` so they can
later be batched using `batch_concat`. This batch will be the same as if the
elements had been batched via stacking.
Note that all other tensors have a variable size particle axis,
and in this case they will simply be concatenated along that
axis.
Args:
tensor_dict: A dict of tensors containing positions, and step context (
if available).
Returns:
A tuple of input features and target positions.
"""
# Position is encoded as [sequence_length, num_particles, dim] but the model
# expects [num_particles, sequence_length, dim].
pos = tensor_dict['position']
pos = tf.transpose(pos, perm=[1, 0, 2])
# The target position is the final step of the stack of positions.
target_position = pos[:, -1]
# Remove the target from the input.
tensor_dict['position'] = pos[:, :-1]
# Compute the number of particles per example.
num_particles = tf.shape(pos)[0]
# Add an extra dimension for stacking via concat.
tensor_dict['n_particles_per_example'] = num_particles[tf.newaxis]
if 'step_context' in tensor_dict:
# Take the input global context. We have a stack of global contexts,
# and we take the penultimate since the final is the target.
tensor_dict['step_context'] = tensor_dict['step_context'][-2]
# Add an extra dimension for stacking via concat.
tensor_dict['step_context'] = tensor_dict['step_context'][tf.newaxis]
return tensor_dict, target_position
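# A minimal sketch (not part of the original file) of `prepare_inputs` on a
# toy stack of 7 positions for 5 particles in 2-D.
def _example_prepare_inputs():
  tensor_dict = {'position': tf.zeros([7, 5, 2])}
  features, target_position = prepare_inputs(tensor_dict)
  # features['position']: [5, 6, 2] (target removed and axes transposed);
  # target_position: [5, 2];
  # features['n_particles_per_example']: a shape [1] tensor holding 5.
  return features, target_position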
def prepare_rollout_inputs(context, features):
"""Prepares an inputs trajectory for rollout."""
out_dict = {**context}
# Position is encoded as [sequence_length, num_particles, dim] but the model
# expects [num_particles, sequence_length, dim].
pos = tf.transpose(features['position'], [1, 0, 2])
# The target position is the final step of the stack of positions.
target_position = pos[:, -1]
# Remove the target from the input.
out_dict['position'] = pos[:, :-1]
# Compute the number of nodes
out_dict['n_particles_per_example'] = [tf.shape(pos)[0]]
if 'step_context' in features:
out_dict['step_context'] = features['step_context']
out_dict['is_trajectory'] = tf.constant([True], tf.bool)
return out_dict, target_position
def batch_concat(dataset, batch_size):
"""We implement batching as concatenating on the leading axis."""
# We create a dataset of datasets of length batch_size.
windowed_ds = dataset.window(batch_size)
# The plan is then to reduce every nested dataset by concatenating. We can
# do this using tf.data.Dataset.reduce. This requires an initial state, and
  # then incrementally reduces by running through the dataset.
# Get initial state. In this case this will be empty tensors of the
# correct shape.
initial_state = tree.map_structure(
lambda spec: tf.zeros( # pylint: disable=g-long-lambda
shape=[0] + spec.shape.as_list()[1:], dtype=spec.dtype),
dataset.element_spec)
# We run through the nest and concatenate each entry with the previous state.
def reduce_window(initial_state, ds):
return ds.reduce(initial_state, lambda x, y: tf.concat([x, y], axis=0))
return windowed_ds.map(
lambda *x: tree.map_structure(reduce_window, initial_state, x))
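# A minimal sketch (not part of the original file) of `batch_concat` on a
# toy dataset of two-tuples, each with a leading "particle" axis of length 2.
def _example_batch_concat():
  ds = tf.data.Dataset.from_tensor_slices(
      (tf.reshape(tf.range(8.), [4, 2]), tf.reshape(tf.range(4.), [4, 1])))
  batched = batch_concat(ds, batch_size=2)
  # Each batched element concatenates two elements on the leading axis:
  # shapes ([4], [2]) rather than the stacked shapes ([2, 2], [2, 1]).
  return batched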
def get_input_fn(data_path, batch_size, mode, split):
"""Gets the learning simulation input function for tf.estimator.Estimator.
Args:
data_path: the path to the dataset directory.
batch_size: the number of graphs in a batch.
mode: either 'one_step_train', 'one_step' or 'rollout'
    split: either 'train', 'valid' or 'test'.
Returns:
The input function for the learning simulation model.
"""
def input_fn():
"""Input function for learning simulation."""
# Loads the metadata of the dataset.
metadata = _read_metadata(data_path)
# Create a tf.data.Dataset from the TFRecord.
ds = tf.data.TFRecordDataset([os.path.join(data_path, f'{split}.tfrecord')])
ds = ds.map(functools.partial(
reading_utils.parse_serialized_simulation_example, metadata=metadata))
if mode.startswith('one_step'):
# Splits an entire trajectory into chunks of 7 steps.
# Previous 5 velocities, current velocity and target.
split_with_window = functools.partial(
reading_utils.split_trajectory,
window_length=INPUT_SEQUENCE_LENGTH + 1)
ds = ds.flat_map(split_with_window)
# Splits a chunk into input steps and target steps
ds = ds.map(prepare_inputs)
# If in train mode, repeat dataset forever and shuffle.
if mode == 'one_step_train':
ds = ds.repeat()
ds = ds.shuffle(512)
# Custom batching on the leading axis.
ds = batch_concat(ds, batch_size)
elif mode == 'rollout':
      # Rollout evaluation is only available for batch size 1.
assert batch_size == 1
ds = ds.map(prepare_rollout_inputs)
else:
raise ValueError(f'mode: {mode} not recognized')
return ds
return input_fn
def rollout(simulator, features, num_steps):
"""Rolls out a trajectory by applying the model in sequence."""
initial_positions = features['position'][:, 0:INPUT_SEQUENCE_LENGTH]
ground_truth_positions = features['position'][:, INPUT_SEQUENCE_LENGTH:]
global_context = features.get('step_context')
def step_fn(step, current_positions, predictions):
if global_context is None:
global_context_step = None
else:
global_context_step = global_context[
step + INPUT_SEQUENCE_LENGTH - 1][tf.newaxis]
next_position = simulator(
current_positions,
n_particles_per_example=features['n_particles_per_example'],
particle_types=features['particle_type'],
global_context=global_context_step)
# Update kinematic particles from prescribed trajectory.
kinematic_mask = get_kinematic_mask(features['particle_type'])
next_position_ground_truth = ground_truth_positions[:, step]
next_position = tf.where(kinematic_mask, next_position_ground_truth,
next_position)
updated_predictions = predictions.write(step, next_position)
# Shift `current_positions`, removing the oldest position in the sequence
# and appending the next position at the end.
next_positions = tf.concat([current_positions[:, 1:],
next_position[:, tf.newaxis]], axis=1)
return (step + 1, next_positions, updated_predictions)
predictions = tf.TensorArray(size=num_steps, dtype=tf.float32)
_, _, predictions = tf.while_loop(
cond=lambda step, state, prediction: tf.less(step, num_steps),
body=step_fn,
loop_vars=(0, initial_positions, predictions),
back_prop=False,
parallel_iterations=1)
output_dict = {
'initial_positions': tf.transpose(initial_positions, [1, 0, 2]),
'predicted_rollout': predictions.stack(),
'ground_truth_rollout': tf.transpose(ground_truth_positions, [1, 0, 2]),
'particle_types': features['particle_type'],
}
if global_context is not None:
output_dict['global_context'] = global_context
return output_dict
def _combine_std(std_x, std_y):
return np.sqrt(std_x**2 + std_y**2)
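# A minimal sketch (not part of the original file): stds of independent
# sources combine in quadrature, e.g. a data std of 1e-3 combined with the
# default noise std of 6.7e-4 gives sqrt(1e-6 + 4.489e-7) ~= 1.2e-3.
def _example_combine_std():
  return _combine_std(1e-3, 6.7e-4)  # ~= 1.2e-3.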
def _get_simulator(model_kwargs, metadata, acc_noise_std, vel_noise_std):
"""Instantiates the simulator."""
# Cast statistics to numpy so they are arrays when entering the model.
cast = lambda v: np.array(v, dtype=np.float32)
acceleration_stats = Stats(
cast(metadata['acc_mean']),
_combine_std(cast(metadata['acc_std']), acc_noise_std))
velocity_stats = Stats(
cast(metadata['vel_mean']),
_combine_std(cast(metadata['vel_std']), vel_noise_std))
normalization_stats = {'acceleration': acceleration_stats,
'velocity': velocity_stats}
if 'context_mean' in metadata:
context_stats = Stats(
cast(metadata['context_mean']), cast(metadata['context_std']))
normalization_stats['context'] = context_stats
simulator = learned_simulator.LearnedSimulator(
num_dimensions=metadata['dim'],
connectivity_radius=metadata['default_connectivity_radius'],
graph_network_kwargs=model_kwargs,
boundaries=metadata['bounds'],
num_particle_types=NUM_PARTICLE_TYPES,
normalization_stats=normalization_stats,
particle_type_embedding_size=16)
return simulator
def get_one_step_estimator_fn(data_path,
noise_std,
latent_size=128,
hidden_size=128,
hidden_layers=2,
message_passing_steps=10):
"""Gets one step model for training simulation."""
metadata = _read_metadata(data_path)
model_kwargs = dict(
latent_size=latent_size,
mlp_hidden_size=hidden_size,
mlp_num_hidden_layers=hidden_layers,
num_message_passing_steps=message_passing_steps)
def estimator_fn(features, labels, mode):
target_next_position = labels
simulator = _get_simulator(model_kwargs, metadata,
vel_noise_std=noise_std,
acc_noise_std=noise_std)
# Sample the noise to add to the inputs to the model during training.
sampled_noise = noise_utils.get_random_walk_noise_for_position_sequence(
features['position'], noise_std_last_step=noise_std)
non_kinematic_mask = tf.logical_not(
get_kinematic_mask(features['particle_type']))
noise_mask = tf.cast(
non_kinematic_mask, sampled_noise.dtype)[:, tf.newaxis, tf.newaxis]
sampled_noise *= noise_mask
# Get the predictions and target accelerations.
pred_target = simulator.get_predicted_and_target_normalized_accelerations(
next_position=target_next_position,
position_sequence=features['position'],
position_sequence_noise=sampled_noise,
n_particles_per_example=features['n_particles_per_example'],
particle_types=features['particle_type'],
global_context=features.get('step_context'))
pred_acceleration, target_acceleration = pred_target
    # Calculate the loss and mask out the loss on kinematic particles.
loss = (pred_acceleration - target_acceleration)**2
num_non_kinematic = tf.reduce_sum(
tf.cast(non_kinematic_mask, tf.float32))
loss = tf.where(non_kinematic_mask, loss, tf.zeros_like(loss))
loss = tf.reduce_sum(loss) / tf.reduce_sum(num_non_kinematic)
global_step = tf.train.get_global_step()
# Set learning rate to decay from 1e-4 to 1e-6 exponentially.
min_lr = 1e-6
lr = tf.train.exponential_decay(learning_rate=1e-4 - min_lr,
global_step=global_step,
decay_steps=int(5e6),
decay_rate=0.1) + min_lr
opt = tf.train.AdamOptimizer(learning_rate=lr)
train_op = opt.minimize(loss, global_step)
# Calculate next position and add some additional eval metrics (only eval).
predicted_next_position = simulator(
position_sequence=features['position'],
n_particles_per_example=features['n_particles_per_example'],
particle_types=features['particle_type'],
global_context=features.get('step_context'))
predictions = {'predicted_next_position': predicted_next_position}
eval_metrics_ops = {
'loss_mse': tf.metrics.mean_squared_error(
pred_acceleration, target_acceleration),
'one_step_position_mse': tf.metrics.mean_squared_error(
predicted_next_position, target_next_position)
}
return tf_estimator.EstimatorSpec(
mode=mode,
train_op=train_op,
loss=loss,
predictions=predictions,
eval_metric_ops=eval_metrics_ops)
return estimator_fn
def get_rollout_estimator_fn(data_path,
noise_std,
latent_size=128,
hidden_size=128,
hidden_layers=2,
message_passing_steps=10):
"""Gets the model function for tf.estimator.Estimator."""
metadata = _read_metadata(data_path)
model_kwargs = dict(
latent_size=latent_size,
mlp_hidden_size=hidden_size,
mlp_num_hidden_layers=hidden_layers,
num_message_passing_steps=message_passing_steps)
def estimator_fn(features, labels, mode):
del labels # Labels to conform to estimator spec.
simulator = _get_simulator(model_kwargs, metadata,
acc_noise_std=noise_std,
vel_noise_std=noise_std)
num_steps = metadata['sequence_length'] - INPUT_SEQUENCE_LENGTH
rollout_op = rollout(simulator, features, num_steps=num_steps)
squared_error = (rollout_op['predicted_rollout'] -
rollout_op['ground_truth_rollout']) ** 2
loss = tf.reduce_mean(squared_error)
eval_ops = {'rollout_error_mse': tf.metrics.mean_squared_error(
rollout_op['predicted_rollout'], rollout_op['ground_truth_rollout'])}
    # Add a leading axis, since Estimator's predict method insists that all
    # tensors have a shared leading batch axis of the same dims.
rollout_op = tree.map_structure(lambda x: x[tf.newaxis], rollout_op)
return tf_estimator.EstimatorSpec(
mode=mode,
train_op=None,
loss=loss,
predictions=rollout_op,
eval_metric_ops=eval_ops)
return estimator_fn
def _read_metadata(data_path):
with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp:
return json.loads(fp.read())
def main(_):
"""Train or evaluates the model."""
if FLAGS.mode in ['train', 'eval']:
estimator = tf_estimator.Estimator(
get_one_step_estimator_fn(FLAGS.data_path, FLAGS.noise_std),
model_dir=FLAGS.model_path)
if FLAGS.mode == 'train':
# Train all the way through.
estimator.train(
input_fn=get_input_fn(FLAGS.data_path, FLAGS.batch_size,
mode='one_step_train', split='train'),
max_steps=FLAGS.num_steps)
else:
# One-step evaluation from checkpoint.
eval_metrics = estimator.evaluate(input_fn=get_input_fn(
FLAGS.data_path, FLAGS.batch_size,
mode='one_step', split=FLAGS.eval_split))
logging.info('Evaluation metrics:')
logging.info(eval_metrics)
elif FLAGS.mode == 'eval_rollout':
if not FLAGS.output_path:
raise ValueError('A rollout path must be provided.')
rollout_estimator = tf_estimator.Estimator(
get_rollout_estimator_fn(FLAGS.data_path, FLAGS.noise_std),
model_dir=FLAGS.model_path)
# Iterate through rollouts saving them one by one.
metadata = _read_metadata(FLAGS.data_path)
rollout_iterator = rollout_estimator.predict(
input_fn=get_input_fn(FLAGS.data_path, batch_size=1,
mode='rollout', split=FLAGS.eval_split))
for example_index, example_rollout in enumerate(rollout_iterator):
example_rollout['metadata'] = metadata
filename = f'rollout_{FLAGS.eval_split}_{example_index}.pkl'
filename = os.path.join(FLAGS.output_path, filename)
logging.info('Saving: %s.', filename)
if not os.path.exists(FLAGS.output_path):
os.mkdir(FLAGS.output_path)
with open(filename, 'wb') as file:
pickle.dump(example_rollout, file)
if __name__ == '__main__':
tf.disable_v2_behavior()
app.run(main)
|
deepmind-research-master
|
learning_to_simulate/train.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools to compute the connectivity of the graph."""
import functools
import numpy as np
from sklearn import neighbors
import tensorflow.compat.v1 as tf
def _compute_connectivity(positions, radius, add_self_edges):
"""Get the indices of connected edges with radius connectivity.
Args:
positions: Positions of nodes in the graph. Shape:
[num_nodes_in_graph, num_dims].
radius: Radius of connectivity.
add_self_edges: Whether to include self edges or not.
Returns:
senders indices [num_edges_in_graph]
receiver indices [num_edges_in_graph]
"""
tree = neighbors.KDTree(positions)
receivers_list = tree.query_radius(positions, r=radius)
num_nodes = len(positions)
senders = np.repeat(range(num_nodes), [len(a) for a in receivers_list])
receivers = np.concatenate(receivers_list, axis=0)
if not add_self_edges:
# Remove self edges.
mask = senders != receivers
senders = senders[mask]
receivers = receivers[mask]
return senders, receivers
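# A minimal numpy sketch (not part of the original file): three nodes on a
# line with radius 1.5, so only adjacent nodes are connected.
def _example_compute_connectivity():
  positions = np.array([[0.0], [1.0], [2.0]])
  senders, receivers = _compute_connectivity(
      positions, radius=1.5, add_self_edges=False)
  # The four directed edges (0, 1), (1, 0), (1, 2), (2, 1), possibly in a
  # different order depending on the KDTree query.
  return senders, receivers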
def _compute_connectivity_for_batch(
positions, n_node, radius, add_self_edges):
"""`compute_connectivity` for a batch of graphs.
Args:
positions: Positions of nodes in the batch of graphs. Shape:
[num_nodes_in_batch, num_dims].
n_node: Number of nodes for each graph in the batch. Shape:
[num_graphs in batch].
radius: Radius of connectivity.
add_self_edges: Whether to include self edges or not.
Returns:
senders indices [num_edges_in_batch]
receiver indices [num_edges_in_batch]
number of edges per graph [num_graphs_in_batch]
"""
# TODO(alvarosg): Consider if we want to support batches here or not.
# Separate the positions corresponding to particles in different graphs.
positions_per_graph_list = np.split(positions, np.cumsum(n_node[:-1]), axis=0)
receivers_list = []
senders_list = []
n_edge_list = []
num_nodes_in_previous_graphs = 0
# Compute connectivity for each graph in the batch.
for positions_graph_i in positions_per_graph_list:
senders_graph_i, receivers_graph_i = _compute_connectivity(
positions_graph_i, radius, add_self_edges)
num_edges_graph_i = len(senders_graph_i)
n_edge_list.append(num_edges_graph_i)
# Because the inputs will be concatenated, we need to add offsets to the
# sender and receiver indices according to the number of nodes in previous
# graphs in the same batch.
receivers_list.append(receivers_graph_i + num_nodes_in_previous_graphs)
senders_list.append(senders_graph_i + num_nodes_in_previous_graphs)
num_nodes_graph_i = len(positions_graph_i)
num_nodes_in_previous_graphs += num_nodes_graph_i
# Concatenate all of the results.
senders = np.concatenate(senders_list, axis=0).astype(np.int32)
receivers = np.concatenate(receivers_list, axis=0).astype(np.int32)
n_edge = np.stack(n_edge_list).astype(np.int32)
return senders, receivers, n_edge
def compute_connectivity_for_batch_pyfunc(
positions, n_node, radius, add_self_edges=True):
"""`_compute_connectivity_for_batch` wrapped in a pyfunc."""
partial_fn = functools.partial(
_compute_connectivity_for_batch, add_self_edges=add_self_edges)
senders, receivers, n_edge = tf.py_function(
partial_fn,
[positions, n_node, radius],
[tf.int32, tf.int32, tf.int32])
senders.set_shape([None])
receivers.set_shape([None])
n_edge.set_shape(n_node.get_shape())
return senders, receivers, n_edge
|
deepmind-research-master
|
learning_to_simulate/connectivity_utils.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Methods to calculate input noise."""
import tensorflow.compat.v1 as tf
from learning_to_simulate import learned_simulator
def get_random_walk_noise_for_position_sequence(
position_sequence, noise_std_last_step):
"""Returns random-walk noise in the velocity applied to the position."""
velocity_sequence = learned_simulator.time_diff(position_sequence)
# We want the noise scale in the velocity at the last step to be fixed.
# Because we are going to compose noise at each step using a random_walk:
# std_last_step**2 = num_velocities * std_each_step**2
# so to keep `std_last_step` fixed, we apply at each step:
  # std_each_step = std_last_step / np.sqrt(num_velocities)
# TODO(alvarosg): Make sure this is consistent with the value and
# description provided in the paper.
num_velocities = velocity_sequence.shape.as_list()[1]
velocity_sequence_noise = tf.random.normal(
tf.shape(velocity_sequence),
stddev=noise_std_last_step / num_velocities ** 0.5,
dtype=position_sequence.dtype)
# Apply the random walk.
velocity_sequence_noise = tf.cumsum(velocity_sequence_noise, axis=1)
# Integrate the noise in the velocity to the positions, assuming
  # an Euler integrator and a dt = 1, and adding no noise to the very first
# position (since that will only be used to calculate the first position
# change).
position_sequence_noise = tf.concat([
tf.zeros_like(velocity_sequence_noise[:, 0:1]),
tf.cumsum(velocity_sequence_noise, axis=1)], axis=1)
return position_sequence_noise
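# A minimal sketch (not part of the original file) on a toy sequence of
# 3 particles, 6 positions and 2 dimensions.
def _example_random_walk_noise():
  positions = tf.zeros([3, 6, 2])
  noise = get_random_walk_noise_for_position_sequence(
      positions, noise_std_last_step=6.7e-4)
  # noise has shape [3, 6, 2]; noise[:, 0] is exactly zero, and the
  # accumulated velocity noise at the last step has std ~= 6.7e-4, since
  # each of the 5 steps contributes std 6.7e-4 / sqrt(5).
  return noise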
|
deepmind-research-master
|
learning_to_simulate/noise_utils.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example script accompanying ICML 2020 submission.
"Learning to Simulate Complex Physics with Graph Networks"
Alvaro Sanchez-Gonzalez*, Jonathan Godwin*, Tobias Pfaff*, Rex Ying,
Jure Leskovec, Peter W. Battaglia
https://arxiv.org/abs/2002.09405
Here we provide the utility function `sample_random_position_sequence()` which
returns a sequence of positions for a variable number of particles, similar to
what a real dataset would provide, and connect the model to it, in both
single-step inference and training mode.
Dependencies include Tensorflow 1.x, Sonnet 1.x and the Graph Nets 1.1 library.
"""
import collections
from learning_to_simulate import learned_simulator
from learning_to_simulate import noise_utils
import numpy as np
import tensorflow.compat.v1 as tf
INPUT_SEQUENCE_LENGTH = 6
SEQUENCE_LENGTH = INPUT_SEQUENCE_LENGTH + 1 # add one target position.
NUM_DIMENSIONS = 3
NUM_PARTICLE_TYPES = 6
BATCH_SIZE = 5
GLOBAL_CONTEXT_SIZE = 6
Stats = collections.namedtuple("Stats", ["mean", "std"])
DUMMY_STATS = Stats(
mean=np.zeros([NUM_DIMENSIONS], dtype=np.float32),
std=np.ones([NUM_DIMENSIONS], dtype=np.float32))
DUMMY_CONTEXT_STATS = Stats(
mean=np.zeros([GLOBAL_CONTEXT_SIZE], dtype=np.float32),
std=np.ones([GLOBAL_CONTEXT_SIZE], dtype=np.float32))
DUMMY_BOUNDARIES = [(-1., 1.)] * NUM_DIMENSIONS
def sample_random_position_sequence():
"""Returns mock data mimicking the input features collected by the encoder."""
num_particles = tf.random_uniform(
shape=(), minval=50, maxval=1000, dtype=tf.int32)
position_sequence = tf.random.normal(
shape=[num_particles, SEQUENCE_LENGTH, NUM_DIMENSIONS])
return position_sequence
def main():
# Build the model.
learnable_model = learned_simulator.LearnedSimulator(
num_dimensions=NUM_DIMENSIONS,
connectivity_radius=0.05,
graph_network_kwargs=dict(
latent_size=128,
mlp_hidden_size=128,
mlp_num_hidden_layers=2,
num_message_passing_steps=10,
),
boundaries=DUMMY_BOUNDARIES,
normalization_stats={"acceleration": DUMMY_STATS,
"velocity": DUMMY_STATS,
"context": DUMMY_CONTEXT_STATS,},
num_particle_types=NUM_PARTICLE_TYPES,
particle_type_embedding_size=16,
)
# Sample a batch of particle sequences with shape:
# [TOTAL_NUM_PARTICLES, SEQUENCE_LENGTH, NUM_DIMENSIONS]
sampled_position_sequences = [
sample_random_position_sequence() for _ in range(BATCH_SIZE)]
position_sequence_batch = tf.concat(sampled_position_sequences, axis=0)
# Count how many particles are present in each element in the batch.
# [BATCH_SIZE]
n_particles_per_example = tf.stack(
[tf.shape(seq)[0] for seq in sampled_position_sequences], axis=0)
# Sample particle types.
# [TOTAL_NUM_PARTICLES]
particle_types = tf.random_uniform(
[tf.shape(position_sequence_batch)[0]],
0, NUM_PARTICLE_TYPES, dtype=tf.int32)
# Sample global context.
global_context = tf.random_uniform(
[BATCH_SIZE, GLOBAL_CONTEXT_SIZE], -1., 1., dtype=tf.float32)
# Separate input sequence from target sequence.
# [TOTAL_NUM_PARTICLES, INPUT_SEQUENCE_LENGTH, NUM_DIMENSIONS]
input_position_sequence = position_sequence_batch[:, :-1]
# [TOTAL_NUM_PARTICLES, NUM_DIMENSIONS]
target_next_position = position_sequence_batch[:, -1]
# Single step of inference with the model to predict next position for each
# particle [TOTAL_NUM_PARTICLES, NUM_DIMENSIONS].
predicted_next_position = learnable_model(
input_position_sequence, n_particles_per_example, global_context,
particle_types)
print(f"Per-particle output tensor: {predicted_next_position}")
# Obtaining predicted and target normalized accelerations for training.
position_sequence_noise = (
noise_utils.get_random_walk_noise_for_position_sequence(
input_position_sequence, noise_std_last_step=6.7e-4))
# Both with shape [TOTAL_NUM_PARTICLES, NUM_DIMENSIONS]
predicted_normalized_acceleration, target_normalized_acceleration = (
learnable_model.get_predicted_and_target_normalized_accelerations(
target_next_position, position_sequence_noise,
input_position_sequence, n_particles_per_example, global_context,
particle_types))
print(f"Predicted norm. acceleration: {predicted_normalized_acceleration}")
print(f"Target norm. acceleration: {target_normalized_acceleration}")
with tf.train.SingularMonitoredSession() as sess:
sess.run([predicted_next_position,
predicted_normalized_acceleration,
target_normalized_acceleration])
if __name__ == "__main__":
tf.disable_v2_behavior()
main()
|
deepmind-research-master
|
learning_to_simulate/model_demo.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Graph network implementation accompanying ICML 2020 submission.
"Learning to Simulate Complex Physics with Graph Networks"
Alvaro Sanchez-Gonzalez*, Jonathan Godwin*, Tobias Pfaff*, Rex Ying,
Jure Leskovec, Peter W. Battaglia
https://arxiv.org/abs/2002.09405
The Sonnet `EncodeProcessDecode` module provided here implements the learnable
parts of the model.
It assumes an encoder preprocessor has already built a graph with
connectivity and features as described in the paper, with features normalized
to zero-mean unit-variance.
Dependencies include Tensorflow 1.x, Sonnet 1.x and the Graph Nets 1.1 library.
"""
from typing import Callable
import graph_nets as gn
import sonnet as snt
import tensorflow as tf
Reducer = Callable[[tf.Tensor, tf.Tensor, tf.Tensor], tf.Tensor]
def build_mlp(
hidden_size: int, num_hidden_layers: int, output_size: int) -> snt.Module:
"""Builds an MLP."""
return snt.nets.MLP(
output_sizes=[hidden_size] * num_hidden_layers + [output_size]) # pytype: disable=bad-return-type # gen-stub-imports
class EncodeProcessDecode(snt.AbstractModule):
"""Encode-Process-Decode function approximator for learnable simulator."""
def __init__(
self,
latent_size: int,
mlp_hidden_size: int,
mlp_num_hidden_layers: int,
num_message_passing_steps: int,
output_size: int,
reducer: Reducer = tf.math.unsorted_segment_sum,
name: str = "EncodeProcessDecode"):
"""Inits the model.
Args:
latent_size: Size of the node and edge latent representations.
mlp_hidden_size: Hidden layer size for all MLPs.
mlp_num_hidden_layers: Number of hidden layers in all MLPs.
num_message_passing_steps: Number of message passing steps.
output_size: Output size of the decode node representations as required
by the downstream update function.
reducer: Reduction to be used when aggregating the edges in the nodes in
the interaction network. This should be a callable whose signature
matches tf.math.unsorted_segment_sum.
name: Name of the model.
"""
super().__init__(name=name)
self._latent_size = latent_size
self._mlp_hidden_size = mlp_hidden_size
self._mlp_num_hidden_layers = mlp_num_hidden_layers
self._num_message_passing_steps = num_message_passing_steps
self._output_size = output_size
self._reducer = reducer
with self._enter_variable_scope():
self._networks_builder()
def _build(self, input_graph: gn.graphs.GraphsTuple) -> tf.Tensor:
"""Forward pass of the learnable dynamics model."""
# Encode the input_graph.
latent_graph_0 = self._encode(input_graph)
# Do `m` message passing steps in the latent graphs.
latent_graph_m = self._process(latent_graph_0)
# Decode from the last latent graph.
return self._decode(latent_graph_m)
def _networks_builder(self):
"""Builds the networks."""
def build_mlp_with_layer_norm():
mlp = build_mlp(
hidden_size=self._mlp_hidden_size,
num_hidden_layers=self._mlp_num_hidden_layers,
output_size=self._latent_size)
return snt.Sequential([mlp, snt.LayerNorm()])
# The encoder graph network independently encodes edge and node features.
encoder_kwargs = dict(
edge_model_fn=build_mlp_with_layer_norm,
node_model_fn=build_mlp_with_layer_norm)
self._encoder_network = gn.modules.GraphIndependent(**encoder_kwargs)
# Create `num_message_passing_steps` graph networks with unshared parameters
# that update the node and edge latent features.
# Note that we can use `modules.InteractionNetwork` because
# it also outputs the messages as updated edge latent features.
self._processor_networks = []
for _ in range(self._num_message_passing_steps):
self._processor_networks.append(
gn.modules.InteractionNetwork(
edge_model_fn=build_mlp_with_layer_norm,
node_model_fn=build_mlp_with_layer_norm,
reducer=self._reducer))
# The decoder MLP decodes node latent features into the output size.
self._decoder_network = build_mlp(
hidden_size=self._mlp_hidden_size,
num_hidden_layers=self._mlp_num_hidden_layers,
output_size=self._output_size)
def _encode(
self, input_graph: gn.graphs.GraphsTuple) -> gn.graphs.GraphsTuple:
"""Encodes the input graph features into a latent graph."""
# Copy the globals to all of the nodes, if applicable.
if input_graph.globals is not None:
broadcasted_globals = gn.blocks.broadcast_globals_to_nodes(input_graph)
input_graph = input_graph.replace(
nodes=tf.concat([input_graph.nodes, broadcasted_globals], axis=-1),
globals=None)
# Encode the node and edge features.
latent_graph_0 = self._encoder_network(input_graph)
return latent_graph_0
def _process(
self, latent_graph_0: gn.graphs.GraphsTuple) -> gn.graphs.GraphsTuple:
"""Processes the latent graph with several steps of message passing."""
# Do `m` message passing steps in the latent graphs.
# (In the shared parameters case, just reuse the same `processor_network`)
latent_graph_prev_k = latent_graph_0
latent_graph_k = latent_graph_0
for processor_network_k in self._processor_networks:
latent_graph_k = self._process_step(
processor_network_k, latent_graph_prev_k)
latent_graph_prev_k = latent_graph_k
latent_graph_m = latent_graph_k
return latent_graph_m
def _process_step(
self, processor_network_k: snt.Module,
latent_graph_prev_k: gn.graphs.GraphsTuple) -> gn.graphs.GraphsTuple:
"""Single step of message passing with node/edge residual connections."""
# One step of message passing.
latent_graph_k = processor_network_k(latent_graph_prev_k)
# Add residuals.
latent_graph_k = latent_graph_k.replace(
nodes=latent_graph_k.nodes+latent_graph_prev_k.nodes,
edges=latent_graph_k.edges+latent_graph_prev_k.edges)
return latent_graph_k
def _decode(self, latent_graph: gn.graphs.GraphsTuple) -> tf.Tensor:
"""Decodes from the latent graph."""
return self._decoder_network(latent_graph.nodes)
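# Usage sketch (illustrative, not part of the original module). It assumes
# TF1 graph mode and hand-builds a tiny `GraphsTuple`; the sizes and
# connectivity below are made up for demonstration.
#
#   model = EncodeProcessDecode(
#       latent_size=128,
#       mlp_hidden_size=128,
#       mlp_num_hidden_layers=2,
#       num_message_passing_steps=10,
#       output_size=3)
#   input_graph = gn.graphs.GraphsTuple(
#       nodes=tf.zeros([4, 16]),       # 4 nodes with 16 features each.
#       edges=tf.zeros([6, 8]),        # 6 edges with 8 features each.
#       senders=tf.constant([0, 1, 2, 3, 0, 2]),
#       receivers=tf.constant([1, 2, 3, 0, 2, 0]),
#       globals=None,                  # Globals are optional here.
#       n_node=tf.constant([4]),
#       n_edge=tf.constant([6]))
#   per_node_output = model(input_graph)  # Shape [num_nodes, output_size].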
|
deepmind-research-master
|
learning_to_simulate/graph_network.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sqlalchemy schema for the metadata db."""
import sqlalchemy
from sqlalchemy.ext import declarative
Column = sqlalchemy.Column
Integer = sqlalchemy.Integer
String = sqlalchemy.String
LargeBinary = sqlalchemy.LargeBinary
ForeignKey = sqlalchemy.ForeignKey
# pylint: disable=invalid-name
# https://docs.sqlalchemy.org/en/13/orm/tutorial.html
Base = declarative.declarative_base()
EpisodeTag = sqlalchemy.Table(
'EpisodeTags', Base.metadata,
Column(
'EpisodeId', String, ForeignKey('Episodes.EpisodeId'),
primary_key=True),
Column('Tag', String, ForeignKey('Tags.Name'), primary_key=True))
"""Table relating episodes and tags.
Attributes:
EpisodeId: A string of digits that uniquely identifies the episode.
Tag: Human readable tag name.
"""
class Episode(Base):
"""Table describing individual episodes.
Attributes:
EpisodeId: A string of digits that uniquely identifies the episode.
TaskId: A human readable name for the task corresponding to the behavior
that generated the episode.
DataPath: The name of the episode file holding the data for this episode.
Timestamp: A unix timestamp recording when the episode was generated.
EpisodeType: A string describing the type of policy that generated the
episode. Possible values are:
- `EPISODE_ROBOT_AGENT`: The behavior policy is a learned or scripted
controller.
- `EPISODE_ROBOT_TELEOPERATION`: The behavior policy is a human
teleoperating the robot.
- `EPISODE_ROBOT_DAGGER`: The behavior policy is a mix of controller
and human generated actions.
Tags: A list of tags attached to this episode.
Rewards: A list of `RewardSequence`s containing sketched rewards for this
episode.
"""
__tablename__ = 'Episodes'
EpisodeId = Column(String, primary_key=True)
TaskId = Column(String)
DataPath = Column(String)
Timestamp = Column(Integer)
EpisodeType = Column(String)
Tags = sqlalchemy.orm.relationship(
'Tag', secondary=EpisodeTag, back_populates='Episodes')
Rewards = sqlalchemy.orm.relationship(
'RewardSequence', backref='Episode')
class Tag(Base):
"""Table of tags that can be attached to episodes.
Attributes:
Name: Human readable tag name.
    Episodes: The episodes that have been annotated with this tag.
"""
__tablename__ = 'Tags'
Name = Column(String, primary_key=True)
Episodes = sqlalchemy.orm.relationship(
'Episode', secondary=EpisodeTag, back_populates='Tags')
class RewardSequence(Base):
"""Table describing reward sequences for episodes.
Attributes:
EpisodeId: Foreign key into the `Episodes` table.
RewardSequenceId: Distinguishes multiple rewards for the same episode.
RewardTaskId: A human readable name of the task for this reward signal.
Typically the same as the corresponding `TaskId` in the `Episodes`
table.
Type: A string describing the type of reward signal. Currently the only
value is `REWARD_SKETCH`.
User: The name of the user who produced this reward sequence.
Values: A sequence of float32 values, packed as a binary blob. There is one
float value for each frame of the episode, corresponding to the
annotated reward.
"""
__tablename__ = 'RewardSequences'
EpisodeId = Column(
'EpisodeId', String, ForeignKey('Episodes.EpisodeId'), primary_key=True)
RewardSequenceId = Column(String, primary_key=True)
RewardTaskId = Column('RewardTaskId', String)
Type = Column(String)
User = Column(String)
Values = Column(LargeBinary)
class ArchiveFile(Base):
"""Table describing where episodes are stored in archives.
This information is relevant if you want to download or extract a specific
episode from the archives they are distributed in.
Attributes:
EpisodeId: Foreign key into the `Episodes` table.
ArchiveFile: Name of the archive file containing the corresponding episode.
"""
__tablename__ = 'ArchiveFiles'
EpisodeId = Column(
'EpisodeId', String, ForeignKey('Episodes.EpisodeId'), primary_key=True)
ArchiveFile = Column(String)
# pylint: enable=invalid-name
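# Usage sketch (illustrative, not part of the original module): querying
# episodes by tag with the ORM classes above. The database path and tag
# name are made-up examples.
#
#   engine = sqlalchemy.create_engine('sqlite:////tmp/metadata.sqlite')
#   session = sqlalchemy.orm.sessionmaker(bind=engine)()
#   tagged_episodes = session.query(Episode).filter(
#       Episode.Tags.any(Tag.Name == 'example_tag'))
#   for episode in tagged_episodes:
#     print(episode.EpisodeId, episode.DataPath)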
|
deepmind-research-master
|
sketchy/metadata_schema.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of loading rewards from the metadata file."""
from absl import app
from absl import flags
import numpy as np
import sqlalchemy
from sketchy import metadata_schema
flags.DEFINE_string(
'metadata', '/tmp/metadata.sqlite', 'Path to metadata file.')
FLAGS = flags.FLAGS
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
engine = sqlalchemy.create_engine('sqlite:///' + FLAGS.metadata)
session = sqlalchemy.orm.sessionmaker(bind=engine)()
episodes = session.query(metadata_schema.Episode).join(
metadata_schema.RewardSequence).limit(5)
for episode in episodes:
rewards = np.frombuffer(episode.Rewards[0].Values, dtype=np.float32)
print('---')
print(f'Episode: {episode.EpisodeId}')
print(f'Episode file: {episode.DataPath}')
print(f'Reward type: {episode.Rewards[0].Type}')
print(f'Reward values: {rewards}')
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
sketchy/reward_example.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
deepmind-research-master
|
sketchy/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of loading sketchy data in tensorflow."""
from absl import app
from absl import flags
import matplotlib.pyplot as plt
import tensorflow.compat.v2 as tf
from sketchy import sketchy
flags.DEFINE_boolean('show_images', False, 'Enable to show example images.')
FLAGS = flags.FLAGS
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tf.enable_v2_behavior()
# The example file contains only a few timesteps from a single episode.
dataset = sketchy.load_frames('sketchy/example_data.tfrecords')
dataset = dataset.prefetch(5)
for example in dataset:
print('---')
for name, value in sorted(example.items()):
print(name, value.dtype, value.shape)
if FLAGS.show_images:
plt.imshow(example['pixels/basket_front_left'])
plt.show()
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
sketchy/dataset_example.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for loading sketchy data into tensorflow."""
import tensorflow.compat.v2 as tf
def load_frames(filenames, num_parallel_reads=1, num_map_threads=None):
  """Returns a `tf.data.Dataset` of parsed per-frame feature dicts."""
if not num_map_threads:
num_map_threads = num_parallel_reads
dataset = tf.data.TFRecordDataset(
filenames, num_parallel_reads=num_parallel_reads)
return dataset.map(_parse_example, num_parallel_calls=num_map_threads)
_FEATURES = {
# Actions
'actions':
tf.io.FixedLenFeature(shape=7, dtype=tf.float32),
# Observations
'gripper/joints/velocity':
tf.io.FixedLenFeature(shape=1, dtype=tf.float32),
'gripper/joints/torque':
tf.io.FixedLenFeature(shape=1, dtype=tf.float32),
'gripper/grasp':
tf.io.FixedLenFeature(shape=1, dtype=tf.int64),
'gripper/joints/angle':
tf.io.FixedLenFeature(shape=1, dtype=tf.float32),
'sawyer/joints/velocity':
tf.io.FixedLenFeature(shape=7, dtype=tf.float32),
'sawyer/pinch/pose':
tf.io.FixedLenFeature(shape=7, dtype=tf.float32),
'sawyer/tcp/pose':
tf.io.FixedLenFeature(shape=7, dtype=tf.float32),
'sawyer/tcp/effort':
tf.io.FixedLenFeature(shape=6, dtype=tf.float32),
'sawyer/joints/torque':
tf.io.FixedLenFeature(shape=7, dtype=tf.float32),
'sawyer/tcp/velocity':
tf.io.FixedLenFeature(shape=6, dtype=tf.float32),
'sawyer/joints/angle':
tf.io.FixedLenFeature(shape=7, dtype=tf.float32),
'wrist/torque':
tf.io.FixedLenFeature(shape=3, dtype=tf.float32),
'wrist/force':
tf.io.FixedLenFeature(shape=3, dtype=tf.float32),
'pixels/basket_front_left':
tf.io.FixedLenFeature(shape=1, dtype=tf.string),
'pixels/basket_back_left':
tf.io.FixedLenFeature(shape=1, dtype=tf.string),
'pixels/basket_front_right':
tf.io.FixedLenFeature(shape=1, dtype=tf.string),
'pixels/royale_camera_driver_depth':
tf.io.FixedLenFeature(shape=(171, 224, 1), dtype=tf.float32),
'pixels/royale_camera_driver_gray':
tf.io.FixedLenFeature(shape=1, dtype=tf.string),
'pixels/usbcam0':
tf.io.FixedLenFeature(shape=1, dtype=tf.string),
'pixels/usbcam1':
tf.io.FixedLenFeature(shape=1, dtype=tf.string),
}
def _parse_example(example):
return _decode_images(tf.io.parse_single_example(example, _FEATURES))
def _decode_images(record):
for name, value in list(record.items()):
if value.dtype == tf.string:
record[name] = tf.io.decode_jpeg(value[0])
return record
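# Usage sketch (illustrative, not part of the original module): load a few
# frames and inspect the decoded tensors. The filename matches the example
# data used in `dataset_example.py` above.
#
#   dataset = load_frames('sketchy/example_data.tfrecords')
#   for frame in dataset.take(2):
#     print(frame['actions'].shape)                   # (7,)
#     print(frame['pixels/basket_front_left'].shape)  # (H, W, 3) after the
#                                                     # JPEG string is decoded.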
|
deepmind-research-master
|
sketchy/sketchy.py
|
# Copyright 2018 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines some `predicates` for the predicate_task."""
import abc
import colorsys
import numpy as np
HSV_SATURATION = 0.5
HSV_ACTIVATED_SATURATION = 0.75
HSV_VALUE = 1.0
WALKER_GOAL_RGBA = [0, 0, 0]
WALKER_GOAL_PRESSED_RGBA = [128, 128, 128]
INACTIVE_OBSERVATION_VALUE = [-1] * 5
# Define globals for the special encoding.
MOVABLE_TYPES = {'walker': 0, 'box': 1}
TARGET_TYPES = {'box': 0, 'target': 1}
PREDICATE_TYPES = {'on': 0, 'close_to': 1, 'far_from': 2}
class BasePredicate(object, metaclass=abc.ABCMeta):
"""Base class for all predicates."""
def __init__(self, walker):
self._walker = walker
@abc.abstractmethod
def reinitialize(self, random_state):
"""Reinitializes a new, potentially random, predicate state.
    The reinitialize method should reset to a new predicate state, which may
    update the `objects_in_use` reported by the `Predicate`. This method can
    be called multiple times before a final, binding predicate set is found.
    Therefore no irreversible changes to the model (e.g. setting colors)
    should be made here. Any changes affecting the MuJoCo model should
    instead be made in the `activate_predicate` method.
Args:
random_state: An instance of `np.RandomState` which may be seeded to
ensure a deterministic environment.
"""
pass
@abc.abstractmethod
def activate_predicate(self):
"""Activates the current predicate configuration.
Any changes that are non-reversible like setting object properties or
affinities *must* only be done in this method. At this point, the
`predicate_task` logic has confirmed that a valid predicate configuration
has been found.
"""
pass
@property
def objects_in_use(self):
"""Returns the `set` of objects used for this episode."""
return set()
@abc.abstractproperty
def observation_value(self):
"""Returns a `dict` to be used as the predicate observable."""
pass
@abc.abstractmethod
def is_active(self, physics):
"""Boolean method indicating whether the predicate has been activated.
If `True`, it implies the condition for the predicate has been satisfied
and the walker can be rewarded.
Args:
physics: An instance of `control.Physics`.
"""
pass
@property
def inactive_observation_value(self):
"""observation_value indicating a `Predicate` is inactive.
The `PredicateTask` randomly samples the number of active predicates to be
used on each episode. For a consistent `observation_spec`, the predicates
that are not active need a special observation_value that cannot be used
anywhere else.
Returns:
A special value indicating that the predicate is inactive and is not used
by any other predicate in the task.
"""
return INACTIVE_OBSERVATION_VALUE
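# Illustrative sketch (not part of the original module): a minimal concrete
# predicate showing the overrides a subclass must provide. The observation
# encoding is a made-up placeholder.
class _ExampleAlwaysActivePredicate(BasePredicate):
  """Toy predicate that is satisfied immediately; for illustration only."""
  def reinitialize(self, random_state):
    del random_state  # No per-episode state to reset in this toy example.
  def activate_predicate(self):
    pass  # No irreversible model changes are needed here.
  @property
  def observation_value(self):
    return np.array([0, 0, 0, 0, 0])  # Placeholder 5-element encoding.
  def is_active(self, physics):
    del physics  # Unused: this toy predicate is always satisfied.
    return True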
class MoveWalkerToTarget(BasePredicate):
"""Predicate to move a walker to a specific target."""
def __init__(self, walker, target, target_index=0):
"""Predicate to move a walker or box to a target.
Args:
walker: An locomotion `Walker` instance to use for this predicate.
target: `locomotion.prop` instance containing an `activated` property.
target_index: An 'int' argument to add to the observable to indicate the
index of the target.
"""
super(MoveWalkerToTarget, self).__init__(walker)
self._target = target
self._target_id = target_index
def reinitialize(self, random_state):
self._target.deregister_entities()
def activate_predicate(self):
self._target.register_entities(self._walker)
self._target.set_colors(WALKER_GOAL_RGBA, WALKER_GOAL_PRESSED_RGBA)
@property
def objects_in_use(self):
return set([self._walker, self._target])
@property
def observation_value(self):
return np.array([
MOVABLE_TYPES['walker'], 0, TARGET_TYPES['target'], self._target_id,
PREDICATE_TYPES['close_to']
])
def is_active(self, physics):
return self._target.activated
class MoveWalkerToRandomTarget(BasePredicate):
"""Predicate to move a walker to a random target."""
def __init__(self, walker, targets=None):
"""Predicate to move a walker or box to a target.
Args:
walker: An locomotion `Walker` instance to use for this predicate.
targets: An optional list of `locomotion.prop` instances each of which
contains an `activated` property.
"""
super(MoveWalkerToRandomTarget, self).__init__(walker)
self._targets = targets
self._target_to_move_to = None
def reinitialize(self, random_state):
if self._target_to_move_to is not None:
self._target_to_move_to.deregister_entities()
self._target_to_move_to = random_state.choice(self._targets)
self._target_idx = self._targets.index(self._target_to_move_to)
def activate_predicate(self):
self._target_to_move_to.register_entities(self._walker)
self._target_to_move_to.set_colors(WALKER_GOAL_RGBA,
WALKER_GOAL_PRESSED_RGBA)
@property
def objects_in_use(self):
return set([self._walker, self._target_to_move_to])
@property
def observation_value(self):
return np.array([
MOVABLE_TYPES['walker'], 0, TARGET_TYPES['target'], self._target_idx,
PREDICATE_TYPES['close_to']
])
def is_active(self, physics):
return self._target_to_move_to.activated
class MoveWalkerToBox(BasePredicate):
"""Predicate to move a walker to a specific box."""
def __init__(self, walker, box, box_index=0, detection_region=None):
"""Predicate to move a walker to a specific box.
Args:
      walker: A locomotion `Walker` instance to use for this predicate.
box: A `manipulation.prop` instance to move.
box_index: An integer index to use for the observable to identify the
`box`.
detection_region: A 2-tuple indicating the tolerances in x and y for the
walker to be deemed `close_to` the box. If `None`, contact based
detection is used.
"""
super(MoveWalkerToBox, self).__init__(walker)
self._box = box
self._detection_region = detection_region
self._box_index = box_index
self._walker_geoms = None
def reinitialize(self, random_state):
if self._walker_geoms is None:
# pylint: disable=protected-access
self._walker_geoms = set(self._walker._mjcf_root.find_all('geom'))
def activate_predicate(self):
self._box.geom.rgba[:3] = WALKER_GOAL_RGBA
@property
def objects_in_use(self):
return set([self._walker, self._box])
@property
def observation_value(self):
return np.array([
MOVABLE_TYPES['walker'], 0, TARGET_TYPES['box'], self._box_index,
PREDICATE_TYPES['close_to']
])
def is_active(self, physics):
if self._detection_region is None:
return self._is_walker_contacting_box(physics)
else:
return np.all(
np.abs(
physics.bind(self._walker.root_body).xpos -
physics.bind(self._box.geom).xpos)[:2] < self._detection_region)
def _is_walker_contacting_box(self, physics):
walker_geom_ids = [
physics.bind(geom).element_id for geom in self._walker_geoms
]
for contact in physics.data.contact:
contact_geoms = set([contact.geom1, contact.geom2])
if (physics.bind(self._box.geom).element_id in contact_geoms and
contact_geoms.intersection(walker_geom_ids)):
return True
return False
class MoveBoxToBox(BasePredicate):
"""Predicate to move a walker to a specific box."""
def __init__(self,
walker,
first_box,
second_box,
first_box_index=0,
second_box_index=1,
detection_region=None):
"""Predicate to move a walker to a specific box.
Args:
walker: An locomotion `Walker` instance to use for this predicate.
first_box: A `manipulation.prop` instance to move.
second_box: A `manipulation.prop` instance to move.
first_box_index: An integer index to use for the observable to identify
the `box`.
second_box_index: An integer index to use for the observable to identify
the `box`.
      detection_region: A 2-tuple indicating the tolerances in x and y for the
        two boxes to be deemed `close_to` each other. If `None`, contact based
        detection is used.
"""
super(MoveBoxToBox, self).__init__(walker)
self._first_box = first_box
self._second_box = second_box
self._detection_region = detection_region
self._first_box_index = first_box_index
self._second_box_index = second_box_index
self._walker_geoms = None
def reinitialize(self, random_state):
if self._walker_geoms is None:
# pylint: disable=protected-access
self._walker_geoms = set(self._walker._mjcf_root.find_all('geom'))
def activate_predicate(self):
self._first_box.geom.rgba[:3] = WALKER_GOAL_RGBA
@property
def objects_in_use(self):
return set([self._first_box, self._second_box])
@property
def observation_value(self):
return np.array([
MOVABLE_TYPES['box'], self._first_box_index, TARGET_TYPES['box'],
self._second_box_index, PREDICATE_TYPES['close_to']
])
def is_active(self, physics):
if self._detection_region is None:
return self._are_boxes_in_contact(physics)
else:
return np.all(
np.abs(
physics.bind(self._first_box.geom).xpos -
physics.bind(self._second_box.geom).xpos)[:2] <
self._detection_region)
def _are_boxes_in_contact(self, physics):
for contact in physics.data.contact:
contact_geoms = set([contact.geom1, contact.geom2])
if (physics.bind(self._first_box.geom).element_id in contact_geoms and
physics.bind(self._second_box.geom).element_id in contact_geoms):
return True
return False
class MoveBoxToTarget(BasePredicate):
"""Predicate to move a walker to a specific target."""
def __init__(self, walker, box, target, box_index=0, target_index=0):
"""Predicate to move a walker or box to a target.
Args:
walker: An locomotion `Walker` instance to use for this predicate.
box: A `manipulation.prop` to move to the target.
target: `locomotion.prop` instance containing an `activated` property.
box_index: An 'int' argument to add to the observable to indicate the
index of the box.
target_index: An 'int' argument to add to the observable to indicate the
index of the target.
"""
super(MoveBoxToTarget, self).__init__(walker)
self._box = box
self._target = target
self._box_id = box_index
self._target_id = target_index
self._original_box_size = np.copy(box.geom.size)
self._rgb = None
self._activated_rgb = None
def reinitialize(self, random_state):
self._target.deregister_entities()
self._get_box_properties(random_state)
def _get_box_properties(self, random_state):
hue0 = random_state.uniform()
hue = (hue0 + self._target_id) % 1.0
self._rgb = colorsys.hsv_to_rgb(hue, HSV_SATURATION, HSV_VALUE)
self._activated_rgb = colorsys.hsv_to_rgb(hue, HSV_ACTIVATED_SATURATION,
HSV_VALUE)
def activate_predicate(self):
self._target.set_colors(self._rgb, self._activated_rgb)
self._box.geom.rgba[:3] = self._rgb
self._target.register_entities(self._box)
@property
def objects_in_use(self):
return set([self._box, self._target])
@property
def observation_value(self):
return np.array([
MOVABLE_TYPES['box'], self._box_id, TARGET_TYPES['target'],
self._target_id, PREDICATE_TYPES['close_to']
])
def is_active(self, physics):
return self._target.activated
class MoveBoxToRandomTarget(BasePredicate):
"""Predicate to move a walker to a random target."""
def __init__(self, walker, box, box_index=0, targets=None):
"""Predicate to move a walker or box to a target.
Args:
walker: An locomotion `Walker` instance to use for this predicate.
box: A `manipulation.prop` to move to the target.
box_index: An optional 'int' argument to add to the observable to indicate
the index of the box.
targets: An optional list of `locomotion.prop` instances each of which
contains an `activated` property.
"""
super(MoveBoxToRandomTarget, self).__init__(walker)
self._targets = targets
self._box_to_move = box
self._box_index = box_index
self._target_to_move_to = None
self._original_box_size = np.copy(box.geom.size)
self._rgb = None
self._activated_rgb = None
def reinitialize(self, random_state):
if self._target_to_move_to is not None:
self._target_to_move_to.deregister_entities()
self._target_to_move_to = random_state.choice(self._targets)
self._target_idx = self._targets.index(self._target_to_move_to)
self._get_box_properties(random_state)
def _get_box_properties(self, random_state):
hue0 = random_state.uniform()
hue = (hue0 + (self._target_idx / len(self._targets))) % 1.0
self._rgb = colorsys.hsv_to_rgb(hue, HSV_SATURATION, HSV_VALUE)
self._activated_rgb = colorsys.hsv_to_rgb(hue, HSV_ACTIVATED_SATURATION,
HSV_VALUE)
def activate_predicate(self):
self._target_to_move_to.set_colors(self._rgb, self._activated_rgb)
self._box_to_move.geom.rgba[:3] = self._rgb
self._target_to_move_to.register_entities(self._box_to_move)
@property
def objects_in_use(self):
return set([self._box_to_move, self._target_to_move_to])
@property
def observation_value(self):
return np.array([
MOVABLE_TYPES['box'], self._box_index,
TARGET_TYPES['target'], self._target_idx,
PREDICATE_TYPES['close_to']
])
def is_active(self, physics):
return self._target_to_move_to.activated
|
deepmind-research-master
|
box_arrangement/predicates.py
|
# Copyright 2018 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A task where different `Predicate`s need to be solved.
In each episode a spiking reward is given for each `Predicate` solved with an
extra reward bonus added when all of the predicates are solved. On each episode
the number of predicates are sampled randomly. This provides a common interface
to specify distributions over tasks ranging in difficulty levels but with common
components. Each `Predicate` involves some manipulation of the walker, props and
targets which thus allows for rich configurations of tasks to be defined.
"""
import colorsys
import functools
from dm_control import composer
from dm_control.composer.observation import observable
from dm_env import specs
import numpy as np
_FLOOR_GAP_CHAR = '#'
_AMBIENT_HEADLIGHT = 0.8
_HSV_SATURATION = 0.5
_HSV_ACTIVATED_SATURATION = 0.75
_HSV_VALUE = 1.0
_PROP_SIZE = 0.5
_MAX_ITERATIONS = 1000
def _generate_target_permutation(num_targets, random_state):
targets = list(range(num_targets))
random_state.shuffle(targets)
return targets
class PredicateTask(composer.Task):
"""Requires objects to be moved onto targets."""
def __init__(self,
walker,
maze_arena,
predicates,
props=None,
targets=None,
max_num_predicates=1,
randomize_num_predicates=False,
predicate_prob=None,
reward_scale=1.0,
terminating_reward_bonus=5.0,
regenerate_predicates=False,
physics_timestep=0.001,
control_timestep=0.025,
alive_threshold=-0.5):
"""Initializes a task with multiple sub-components(predicates) to be solved.
This task essentially contains different flavors of go to target. The
task contains a walker, props and target positions. To solve the entire
task, the walker would need to solve a certain number of 'predicates' or
sub-tasks. For instance, the task could contain 2 predicates for the
walker going to a target position and the walker moving a box to a target
position. In such a case, there is an implicit ordering of the way the
walker needs to solve things to achieve the net task.
Args:
walker: A `Walker` instance.
maze_arena: An `Entity` that defines a maze-like arena.
      predicates: A list of `Predicate` instances for this task.
props: An optional list of `manipulation.prop` instances for the task.
These are used to generate observables for the task.
targets: An optional list of `locomotion.prop` instances for the task.
These are used to generate observables for the task.
max_num_predicates: The maximum number of predicates to use in each
episode of the task.
      randomize_num_predicates: A `bool` flag indicating whether the number of
        active predicates should be randomized for each episode. If set to
        `True`, then on each episode, between 1 and `max_num_predicates`
        predicates are chosen as active and
        `predicate.inactive_observation_value` is output for the remaining
        slots in the observation.
      predicate_prob: An optional `list` containing the probabilities for each
        of the `predicates`. If not `None`, must have the same length as
        `predicates`.
reward_scale: `float` to scale the reward.
terminating_reward_bonus: A bonus added to the reward when all predicates
have been solved.
      regenerate_predicates: A `bool` flag which, when set, spawns a new set
        of predicates when the previous set is solved instead of terminating
        the episode.
physics_timestep: The time step of the physics simulation.
control_timestep: Should be an integer multiple of the physics time step.
      alive_threshold: Terminate the episode if the walker's aliveness falls
        below this threshold; aliveness is in [-1., 0.].
    Raises:
      ValueError: If `max_num_predicates` is greater than the number of
        `predicates` provided.
"""
if max_num_predicates > len(predicates):
raise ValueError('Not enough predicates for task. The maximum number of '
'predicates can be '
'{} but only {} predicates provided.'.format(
max_num_predicates, len(predicates)))
self._arena = maze_arena
self._walker = walker
self._reward_scale = reward_scale
self._alive_threshold = alive_threshold
self._terminating_reward_bonus = terminating_reward_bonus
self._arena.mjcf_model.visual.headlight.ambient = [_AMBIENT_HEADLIGHT] * 3
maze_arena.text_maze_regenerated_hook = self._regenerate_positions
self._max_num_predicates = max_num_predicates
self._predicates = predicates
self._predicate_prob = predicate_prob
self._randomize_num_predicates = randomize_num_predicates
self._active_predicates = []
self._regen_predicates = regenerate_predicates
self._reward = 0
# Targets.
self._targets = targets
for target in targets:
self._arena.attach(target)
if props is None:
props = []
# Props.
self._props = props
    # With M props and 1 walker, we choose N predicates to form the task.
for prop in props:
prop.geom.rgba = [0, 0, 0, 1] # Will be randomized for each episode.
self._arena.add_free_entity(prop)
# Create walkers and corresponding observables.
walker.create_root_joints(self._arena.attach(walker))
self._create_per_walker_observables(walker)
self._generate_target_permutation = None
maze_arena.text_maze_regenerated_hook = self._regenerate_positions
# Set time steps.
self.set_timesteps(
physics_timestep=physics_timestep, control_timestep=control_timestep)
def _create_per_walker_observables(self, walker):
# Enable proprioceptive observables.
for obs in (walker.observables.proprioception +
walker.observables.kinematic_sensors +
[walker.observables.position, walker.observables.orientation]):
obs.enabled = True
xpos_origin_callable = lambda phys: phys.bind(walker.root_body).xpos
# Egocentric prop positions.
# For each prop, we add the positions for the 8 corners using the sites.
for prop_id, prop in enumerate(self._props):
def _prop_callable(physics, prop=prop):
return [physics.bind(s).xpos for s in prop.corner_sites]
if len(self._props) > 1:
observable_name = 'prop_{}_position'.format(prop_id)
else:
observable_name = 'prop_position'
walker.observables.add_egocentric_vector(
observable_name,
observable.Generic(_prop_callable),
origin_callable=xpos_origin_callable)
# Egocentric target positions.
def _target_callable(physics):
target_list = []
for target in self._targets:
target_list.append(target.site_pos(physics))
return np.array(target_list)
walker.observables.add_egocentric_vector(
'target_positions',
observable.Generic(_target_callable),
origin_callable=xpos_origin_callable)
# Whether targets are activated.
def _predicate_activated_callable(physics):
predicate_activated_list = np.full(self._max_num_predicates, True)
for i, predicate in enumerate(self._active_predicates):
predicate_activated_list[i] = predicate.is_active(physics)
return predicate_activated_list
walker.observables.add_observable(
'predicates_activated',
observable.Generic(_predicate_activated_callable))
self._observables = self._walker.observables.as_dict()
# Predicate observables.
for pred_idx in range(self._max_num_predicates):
def _predicate_callable(_, pred_idx=pred_idx):
"""Callable for the predicate observation."""
        if pred_idx < len(self._active_predicates):
predicate = self._active_predicates[pred_idx]
return predicate.observation_value
else:
# Use any predicates inactive observation to fill the rest.
predicate = self._predicates[0]
return predicate.inactive_observation_value
predicate_name = 'predicate_{}'.format(pred_idx)
self._observables[predicate_name] = observable.Generic(
_predicate_callable)
self._observables[predicate_name].enabled = True
@property
def observables(self):
return self._observables
@property
def name(self):
return 'predicate_task'
@property
def root_entity(self):
return self._arena
def _regenerate_positions(self):
target_permutation = self._generate_target_permutation(
len(self._arena.target_positions))
num_permutations = len(self._props) + len(self._targets)
target_permutation = target_permutation[:num_permutations]
if len(self._props) + len(self._targets) > len(
self._arena.target_positions):
raise RuntimeError(
'The generated maze does not contain enough target positions '
'for the requested number of props ({}) and targets ({}): got {}.'
.format(
len(self._props), len(self._targets),
len(self._arena.target_positions)))
self._prop_positions = []
for i in range(len(self._props)):
self._prop_positions.append(
self._arena.target_positions[target_permutation[i]])
self._target_positions = []
for i in range(len(self._targets)):
idx = i + len(self._props)
self._target_positions.append(
self._arena.target_positions[target_permutation[idx]])
def initialize_episode_mjcf(self, random_state):
self._generate_target_permutation = functools.partial(
_generate_target_permutation, random_state=random_state)
self._arena.regenerate()
# Set random colors for the props and targets.
self._set_random_colors(random_state)
self._set_active_predicates(random_state)
def _set_active_predicates(self, random_state):
# Reinitialize predicates to set any properties they want.
iteration = 0
valid_set_found = False
while not valid_set_found and iteration < _MAX_ITERATIONS:
for predicate in self._predicates:
predicate.reinitialize(random_state)
if self._randomize_num_predicates and self._max_num_predicates > 1:
num_predicates = random_state.choice(
list(range(1, self._max_num_predicates + 1)), size=1)[0]
else:
num_predicates = self._max_num_predicates
valid_set_found = self._choose_random_predicates(random_state,
num_predicates)
iteration += 1
    if not valid_set_found:
      raise ValueError(
          'Could not find a set of active predicates with '
          'unique objects after {} iterations.'.format(_MAX_ITERATIONS))
for predicate in self._active_predicates:
predicate.activate_predicate()
def _choose_random_predicates(self, random_state, num_predicates):
self._active_predicates = random_state.choice(
self._predicates,
replace=False,
size=num_predicates,
p=self._predicate_prob)
objects_in_common = self._active_predicates[0].objects_in_use
for predicate in self._active_predicates[1:]:
new_objects = predicate.objects_in_use
if objects_in_common.intersection(new_objects):
return False
      # `set.union` returns a new set rather than mutating in place.
      objects_in_common = objects_in_common.union(new_objects)
return True
def _set_random_colors(self, random_state):
hue0 = random_state.uniform()
hues = [(hue0 + i / len(self._targets)) % 1.0
for i in range(len(self._targets))]
rgbs = [
colorsys.hsv_to_rgb(hue, _HSV_SATURATION, _HSV_VALUE) for hue in hues
]
activated_rgbs = [
colorsys.hsv_to_rgb(hue, _HSV_ACTIVATED_SATURATION, _HSV_VALUE)
for hue in hues
]
# There are fewer props than targets.
# Pick as far apart colors for each prop as possible.
if self._props:
targets_per_prop = len(self._targets) // len(self._props)
else:
targets_per_prop = len(self._targets)
for prop_id in range(len(self._props)):
# The first few targets have to match the props' color.
rgb_id = prop_id * targets_per_prop
self._props[prop_id].geom.rgba[:3] = rgbs[rgb_id]
self._targets[prop_id].set_colors(rgbs[rgb_id], activated_rgbs[rgb_id])
# Assign colors not used by any prop to decoy targets.
for decoy_target_offset in range(targets_per_prop - 1):
target_id = len(
self._props) + prop_id * targets_per_prop + decoy_target_offset
rgb_id = prop_id * targets_per_prop + decoy_target_offset
self._targets[target_id].set_colors(rgbs[rgb_id], rgbs[rgb_id])
# Remainder loop for targets.
for target_id in range(targets_per_prop * len(self._props),
len(self._targets)):
self._targets[target_id].set_colors(rgbs[target_id], rgbs[target_id])
def initialize_episode(self, physics, random_state):
self._first_step = True
self._was_active = [False] * len(self._active_predicates)
walker = self._walker
spawn_indices = random_state.permutation(len(self._arena.spawn_positions))
spawn_index = spawn_indices[0]
walker.reinitialize_pose(physics, random_state)
spawn_position = self._arena.spawn_positions[spawn_index]
spawn_rotation = random_state.uniform(-np.pi, np.pi)
spawn_quat = np.array(
[np.cos(spawn_rotation / 2), 0, 0,
np.sin(spawn_rotation / 2)])
walker.shift_pose(
physics, [spawn_position[0], spawn_position[1], 0.0],
spawn_quat,
rotate_velocity=True)
for prop, prop_xy_position in zip(self._props, self._prop_positions):
# Position at the middle of a maze cell.
prop_position = np.array(
[prop_xy_position[0], prop_xy_position[1], prop.geom.size[2]])
# Randomly rotate the prop around the z-axis.
prop_rotation = random_state.uniform(-np.pi, np.pi)
prop_quat = np.array(
[np.cos(prop_rotation / 2), 0, 0,
np.sin(prop_rotation / 2)])
# Taking into account the prop's orientation, first calculate how much we
# can displace the prop from the center of a maze cell without any part of
# it sticking out of the cell.
x, y, _ = prop.geom.size
cos = np.cos(prop_rotation)
sin = np.sin(prop_rotation)
x_max = max([np.abs(x * cos - y * sin), np.abs(x * cos + y * sin)])
y_max = max([np.abs(y * cos + x * sin), np.abs(y * cos - x * sin)])
prop_max_displacement = self._arena.xy_scale / 2 - np.array(
[x_max, y_max])
assert np.all(prop_max_displacement >= 0)
prop_max_displacement *= 0.99 # Safety factor.
# Then randomly displace the prop from the center of the maze cell.
prop_position[:2] += prop_max_displacement * random_state.uniform(
-1, 1, 2)
# Commit the prop's final pose.
prop.set_pose(physics, position=prop_position, quaternion=prop_quat)
for target, target_position in zip(self._targets, self._target_positions):
target_position[2] = _PROP_SIZE
target.set_position(physics, target_position)
def before_step(self, physics, actions, random_state):
if isinstance(actions, list):
actions = np.concatenate(actions)
super(PredicateTask, self).before_step(physics, actions, random_state)
if self._first_step:
self._first_step = False
else:
self._was_active = [
predicate.is_active(physics) for predicate in self._active_predicates
]
def after_step(self, physics, random_state):
if self._all_predicates_satisfied() and self._regen_predicates:
self._set_random_colors(random_state)
self._set_active_predicates(random_state)
super(PredicateTask, self).after_step(physics, random_state)
def get_reward(self, physics):
reward = 0.0
for predicate, was_active in zip(self._active_predicates, self._was_active):
if predicate.is_active(physics) and not was_active:
reward += 1.0
elif was_active and not predicate.is_active(physics):
reward -= 1.0
if self._all_predicates_satisfied():
reward += self._terminating_reward_bonus
self._reward = reward
return reward * self._reward_scale
def _all_predicates_satisfied(self):
return sum(self._was_active) == len(self._active_predicates)
def should_terminate_episode(self, physics):
return ((self._all_predicates_satisfied() and not self._regen_predicates) or
self._walker.aliveness(physics) < self._alive_threshold)
def get_discount(self, physics):
if self.should_terminate_episode(physics):
return 0.0
return 1.0
def get_reward_spec(self):
return specs.Array(shape=[], dtype=np.float32)
def get_discount_spec(self):
return specs.Array(shape=[], dtype=np.float32)
|
deepmind-research-master
|
box_arrangement/predicate_task.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
deepmind-research-master
|
box_arrangement/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['absl-py', 'dm_control', 'numpy', 'dm_env']
setup(
name='box_arrangement',
version='0.1',
description=('Sparse reward tasks involving moving and pushing boxes to'
' targets.'),
url='https://github.com/deepmind/deepmind-research/box_arrangement',
author='DeepMind',
author_email='dhruvat@google.com',
# Contained modules and scripts.
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
platforms=['any'],
license='Apache 2.0',
)
|
deepmind-research-master
|
box_arrangement/setup.py
|
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for box_arrangement.predicate_task."""
from absl.testing import absltest
from dm_control import composer
from dm_control.entities import props
from dm_control.locomotion import arenas
from dm_control.locomotion import walkers
import numpy as np
from box_arrangement import predicate_task
from box_arrangement import predicates
_EGOCENTRIC_OBSERVABLES = [
"walker/body_height",
"walker/end_effectors_pos",
"walker/joints_pos",
"walker/joints_vel",
"walker/sensors_accelerometer",
"walker/sensors_gyro",
"walker/sensors_velocimeter",
"walker/world_zaxis",
]
class PredicateTaskTest(absltest.TestCase):
def _setup_basic_gtt_task(self, num_targets=1, reward_scale=1.0):
walker = walkers.Ant()
text_maze = arenas.padded_room.PaddedRoom(
room_size=8, num_objects=2, pad_with_walls=True)
maze_arena = arenas.MazeWithTargets(maze=text_maze)
targets = []
for _ in range(num_targets):
targets.append(
props.PositionDetector(
pos=[0, 0, 0.5],
size=[0.5, 0.5, 0.5],
inverted=False,
visible=True))
test_predicates = [predicates.MoveWalkerToRandomTarget(walker, targets)]
self._task = predicate_task.PredicateTask(
walker=walker,
maze_arena=maze_arena,
predicates=test_predicates,
targets=targets,
randomize_num_predicates=False,
reward_scale=reward_scale,
terminating_reward_bonus=2.0,
)
random_state = np.random.RandomState(12345)
self._env = composer.Environment(self._task, random_state=random_state)
self._walker = walker
self._targets = targets
def test_observables(self):
self._setup_basic_gtt_task()
timestep = self._env.reset()
self.assertIn("predicate_0", timestep.observation)
self.assertIn("walker/target_positions", timestep.observation)
for observable in _EGOCENTRIC_OBSERVABLES:
self.assertIn(observable, timestep.observation)
def test_termination_and_discount(self):
self._setup_basic_gtt_task()
self._env.reset()
target_pos = (0, 0, 0.5)
# Initialize the walker away from the target.
self._walker.set_pose(
self._env.physics, position=(-2, 0, 0.0), quaternion=(1, 0, 0, 0))
self._targets[0].set_position(
self._env.physics,
target_pos)
self._env.physics.forward()
zero_action = np.zeros_like(self._env.physics.data.ctrl)
for _ in range(10):
timestep = self._env.step(zero_action)
self.assertEqual(timestep.discount, 1.0)
self.assertEqual(timestep.reward, 0.0)
walker_pos = (0, 0, 0.0)
self._walker.set_pose(
self._env.physics,
position=walker_pos)
self._env.physics.forward()
    # For a single predicate, the reward is first +1.0 for activating the
    # predicate.
timestep = self._env.step(zero_action)
self.assertEqual(timestep.discount, 1.0)
self.assertEqual(timestep.reward, 1.0)
# If the predicate is active and *remains* active, the discount gets to 0.0
# and the terminating reward bonus is given.
timestep = self._env.step(zero_action)
self.assertEqual(timestep.discount, 0.0)
self.assertEqual(timestep.reward, 2.0)
# Make sure this is a termination step.
self.assertTrue(timestep.last())
def test_reward_scaling(self):
self._setup_basic_gtt_task(reward_scale=10.0)
self._env.reset()
zero_action = np.zeros_like(self._env.physics.data.ctrl)
target_pos = (0, 0, 0.5)
walker_pos = (0, 0, 0.0)
self._targets[0].set_position(self._env.physics, target_pos)
self._walker.set_pose(self._env.physics, position=walker_pos)
self._env.physics.forward()
    # For a single predicate, the reward is first +1.0 for activating the
    # predicate.
timestep = self._env.step(zero_action)
self.assertEqual(timestep.discount, 1.0)
self.assertEqual(timestep.reward, 10.0)
# If the predicate is active and *remains* active, the discount gets to 0.0
# and the terminating reward bonus is given.
timestep = self._env.step(zero_action)
self.assertEqual(timestep.discount, 0.0)
self.assertEqual(timestep.reward, 20.0)
# Make sure this is a termination step.
self.assertTrue(timestep.last())
def test_too_few_predicates_raises_exception(self):
walker = walkers.Ant()
num_targets = 1
text_maze = arenas.padded_room.PaddedRoom(
room_size=8, num_objects=2, pad_with_walls=True)
maze_arena = arenas.MazeWithTargets(maze=text_maze)
targets = []
for _ in range(num_targets):
targets.append(
props.PositionDetector(
pos=[0, 0, 0.5],
size=[0.5, 0.5, 0.5],
inverted=False,
visible=True))
test_predicates = []
with self.assertRaisesWithLiteralMatch(
ValueError, "Not enough predicates for task."
" The maximum number of "
"predicates can be "
"1 but only 0 predicates provided."):
predicate_task.PredicateTask(
walker=walker,
maze_arena=maze_arena,
predicates=test_predicates,
targets=targets,
randomize_num_predicates=False,
reward_scale=1.0,
terminating_reward_bonus=2.0,
)
def test_error_too_few_targets(self):
walker = walkers.Ant()
num_targets = 5
text_maze = arenas.padded_room.PaddedRoom(
room_size=8, num_objects=2, pad_with_walls=True)
maze_arena = arenas.MazeWithTargets(maze=text_maze)
targets = []
for _ in range(num_targets):
targets.append(
props.PositionDetector(
pos=[0, 0, 0.5],
size=[0.5, 0.5, 0.5],
inverted=False,
visible=True))
test_predicates = [predicates.MoveWalkerToRandomTarget(walker, targets)]
task = predicate_task.PredicateTask(
walker=walker,
maze_arena=maze_arena,
predicates=test_predicates,
targets=targets,
randomize_num_predicates=False,
reward_scale=1.0,
terminating_reward_bonus=2.0,
)
random_state = np.random.RandomState(12345)
env = composer.Environment(task, random_state=random_state)
with self.assertRaisesWithLiteralMatch(
RuntimeError, "The generated maze does not contain enough target "
"positions for the requested number of props (0) and targets (5): "
"got 2."
):
env.reset()
def test_error_if_no_predicates_found(self):
walker = walkers.Ant()
num_targets = 2
text_maze = arenas.padded_room.PaddedRoom(
room_size=8, num_objects=6, pad_with_walls=True)
maze_arena = arenas.MazeWithTargets(maze=text_maze)
targets = []
for _ in range(num_targets):
targets.append(
props.PositionDetector(
pos=[0, 0, 0.5],
size=[0.5, 0.5, 0.5],
inverted=False,
visible=True))
# Moving the walker to two targets is not possible since the walker is a
# shared object in use.
test_predicates = [predicates.MoveWalkerToTarget(walker, targets[0]),
predicates.MoveWalkerToTarget(walker, targets[1])]
task = predicate_task.PredicateTask(
walker=walker,
maze_arena=maze_arena,
predicates=test_predicates,
targets=targets[1:],
randomize_num_predicates=False,
max_num_predicates=2,
reward_scale=1.0,
terminating_reward_bonus=2.0,
)
random_state = np.random.RandomState(12345)
env = composer.Environment(task, random_state=random_state)
    with self.assertRaisesWithLiteralMatch(
        ValueError, "Could not find a set of active predicates"
        " with unique objects after 1000 iterations."):
env.reset()
# However moving to one of the two targets is fine.
walker = walkers.Ant()
num_targets = 2
text_maze = arenas.padded_room.PaddedRoom(
room_size=8, num_objects=6, pad_with_walls=True)
maze_arena = arenas.MazeWithTargets(maze=text_maze)
targets = []
for _ in range(num_targets):
targets.append(
props.PositionDetector(
pos=[0, 0, 0.5],
size=[0.5, 0.5, 0.5],
inverted=False,
visible=True))
test_predicates = [predicates.MoveWalkerToTarget(walker, targets[0]),
predicates.MoveWalkerToTarget(walker, targets[1])]
task = predicate_task.PredicateTask(
walker=walker,
maze_arena=maze_arena,
predicates=test_predicates,
targets=targets[1:],
randomize_num_predicates=False,
max_num_predicates=1,
reward_scale=1.0,
terminating_reward_bonus=2.0,
)
random_state = np.random.RandomState(12345)
env = composer.Environment(task, random_state=random_state)
env.reset()
if __name__ == "__main__":
absltest.main()
|
deepmind-research-master
|
box_arrangement/predicate_task_test.py
|
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to launch viewer with an example environment."""
from absl import app
from absl import flags
from dm_control import viewer
from box_arrangement import task_examples
FLAGS = flags.FLAGS
flags.DEFINE_enum('task', 'go_to_target', [
'go_to_target', 'move_box', 'move_box_or_go_to_target',
'move_box_and_go_to_target'
], 'The task to visualize.')
TASKS = {
'go_to_target': task_examples.go_to_k_targets,
'move_box': task_examples.move_box,
'move_box_or_go_to_target': task_examples.move_box_or_gtt,
'move_box_and_go_to_target': task_examples.move_box_and_gtt,
}
def main(unused_argv):
viewer.launch(environment_loader=TASKS[FLAGS.task])
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
box_arrangement/explore.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example tasks used in publications."""
from dm_control import composer
from dm_control.entities import props
from dm_control.locomotion import arenas as locomotion_arenas
from dm_control.locomotion import walkers
from dm_control.manipulation import props as manipulation_props
from box_arrangement import dmlab_assets
from box_arrangement import predicates as predicates_module
from box_arrangement.predicate_task import PredicateTask
DEFAULT_TIME_LIMIT = 20.0
DEFAULT_CONTROL_TIMESTEP = 0.05
MIN_ROOM_SIZE = 3
def _make_predicate_task(n_boxes, n_targets,
include_gtt_predicates, include_move_box_predicates,
max_num_predicates, control_timestep, time_limit):
"""Auxiliary function to construct different predicates tasks."""
walker = walkers.Ant()
skybox = dmlab_assets.SkyBox(style='sky_03')
wall = dmlab_assets.WallTextures(style='style_03')
floor = dmlab_assets.FloorTextures(style='style_03')
  # Make the room bigger as the number of objects grows.
num_objects = n_boxes + n_targets
room_size = max(MIN_ROOM_SIZE, num_objects)
text_maze = locomotion_arenas.padded_room.PaddedRoom(
room_size=room_size, num_objects=num_objects, pad_with_walls=True)
arena = locomotion_arenas.MazeWithTargets(
maze=text_maze,
skybox_texture=skybox,
wall_textures=wall,
floor_textures=floor)
boxes = []
for _ in range(n_boxes):
boxes.append(
manipulation_props.BoxWithSites(mass=1.5, half_lengths=[0.5, 0.5, 0.5]))
targets = []
for _ in range(n_targets):
targets.append(
props.PositionDetector(
pos=[0, 0, 0.5], size=[0.5, 0.5, 0.5], inverted=False,
visible=True))
predicates = []
if include_gtt_predicates:
predicates.append(
predicates_module.MoveWalkerToRandomTarget(
walker=walker, targets=targets))
if include_move_box_predicates:
for box_idx in range(len(boxes)):
predicates.append(
predicates_module.MoveBoxToRandomTarget(
walker=walker,
box=boxes[box_idx],
box_index=box_idx,
targets=targets))
task = PredicateTask(
walker=walker,
maze_arena=arena,
predicates=predicates,
props=boxes,
targets=targets,
max_num_predicates=max_num_predicates,
randomize_num_predicates=False,
reward_scale=10.,
regenerate_predicates=False,
physics_timestep=0.005,
control_timestep=control_timestep)
env = composer.Environment(task=task, time_limit=time_limit)
return env
def go_to_k_targets(n_targets=3,
time_limit=DEFAULT_TIME_LIMIT,
control_timestep=DEFAULT_CONTROL_TIMESTEP):
"""Loads `go_to_k_targets` task."""
return _make_predicate_task(
n_boxes=0,
n_targets=n_targets,
include_gtt_predicates=True,
include_move_box_predicates=False,
max_num_predicates=1,
control_timestep=control_timestep,
time_limit=time_limit)
def move_box(n_targets=3,
time_limit=DEFAULT_TIME_LIMIT,
control_timestep=DEFAULT_CONTROL_TIMESTEP):
"""Loads `move_box` task."""
return _make_predicate_task(
n_boxes=1,
n_targets=n_targets,
include_gtt_predicates=False,
include_move_box_predicates=True,
max_num_predicates=1,
control_timestep=control_timestep,
time_limit=time_limit)
def move_box_or_gtt(n_targets=3,
time_limit=DEFAULT_TIME_LIMIT,
control_timestep=DEFAULT_CONTROL_TIMESTEP):
"""Loads `move_box_or_gtt` task."""
return _make_predicate_task(
n_boxes=1,
n_targets=n_targets,
include_gtt_predicates=True,
include_move_box_predicates=True,
max_num_predicates=1,
control_timestep=control_timestep,
time_limit=time_limit)
def move_box_and_gtt(n_targets=3,
time_limit=DEFAULT_TIME_LIMIT,
control_timestep=DEFAULT_CONTROL_TIMESTEP):
"""Loads `move_box_or_gtt` task."""
return _make_predicate_task(
n_boxes=1,
n_targets=n_targets,
include_gtt_predicates=True,
include_move_box_predicates=True,
max_num_predicates=2,
control_timestep=control_timestep,
time_limit=time_limit)
|
deepmind-research-master
|
box_arrangement/task_examples.py
|
# Copyright 2018 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DeepMind Lab textures."""
from dm_control import composer
from dm_control import mjcf
from labmaze import assets as labmaze_assets
class SkyBox(composer.Entity):
"""Represents a texture asset for the sky box."""
def _build(self, style):
labmaze_textures = labmaze_assets.get_sky_texture_paths(style)
self._mjcf_root = mjcf.RootElement(model='dmlab_' + style)
self._texture = self._mjcf_root.asset.add(
'texture', type='skybox', name='texture',
fileleft=labmaze_textures.left, fileright=labmaze_textures.right,
fileup=labmaze_textures.up, filedown=labmaze_textures.down,
filefront=labmaze_textures.front, fileback=labmaze_textures.back)
@property
def mjcf_model(self):
return self._mjcf_root
@property
def texture(self):
return self._texture
class WallTextures(composer.Entity):
"""Represents wall texture assets."""
def _build(self, style):
labmaze_textures = labmaze_assets.get_wall_texture_paths(style)
self._mjcf_root = mjcf.RootElement(model='dmlab_' + style)
self._textures = []
for texture_name, texture_path in labmaze_textures.items():
self._textures.append(self._mjcf_root.asset.add(
'texture', type='2d', name=texture_name,
file=texture_path.format(texture_name)))
@property
def mjcf_model(self):
return self._mjcf_root
@property
def textures(self):
return self._textures
class FloorTextures(composer.Entity):
"""Represents floor texture assets."""
def _build(self, style):
labmaze_textures = labmaze_assets.get_floor_texture_paths(style)
self._mjcf_root = mjcf.RootElement(model='dmlab_' + style)
self._textures = []
for texture_name, texture_path in labmaze_textures.items():
self._textures.append(self._mjcf_root.asset.add(
'texture', type='2d', name=texture_name,
file=texture_path.format(texture_name)))
@property
def mjcf_model(self):
return self._mjcf_root
@property
def textures(self):
return self._textures
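# A minimal usage sketch (added for illustration): instantiate a texture entity
# and inspect the MJCF it generates; 'style_03' matches the styles used above.
if __name__ == '__main__':
  wall = WallTextures(style='style_03')
  print(wall.mjcf_model.to_xml_string())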
|
deepmind-research-master
|
box_arrangement/dmlab_assets.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet (post-activation) with FixUp."""
# pylint: disable=invalid-name
import functools
import haiku as hk
import jax
import jax.numpy as jnp
from nfnets import base
nonlinearities = {
'swish': jax.nn.silu,
'relu': jax.nn.relu,
'identity': lambda x: x}
class FixUp_ResNet(hk.Module):
"""Fixup based ResNet."""
variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]},
'ResNet101': {'depth': [3, 4, 23, 3]},
'ResNet152': {'depth': [3, 8, 36, 3]},
'ResNet200': {'depth': [3, 24, 36, 3]},
'ResNet288': {'depth': [24, 24, 24, 24]},
'ResNet600': {'depth': [50, 50, 50, 50]},
}
def __init__(self, num_classes, variant='ResNet50', width=4,
stochdepth_rate=0.1, drop_rate=None,
activation='relu', fc_init=jnp.zeros,
name='FixUp_ResNet'):
super().__init__(name=name)
self.num_classes = num_classes
self.variant = variant
self.width = width
# Get variant info
block_params = self.variant_dict[self.variant]
self.width_pattern = [item * self.width for item in [64, 128, 256, 512]]
self.depth_pattern = block_params['depth']
self.activation = nonlinearities[activation]
    if drop_rate is None:
      # Note: the variant dicts above define no 'drop_rate', so default to 0.
      self.drop_rate = block_params.get('drop_rate', 0.0)
    else:
      self.drop_rate = drop_rate
self.which_conv = functools.partial(hk.Conv2D,
with_bias=False)
# Stem
ch = int(16 * self.width)
self.initial_conv = self.which_conv(ch, kernel_shape=7, stride=2,
padding='SAME',
name='initial_conv')
# Body
self.blocks = []
num_blocks = sum(self.depth_pattern)
index = 0 # Overall block index
block_args = (self.width_pattern, self.depth_pattern, [1, 2, 2, 2])
for block_width, stage_depth, stride in zip(*block_args):
for block_index in range(stage_depth):
# Block stochastic depth drop-rate
block_stochdepth_rate = stochdepth_rate * index / num_blocks
self.blocks += [ResBlock(ch, block_width, num_blocks,
stride=stride if block_index == 0 else 1,
activation=self.activation,
which_conv=self.which_conv,
stochdepth_rate=block_stochdepth_rate,
)]
ch = block_width
index += 1
# Head
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
def __call__(self, x, is_training=True, return_metrics=False):
"""Return the output of the final layer without any [log-]softmax."""
# Stem
outputs = {}
out = self.initial_conv(x)
bias1 = hk.get_parameter('bias1', (), x.dtype, init=jnp.zeros)
out = self.activation(out + bias1)
out = hk.max_pool(out, window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1), padding='SAME')
if return_metrics:
outputs.update(base.signal_metrics(out, 0))
# Blocks
for i, block in enumerate(self.blocks):
out, res_avg_var = block(out, is_training=is_training)
if return_metrics:
outputs.update(base.signal_metrics(out, i + 1))
outputs[f'res_avg_var_{i}'] = res_avg_var
# Final-conv->activation, pool, dropout, classify
pool = jnp.mean(out, [1, 2])
outputs['pool'] = pool
# Optionally apply dropout
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
bias2 = hk.get_parameter('bias2', (), pool.dtype, init=jnp.zeros)
outputs['logits'] = self.fc(pool + bias2)
return outputs
def count_flops(self, h, w):
flops = []
flops += [base.count_conv_flops(3, self.initial_conv, h, w)]
h, w = h / 2, w / 2
# Body FLOPs
for block in self.blocks:
flops += [block.count_flops(h, w)]
if block.stride > 1:
h, w = h / block.stride, w / block.stride
# Count flops for classifier
flops += [self.blocks[-1].out_ch * self.fc.output_size]
return flops, sum(flops)
class ResBlock(hk.Module):
"""Post-activation Fixup Block."""
def __init__(self, in_ch, out_ch, num_blocks, bottleneck_ratio=0.25,
kernel_size=3, stride=1,
which_conv=hk.Conv2D, activation=jax.nn.relu,
stochdepth_rate=None, name=None):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
self.kernel_size = kernel_size
self.activation = activation
# Bottleneck width
self.width = int(self.out_ch * bottleneck_ratio)
self.stride = stride
# Conv 0 (typically expansion conv)
conv0_init = hk.initializers.RandomNormal(
stddev=((2 / self.width)**0.5) * (num_blocks**(-0.25)))
self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME',
name='conv0', w_init=conv0_init)
# Grouped NxN conv
conv1_init = hk.initializers.RandomNormal(
stddev=((2 / (self.width * (kernel_size**2)))**0.5)
* (num_blocks**(-0.25)))
self.conv1 = which_conv(self.width, kernel_shape=kernel_size, stride=stride,
padding='SAME', name='conv1', w_init=conv1_init)
# Conv 2, typically projection conv
self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME',
name='conv2', w_init=hk.initializers.Constant(0))
# Use shortcut conv on channel change or downsample.
self.use_projection = stride > 1 or self.in_ch != self.out_ch
if self.use_projection:
shortcut_init = hk.initializers.RandomNormal(
stddev=(2 / self.out_ch) ** 0.5)
self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1,
stride=stride, padding='SAME',
name='conv_shortcut',
w_init=shortcut_init)
# Are we using stochastic depth?
self._has_stochdepth = (stochdepth_rate is not None and
stochdepth_rate > 0. and stochdepth_rate < 1.0)
if self._has_stochdepth:
self.stoch_depth = base.StochDepth(stochdepth_rate)
def __call__(self, x, is_training):
bias1a = hk.get_parameter('bias1a', (), x.dtype, init=jnp.zeros)
bias1b = hk.get_parameter('bias1b', (), x.dtype, init=jnp.zeros)
bias2a = hk.get_parameter('bias2a', (), x.dtype, init=jnp.zeros)
bias2b = hk.get_parameter('bias2b', (), x.dtype, init=jnp.zeros)
bias3a = hk.get_parameter('bias3a', (), x.dtype, init=jnp.zeros)
bias3b = hk.get_parameter('bias3b', (), x.dtype, init=jnp.zeros)
scale = hk.get_parameter('scale', (), x.dtype, init=jnp.ones)
out = x + bias1a
shortcut = out
if self.use_projection: # Downsample with conv1x1
shortcut = self.conv_shortcut(shortcut)
out = self.conv0(out)
out = self.activation(out + bias1b)
out = self.conv1(out + bias2a)
out = self.activation(out + bias2b)
out = self.conv2(out + bias3a)
out = out * scale + bias3b
# Get average residual variance for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
# Apply stochdepth if applicable.
if self._has_stochdepth:
out = self.stoch_depth(out, is_training)
    # Add the residual branch back onto the shortcut.
out = out + shortcut
return self.activation(out), res_avg_var
def count_flops(self, h, w):
# Count conv FLOPs based on input HW
expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w)
# If block is strided we decrease resolution here.
dw_flops = base.count_conv_flops(self.width, self.conv1, h, w)
if self.stride > 1:
h, w = h / self.stride, w / self.stride
if self.use_projection:
sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w)
else:
sc_flops = 0
contract_flops = base.count_conv_flops(self.width, self.conv2, h, w)
return sum([expand_flops, dw_flops, contract_flops, sc_flops])
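# A minimal forward-pass sketch (added for illustration): wrap the model in
# hk.transform_with_state and push dummy data through it. All shapes and
# hyperparameters here are arbitrary.
if __name__ == '__main__':
  def _forward(x, is_training):
    model = FixUp_ResNet(num_classes=10, variant='ResNet50', width=1,
                         drop_rate=0.0)
    return model(x, is_training=is_training)['logits']

  forward = hk.transform_with_state(_forward)
  rng = jax.random.PRNGKey(0)
  images = jnp.zeros((2, 32, 32, 3))
  params, state = forward.init(rng, images, is_training=True)
  logits, _ = forward.apply(params, state, rng, images, is_training=True)
  print(logits.shape)  # (2, 10)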
|
deepmind-research-master
|
nfnets/fixup_resnet.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""ImageNet experiment with NF-RegNets."""
from ml_collections import config_dict
from nfnets import experiment
def get_config():
"""Return config object for training."""
config = experiment.get_config()
# Experiment config.
train_batch_size = 1024 # Global batch size.
images_per_epoch = 1281167
num_epochs = 360
steps_per_epoch = images_per_epoch / train_batch_size
config.training_steps = ((images_per_epoch * num_epochs) // train_batch_size)
config.random_seed = 0
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
lr=0.4,
num_epochs=num_epochs,
label_smoothing=0.1,
model='NF_RegNet',
image_size=224,
use_ema=True,
              ema_decay=0.99999,  # Five nines.
ema_start=0,
augment_name='mixup_cutmix',
train_batch_size=train_batch_size,
eval_batch_size=50,
eval_subset='test',
num_classes=1000,
which_dataset='imagenet',
which_loss='softmax_cross_entropy', # One of softmax or sigmoid
bfloat16=False,
lr_schedule=dict(
name='WarmupCosineDecay',
kwargs=dict(num_steps=config.training_steps,
start_val=0,
min_val=0.001,
warmup_steps=5*steps_per_epoch),
),
lr_scale_by_bs=False,
optimizer=dict(
name='SGD',
kwargs={'momentum': 0.9, 'nesterov': True,
'weight_decay': 5e-5,},
),
model_kwargs=dict(
variant='B0',
width=0.75,
expansion=2.25,
se_ratio=0.5,
alpha=0.2,
stochdepth_rate=0.1,
drop_rate=None,
activation='silu',
),
)))
# Set weight decay based on variant (scaled as 5e-5 + 1e-5 * level)
variant = config.experiment_kwargs.config.model_kwargs.variant
weight_decay = {'B0': 5e-5, 'B1': 6e-5, 'B2': 7e-5,
'B3': 8e-5, 'B4': 9e-5, 'B5': 1e-4}[variant]
config.experiment_kwargs.config.optimizer.kwargs.weight_decay = weight_decay
return config
Experiment = experiment.Experiment
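# A minimal sketch (added for illustration): materialise the config and read
# back a few of the fields set above. Note that the weight decay is derived
# from `variant` inside get_config(), so change the variant there, not after.
if __name__ == '__main__':
  cfg = get_config()
  print(cfg.training_steps)
  print(cfg.experiment_kwargs.config.optimizer.kwargs['weight_decay'])  # 5e-5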
|
deepmind-research-master
|
nfnets/experiment_nf_regnets.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNetV2 (Pre-activation) with SkipInit."""
# pylint: disable=invalid-name
import haiku as hk
import jax
import jax.numpy as jnp
from nfnets import base
# Nonlinearities
nonlinearities = {
'swish': jax.nn.silu,
'relu': jax.nn.relu,
'identity': lambda x: x}
class SkipInit_ResNet(hk.Module):
"""Skip-Init based ResNet."""
variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]},
'ResNet101': {'depth': [3, 4, 23, 3]},
'ResNet152': {'depth': [3, 8, 36, 3]},
'ResNet200': {'depth': [3, 24, 36, 3]},
'ResNet288': {'depth': [24, 24, 24, 24]},
'ResNet600': {'depth': [50, 50, 50, 50]},
}
def __init__(self, num_classes, variant='ResNet50', width=4,
stochdepth_rate=0.1, drop_rate=None,
activation='relu', fc_init=jnp.zeros,
name='SkipInit_ResNet'):
super().__init__(name=name)
self.num_classes = num_classes
self.variant = variant
self.width = width
# Get variant info
block_params = self.variant_dict[self.variant]
self.width_pattern = [item * self.width for item in [64, 128, 256, 512]]
self.depth_pattern = block_params['depth']
self.activation = nonlinearities[activation]
    if drop_rate is None:
      # Note: the variant dicts above define no 'drop_rate', so default to 0.
      self.drop_rate = block_params.get('drop_rate', 0.0)
    else:
      self.drop_rate = drop_rate
self.which_conv = hk.Conv2D
# Stem
ch = int(16 * self.width)
self.initial_conv = self.which_conv(ch, kernel_shape=7, stride=2,
padding='SAME', with_bias=False,
name='initial_conv')
# Body
self.blocks = []
num_blocks = sum(self.depth_pattern)
index = 0 # Overall block index
block_args = (self.width_pattern, self.depth_pattern, [1, 2, 2, 2])
for block_width, stage_depth, stride in zip(*block_args):
for block_index in range(stage_depth):
# Block stochastic depth drop-rate
block_stochdepth_rate = stochdepth_rate * index / num_blocks
self.blocks += [NFResBlock(ch, block_width,
stride=stride if block_index == 0 else 1,
activation=self.activation,
which_conv=self.which_conv,
stochdepth_rate=block_stochdepth_rate,
)]
ch = block_width
index += 1
# Head
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
def __call__(self, x, is_training=True, return_metrics=False):
"""Return the output of the final layer without any [log-]softmax."""
# Stem
outputs = {}
out = self.initial_conv(x)
out = hk.max_pool(out, window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1), padding='SAME')
if return_metrics:
outputs.update(base.signal_metrics(out, 0))
# Blocks
for i, block in enumerate(self.blocks):
out, res_avg_var = block(out, is_training=is_training)
if return_metrics:
outputs.update(base.signal_metrics(out, i + 1))
outputs[f'res_avg_var_{i}'] = res_avg_var
# Final-conv->activation, pool, dropout, classify
pool = jnp.mean(self.activation(out), [1, 2])
outputs['pool'] = pool
# Optionally apply dropout
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
outputs['logits'] = self.fc(pool)
return outputs
def count_flops(self, h, w):
flops = []
flops += [base.count_conv_flops(3, self.initial_conv, h, w)]
h, w = h / 2, w / 2
# Body FLOPs
for block in self.blocks:
flops += [block.count_flops(h, w)]
if block.stride > 1:
h, w = h / block.stride, w / block.stride
# Count flops for classifier
flops += [self.blocks[-1].out_ch * self.fc.output_size]
return flops, sum(flops)
class NFResBlock(hk.Module):
"""Normalizer-Free pre-activation ResNet Block."""
def __init__(self, in_ch, out_ch, bottleneck_ratio=0.25,
kernel_size=3, stride=1,
which_conv=hk.Conv2D, activation=jax.nn.relu,
stochdepth_rate=None, name=None):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
self.kernel_size = kernel_size
self.activation = activation
# Bottleneck width
self.width = int(self.out_ch * bottleneck_ratio)
self.stride = stride
# Conv 0 (typically expansion conv)
self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME',
name='conv0')
# Grouped NxN conv
self.conv1 = which_conv(self.width, kernel_shape=kernel_size, stride=stride,
padding='SAME', name='conv1')
# Conv 2, typically projection conv
self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME',
name='conv2')
# Use shortcut conv on channel change or downsample.
self.use_projection = stride > 1 or self.in_ch != self.out_ch
if self.use_projection:
self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1,
stride=stride, padding='SAME',
name='conv_shortcut')
# Are we using stochastic depth?
self._has_stochdepth = (stochdepth_rate is not None and
stochdepth_rate > 0. and stochdepth_rate < 1.0)
if self._has_stochdepth:
self.stoch_depth = base.StochDepth(stochdepth_rate)
def __call__(self, x, is_training):
out = self.activation(x)
shortcut = x
if self.use_projection: # Downsample with conv1x1
shortcut = self.conv_shortcut(out)
out = self.conv0(out)
out = self.conv1(self.activation(out))
out = self.conv2(self.activation(out))
    # Get average residual variance for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
# Apply stochdepth if applicable.
if self._has_stochdepth:
out = self.stoch_depth(out, is_training)
# SkipInit Gain
out = out * hk.get_parameter('skip_gain', (), out.dtype, init=jnp.zeros)
return out + shortcut, res_avg_var
def count_flops(self, h, w):
# Count conv FLOPs based on input HW
expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w)
# If block is strided we decrease resolution here.
dw_flops = base.count_conv_flops(self.width, self.conv1, h, w)
if self.stride > 1:
h, w = h / self.stride, w / self.stride
if self.use_projection:
sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w)
else:
sc_flops = 0
contract_flops = base.count_conv_flops(self.width, self.conv2, h, w)
return sum([expand_flops, dw_flops, contract_flops, sc_flops])
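# A sanity-check sketch (added for illustration): at initialisation `skip_gain`
# is zero, so an unprojected block is exactly the identity map.
if __name__ == '__main__':
  def _block(x, is_training):
    return NFResBlock(in_ch=8, out_ch=8)(x, is_training=is_training)

  fwd = hk.transform_with_state(_block)
  rng = jax.random.PRNGKey(0)
  x = jax.random.normal(rng, (2, 16, 16, 8))
  params, state = fwd.init(rng, x, is_training=False)
  (out, _), _ = fwd.apply(params, state, rng, x, is_training=False)
  assert jnp.allclose(out, x)  # residual branch is gated off at init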
|
deepmind-research-master
|
nfnets/skipinit_resnet.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalizer-Free RegNets."""
# pylint: disable=invalid-name
import haiku as hk
import jax
import jax.numpy as jnp
from nfnets import base
class NF_RegNet(hk.Module):
"""Normalizer-Free RegNets."""
variant_dict = base.nf_regnet_params
def __init__(self, num_classes, variant='B0',
width=0.75, expansion=2.25, group_size=8, se_ratio=0.5,
alpha=0.2, stochdepth_rate=0.1, drop_rate=None,
activation='swish', fc_init=jnp.zeros,
name='NF_RegNet'):
super().__init__(name=name)
self.num_classes = num_classes
self.variant = variant
self.width = width
self.expansion = expansion
self.group_size = group_size
self.se_ratio = se_ratio
# Get variant info
block_params = self.variant_dict[self.variant]
self.train_imsize = block_params['train_imsize']
self.test_imsize = block_params['test_imsize']
self.width_pattern = block_params['width']
self.depth_pattern = block_params['depth']
self.activation = base.nonlinearities[activation]
if drop_rate is None:
self.drop_rate = block_params['drop_rate']
else:
self.drop_rate = drop_rate
self.which_conv = base.WSConv2D
# Stem
ch = int(self.width_pattern[0] * self.width)
self.initial_conv = self.which_conv(ch, kernel_shape=3, stride=2,
padding='SAME', name='initial_conv')
# Body
self.blocks = []
expected_std = 1.0
num_blocks = sum(self.depth_pattern)
index = 0 # Overall block index
for block_width, stage_depth in zip(self.width_pattern, self.depth_pattern):
for block_index in range(stage_depth):
# Scalar pre-multiplier so each block sees an N(0,1) input at init
beta = 1./ expected_std
# Block stochastic depth drop-rate
block_stochdepth_rate = stochdepth_rate * index / num_blocks
# Use a bottleneck expansion ratio of 1 for first block following EffNet
expand_ratio = 1 if index == 0 else expansion
out_ch = (int(block_width * self.width))
self.blocks += [NFBlock(ch, out_ch,
expansion=expand_ratio, se_ratio=se_ratio,
group_size=self.group_size,
stride=2 if block_index == 0 else 1,
beta=beta, alpha=alpha,
activation=self.activation,
which_conv=self.which_conv,
stochdepth_rate=block_stochdepth_rate,
)]
ch = out_ch
index += 1
# Reset expected std but still give it 1 block of growth
if block_index == 0:
expected_std = 1.0
expected_std = (expected_std **2 + alpha**2)**0.5
# Head with final conv mimicking EffNets
self.final_conv = self.which_conv(int(1280 * ch // 440), kernel_shape=1,
padding='SAME', name='final_conv')
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
def __call__(self, x, is_training=True, return_metrics=False):
"""Return the output of the final layer without any [log-]softmax."""
# Stem
outputs = {}
out = self.initial_conv(x)
if return_metrics:
outputs.update(base.signal_metrics(out, 0))
# Blocks
for i, block in enumerate(self.blocks):
out, res_avg_var = block(out, is_training=is_training)
if return_metrics:
outputs.update(base.signal_metrics(out, i + 1))
outputs[f'res_avg_var_{i}'] = res_avg_var
# Final-conv->activation, pool, dropout, classify
out = self.activation(self.final_conv(out))
pool = jnp.mean(out, [1, 2])
outputs['pool'] = pool
# Optionally apply dropout
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
outputs['logits'] = self.fc(pool)
return outputs
def count_flops(self, h, w):
flops = []
flops += [base.count_conv_flops(3, self.initial_conv, h, w)]
h, w = h / 2, w / 2
# Body FLOPs
for block in self.blocks:
flops += [block.count_flops(h, w)]
if block.stride > 1:
h, w = h / block.stride, w / block.stride
# Head module FLOPs
out_ch = self.blocks[-1].out_ch
flops += [base.count_conv_flops(out_ch, self.final_conv, h, w)]
# Count flops for classifier
flops += [self.final_conv.output_channels * self.fc.output_size]
return flops, sum(flops)
class NFBlock(hk.Module):
"""Normalizer-Free RegNet Block."""
def __init__(self, in_ch, out_ch, expansion=2.25, se_ratio=0.5,
kernel_size=3, group_size=8, stride=1,
beta=1.0, alpha=0.2,
which_conv=base.WSConv2D, activation=jax.nn.relu,
stochdepth_rate=None, name=None):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
self.expansion = expansion
self.se_ratio = se_ratio
self.kernel_size = kernel_size
self.activation = activation
self.beta, self.alpha = beta, alpha
    # Round the expanded width based on the group count.
width = int(self.in_ch * expansion)
self.groups = width // group_size
self.width = group_size * self.groups
self.stride = stride
# Conv 0 (typically expansion conv)
self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME',
name='conv0')
# Grouped NxN conv
self.conv1 = which_conv(self.width, kernel_shape=kernel_size, stride=stride,
padding='SAME', feature_group_count=self.groups,
name='conv1')
# Conv 2, typically projection conv
self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME',
name='conv2')
# Use shortcut conv on channel change or downsample.
self.use_projection = stride > 1 or self.in_ch != self.out_ch
if self.use_projection:
self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1,
padding='SAME', name='conv_shortcut')
# Squeeze + Excite Module
self.se = base.SqueezeExcite(self.width, self.width, self.se_ratio)
# Are we using stochastic depth?
self._has_stochdepth = (stochdepth_rate is not None and
stochdepth_rate > 0. and stochdepth_rate < 1.0)
if self._has_stochdepth:
self.stoch_depth = base.StochDepth(stochdepth_rate)
def __call__(self, x, is_training):
out = self.activation(x) * self.beta
if self.stride > 1: # Average-pool downsample.
shortcut = hk.avg_pool(out, window_shape=(1, 2, 2, 1),
strides=(1, 2, 2, 1), padding='SAME')
if self.use_projection:
shortcut = self.conv_shortcut(shortcut)
elif self.use_projection:
shortcut = self.conv_shortcut(out)
else:
shortcut = x
out = self.conv0(out)
out = self.conv1(self.activation(out))
out = 2 * self.se(out) * out # Multiply by 2 for rescaling
out = self.conv2(self.activation(out))
    # Get average residual variance for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
# Apply stochdepth if applicable.
if self._has_stochdepth:
out = self.stoch_depth(out, is_training)
# SkipInit Gain
out = out * hk.get_parameter('skip_gain', (), out.dtype, init=jnp.zeros)
return out * self.alpha + shortcut, res_avg_var
def count_flops(self, h, w):
# Count conv FLOPs based on input HW
expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w)
# If block is strided we decrease resolution here.
dw_flops = base.count_conv_flops(self.width, self.conv1, h, w)
if self.stride > 1:
h, w = h / self.stride, w / self.stride
if self.use_projection:
sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w)
else:
sc_flops = 0
# SE flops happen on avg-pooled activations
se_flops = self.se.fc0.output_size * self.width
se_flops += self.se.fc0.output_size * self.se.fc1.output_size
contract_flops = base.count_conv_flops(self.width, self.conv2, h, w)
return sum([expand_flops, dw_flops, se_flops, contract_flops, sc_flops])
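# A worked sketch (added for illustration) of the variance bookkeeping in
# NF_RegNet.__init__ above: with alpha=0.2 and a stage of depth 3, beta shrinks
# as the expected signal std grows, and the transition block resets the
# estimate before one block of growth.
if __name__ == '__main__':
  alpha, expected_std = 0.2, 1.0
  for block_index in range(3):
    beta = 1. / expected_std  # pre-multiplier handed to the block
    if block_index == 0:
      expected_std = 1.0
    expected_std = (expected_std ** 2 + alpha ** 2) ** 0.5
    print(block_index, beta, expected_std)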
|
deepmind-research-master
|
nfnets/nf_regnet.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quick script to test that experiment can import and run."""
import jax
import jax.numpy as jnp
from nfnets import experiment
from nfnets import experiment_nfnets
def test_experiment():
"""Tests the main experiment."""
config = experiment.get_config()
exp_config = config.experiment_kwargs.config
exp_config.train_batch_size = 2
exp_config.eval_batch_size = 2
exp_config.lr = 0.1
exp_config.fake_data = True
exp_config.model_kwargs.width = 2
print(exp_config.model_kwargs)
xp = experiment.Experiment('train', exp_config, jax.random.PRNGKey(0))
bcast = jax.pmap(lambda x: x)
global_step = bcast(jnp.zeros(jax.local_device_count()))
rng = bcast(jnp.stack([jax.random.PRNGKey(0)] * jax.local_device_count()))
print('Taking a single experiment step for test purposes!')
result = xp.step(global_step, rng)
print(f'Step successfully taken, resulting metrics are {result}')
def test_nfnet_experiment():
"""Tests the NFNet experiment."""
config = experiment_nfnets.get_config()
exp_config = config.experiment_kwargs.config
exp_config.train_batch_size = 2
exp_config.eval_batch_size = 2
exp_config.lr = 0.1
exp_config.fake_data = True
exp_config.model_kwargs.width = 2
print(exp_config.model_kwargs)
xp = experiment_nfnets.Experiment('train', exp_config, jax.random.PRNGKey(0))
bcast = jax.pmap(lambda x: x)
global_step = bcast(jnp.zeros(jax.local_device_count()))
rng = bcast(jnp.stack([jax.random.PRNGKey(0)] * jax.local_device_count()))
print('Taking a single NFNet experiment step for test purposes!')
result = xp.step(global_step, rng)
print(f'NFNet Step successfully taken, resulting metrics are {result}')
test_experiment()
test_nfnet_experiment()
|
deepmind-research-master
|
nfnets/test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adaptive gradient clipping transform for Optax."""
import jax
import jax.numpy as jnp
import optax
def compute_norm(x, axis, keepdims):
"""Axis-wise euclidean norm."""
return jnp.sum(x ** 2, axis=axis, keepdims=keepdims) ** 0.5
def unitwise_norm(x):
"""Compute norms of each output unit separately, also for linear layers."""
if len(jnp.squeeze(x).shape) <= 1: # Scalars and vectors
axis = None
keepdims = False
elif len(x.shape) in [2, 3]: # Linear layers of shape IO or multihead linear
axis = 0
keepdims = True
elif len(x.shape) == 4: # Conv kernels of shape HWIO
axis = [0, 1, 2,]
keepdims = True
else:
    raise ValueError(f'Got a parameter with shape not in [1, 2, 3, 4]! {x}')
return compute_norm(x, axis, keepdims)
def my_clip(g_norm, max_norm, grad):
"""Applies my gradient clipping unit-wise."""
trigger = g_norm < max_norm
# This little max(., 1e-6) is distinct from the normal eps and just prevents
# division by zero. It technically should be impossible to engage.
clipped_grad = grad * (max_norm / jnp.maximum(g_norm, 1e-6))
return jnp.where(trigger, grad, clipped_grad)
def adaptive_grad_clip(clip, eps=1e-3) -> optax.GradientTransformation:
"""Clip updates to be at most clipping * parameter_norm.
References:
[Brock, Smith, De, Simonyan 2021] High-Performance Large-Scale Image
Recognition Without Normalization.
Args:
clip: Maximum allowed ratio of update norm to parameter norm.
eps: epsilon term to prevent clipping of zero-initialized params.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_):
return optax.ClipByGlobalNormState()
def update_fn(updates, state, params):
g_norm = jax.tree_map(unitwise_norm, updates)
p_norm = jax.tree_map(unitwise_norm, params)
# Maximum allowable norm
max_norm = jax.tree_map(lambda x: clip * jnp.maximum(x, eps), p_norm)
# If grad norm > clipping * param_norm, rescale
updates = jax.tree_map(my_clip, g_norm, max_norm, updates)
return updates, state
return optax.GradientTransformation(init_fn, update_fn)
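# A minimal usage sketch (added for illustration): compose AGC with SGD. Any
# optax optimiser can follow it in the chain; the shapes here are arbitrary.
if __name__ == '__main__':
  params = {'w': jnp.ones((3, 3, 16, 32))}
  grads = {'w': 10. * jnp.ones((3, 3, 16, 32))}
  tx = optax.chain(adaptive_grad_clip(clip=0.01), optax.sgd(0.1))
  opt_state = tx.init(params)
  updates, opt_state = tx.update(grads, opt_state, params)
  params = optax.apply_updates(params, updates)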
|
deepmind-research-master
|
nfnets/agc_optax.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet dataset with typical pre-processing and advanced augs."""
# pylint: disable=logging-format-interpolation
import enum
import itertools as it
import logging
import re
from typing import Generator, Mapping, Optional, Sequence, Text, Tuple
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
from nfnets import autoaugment
Batch = Mapping[Text, np.ndarray]
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
AUTOTUNE = tf.data.experimental.AUTOTUNE
class Split(enum.Enum):
"""Imagenet dataset split."""
TRAIN = 1
TRAIN_AND_VALID = 2
VALID = 3
TEST = 4
@classmethod
def from_string(cls, name: Text) -> 'Split':
return {'TRAIN': Split.TRAIN, 'TRAIN_AND_VALID': Split.TRAIN_AND_VALID,
'VALID': Split.VALID, 'VALIDATION': Split.VALID,
'TEST': Split.TEST}[name.upper()]
@property
def num_examples(self):
return {Split.TRAIN_AND_VALID: 1281167, Split.TRAIN: 1271167,
Split.VALID: 10000, Split.TEST: 50000}[self]
def load(
split: Split,
*,
is_training: bool,
batch_dims: Sequence[int],
name: str = 'imagenet',
dtype: jnp.dtype = jnp.float32,
transpose: bool = False,
fake_data: bool = False,
image_size: Tuple[int, int] = (224, 224),
augment_name: Optional[str] = None,
eval_preproc: str = 'crop_resize',
augment_before_mix: bool = True,
) -> Generator[Batch, None, None]:
"""Loads the given split of the dataset.
Args:
split: Dataset split to use.
is_training: If true, use training preproc and augmentation.
batch_dims: List indicating how to batch the dataset (typically expected to
      be of shape (num_devices, bs_per_device)).
name: Which dataset to use, (must be 'imagenet')
dtype: One of float32 or bfloat16 (bf16 may not be supported fully)
transpose: If true, employs double transpose trick.
fake_data: Return batches of fake data for debugging purposes.
image_size: Final image size returned by dataset pipeline. Note that the
exact procedure to arrive at this size will depend on the chosen preproc.
augment_name: Optional additional aug strategy (applied atop the default
of distorted bboxes and random L/R flips). Specified with a string
      such as 'cutmix_mixup_0.4_randaugment_415'. See the README for details.
eval_preproc: Eval preproc method, either 'crop_resize' (crop on the long
edge then resize) or `resize_crop_{pct}`, which will resize the image to
`image_size / pct` on each side then take a center crop.
augment_before_mix: Apply augs like RA/AA before or after cutmix/mixup.
Yields:
A TFDS numpy iterator.
"""
start, end = _shard(split, jax.host_id(), jax.host_count())
if fake_data:
print('Using fake data!')
images = np.zeros(tuple(batch_dims) + image_size + (3,), dtype=dtype)
labels = np.zeros(tuple(batch_dims), dtype=np.int32)
if transpose:
axes = tuple(range(images.ndim))
axes = axes[:-4] + axes[-3:] + (axes[-4],) # NHWC -> HWCN
images = np.transpose(images, axes)
yield from it.repeat({'images': images, 'labels': labels}, end - start)
return
total_batch_size = np.prod(batch_dims)
if name.lower() == 'imagenet':
tfds_split = tfds.core.ReadInstruction(_to_tfds_split(split),
from_=start, to=end, unit='abs')
ds = tfds.load('imagenet2012:5.*.*', split=tfds_split,
decoders={'image': tfds.decode.SkipDecoding()})
else:
raise ValueError('Only imagenet is presently supported for this dataset.')
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = 48
options.experimental_threading.max_intra_op_parallelism = 1
options.experimental_optimization.map_parallelization = True
options.experimental_optimization.parallel_batch = True
if is_training:
options.experimental_deterministic = False
ds = ds.with_options(options)
if is_training:
if jax.host_count() > 1:
# Only cache if we are reading a subset of the dataset.
ds = ds.cache()
ds = ds.repeat()
ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=None)
else:
if split.num_examples % total_batch_size != 0:
raise ValueError(f'Test/valid must be divisible by {total_batch_size}')
def augment_normalize(batch):
"""Optionally augment, then normalize an image."""
batch = dict(**batch)
image = _augment_image(batch['images'], is_training, augment_name)
batch['images'] = _normalize_image(image)
return batch
def preprocess(example):
image = _preprocess_image(example['image'], is_training, image_size,
eval_preproc)
label = tf.cast(example['label'], tf.int32)
out = {'images': image, 'labels': label}
if augment_name is not None and 'cutmix' in augment_name:
out['mask'] = cutmix_padding(*image_size)
out['cutmix_ratio'] = tf.reduce_mean(out['mask'])
if augment_name is not None and 'mixup' in augment_name:
mixup_alpha = 0.2 # default to alpha=0.2
# If float provided, get it
if 'mixup_' in augment_name:
alpha = augment_name.split('mixup_')[1].split('_')
if any(alpha) and re.match(r'^-?\d+(?:\.\d+)?$', alpha[0]) is not None:
mixup_alpha = float(alpha[0])
beta = tfp.distributions.Beta(mixup_alpha, mixup_alpha)
out['mixup_ratio'] = beta.sample()
# Apply augs before mixing?
if augment_before_mix or augment_name is None:
out = augment_normalize(out)
return out
ds = ds.map(preprocess, num_parallel_calls=AUTOTUNE)
ds = ds.prefetch(AUTOTUNE)
def transpose_fn(batch):
# Applies the double-transpose trick for TPU.
batch = dict(**batch)
batch['images'] = tf.transpose(batch['images'], (1, 2, 3, 0))
return batch
def cast_fn(batch):
batch = dict(**batch)
batch['images'] = tf.cast(batch['images'], tf.dtypes.as_dtype(dtype))
return batch
for i, batch_size in enumerate(reversed(batch_dims)):
if i == 0:
# Deal with vectorized MixUp + CutMix ops
if augment_name is not None:
if 'mixup' in augment_name or 'cutmix' in augment_name:
ds = ds.batch(batch_size * 2)
else:
ds = ds.map(augment_normalize, num_parallel_calls=AUTOTUNE)
ds = ds.batch(batch_size)
# Apply mixup, cutmix, or mixup + cutmix
if 'mixup' in augment_name and 'cutmix' not in augment_name:
logging.info('Applying MixUp!')
ds = ds.map(my_mixup, num_parallel_calls=AUTOTUNE)
elif 'cutmix' in augment_name and 'mixup' not in augment_name:
logging.info('Applying CutMix!')
ds = ds.map(my_cutmix, num_parallel_calls=AUTOTUNE)
elif 'mixup' in augment_name and 'cutmix' in augment_name:
logging.info('Applying MixUp and CutMix!')
ds = ds.map(my_mixup_cutmix, num_parallel_calls=AUTOTUNE)
# If applying augs after mixing, unbatch, map, and rebatch
if (not augment_before_mix and
('mixup' in augment_name or 'cutmix' in augment_name)):
ds = ds.unbatch().map(augment_normalize, num_parallel_calls=AUTOTUNE)
ds = ds.batch(batch_size)
else:
ds = ds.batch(batch_size)
      # Transpose and cast as needed.
if transpose:
ds = ds.map(transpose_fn) # NHWC -> HWCN
# NOTE: You may be tempted to move the casting earlier on in the pipeline,
# but for bf16 some operations will end up silently placed on the TPU and
# this causes stalls while TF and JAX battle for the accelerator.
ds = ds.map(cast_fn)
else:
ds = ds.batch(batch_size)
ds = ds.prefetch(AUTOTUNE)
ds = tfds.as_numpy(ds)
yield from ds
def cutmix_padding(h, w):
"""Returns image mask for CutMix.
Taken from (https://github.com/google/edward2/blob/master/experimental
/marginalization_mixup/data_utils.py#L367)
Args:
h: image height.
w: image width.
"""
r_x = tf.random.uniform([], 0, w, tf.int32)
r_y = tf.random.uniform([], 0, h, tf.int32)
# Beta dist in paper, but they used Beta(1,1) which is just uniform.
image1_proportion = tf.random.uniform([])
patch_length_ratio = tf.math.sqrt(1 - image1_proportion)
r_w = tf.cast(patch_length_ratio * tf.cast(w, tf.float32), tf.int32)
r_h = tf.cast(patch_length_ratio * tf.cast(h, tf.float32), tf.int32)
bbx1 = tf.clip_by_value(tf.cast(r_x - r_w // 2, tf.int32), 0, w)
bby1 = tf.clip_by_value(tf.cast(r_y - r_h // 2, tf.int32), 0, h)
bbx2 = tf.clip_by_value(tf.cast(r_x + r_w // 2, tf.int32), 0, w)
bby2 = tf.clip_by_value(tf.cast(r_y + r_h // 2, tf.int32), 0, h)
# Create the binary mask.
pad_left = bbx1
pad_top = bby1
pad_right = tf.maximum(w - bbx2, 0)
pad_bottom = tf.maximum(h - bby2, 0)
r_h = bby2 - bby1
r_w = bbx2 - bbx1
mask = tf.pad(
tf.ones((r_h, r_w)),
paddings=[[pad_top, pad_bottom], [pad_left, pad_right]],
mode='CONSTANT',
constant_values=0)
mask.set_shape((h, w))
return mask[..., None] # Add channel dim.
def my_cutmix(batch):
"""Cutmix."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
mask = batch['mask'][:bs]
images = (mask * batch['images'][:bs] + (1.0 - mask) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
ratio = batch['cutmix_ratio'][:bs]
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def my_mixup(batch):
"""Mixup."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
ratio = batch['mixup_ratio'][:bs, None, None, None]
images = (ratio * batch['images'][:bs] + (1.0 - ratio) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
  ratio = ratio[..., 0, 0, 0]  # Squeeze the spatial dims back out.
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def mixup_or_cutmix(batch):
"""Randomly applies one of cutmix or mixup to a batch."""
logging.info('Randomly applying cutmix or mixup with 50% chance!')
return tf.cond(
tf.cast(tf.random.uniform([], maxval=2, dtype=tf.int32), tf.bool),
lambda: my_mixup(batch),
lambda: my_cutmix(batch))
def my_mixup_cutmix(batch):
"""Apply mixup to half the batch, and cutmix to the other."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 4
mixup_ratio = batch['mixup_ratio'][:bs, None, None, None]
mixup_images = (mixup_ratio * batch['images'][:bs]
+ (1.0 - mixup_ratio) * batch['images'][bs:2*bs])
mixup_labels = batch['labels'][:bs]
mixup_mix_labels = batch['labels'][bs:2*bs]
cutmix_mask = batch['mask'][2*bs:3*bs]
cutmix_images = (cutmix_mask * batch['images'][2*bs:3*bs]
+ (1.0 - cutmix_mask) * batch['images'][-bs:])
cutmix_labels = batch['labels'][2*bs:3*bs]
cutmix_mix_labels = batch['labels'][-bs:]
cutmix_ratio = batch['cutmix_ratio'][2*bs : 3*bs]
return {'images': tf.concat([mixup_images, cutmix_images], axis=0),
'labels': tf.concat([mixup_labels, cutmix_labels], axis=0),
'mix_labels': tf.concat([mixup_mix_labels, cutmix_mix_labels], 0),
'ratio': tf.concat([mixup_ratio[..., 0, 0, 0], cutmix_ratio], axis=0)}
def _to_tfds_split(split: Split) -> tfds.Split:
"""Returns the TFDS split appropriately sharded."""
if split in (Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID):
return tfds.Split.TRAIN
else:
assert split == Split.TEST
return tfds.Split.VALIDATION
def _shard(split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]:
"""Returns [start, end) for the given shard index."""
assert shard_index < num_shards
arange = np.arange(split.num_examples)
shard_range = np.array_split(arange, num_shards)[shard_index]
start, end = shard_range[0], (shard_range[-1] + 1)
if split == Split.TRAIN:
# Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000].
offset = Split.VALID.num_examples
start += offset
end += offset
return start, end
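# For example (illustrative): sharding the TEST split over two hosts gives
#   _shard(Split.TEST, 0, 2) == (0, 25000)
#   _shard(Split.TEST, 1, 2) == (25000, 50000)
# while TRAIN ranges are shifted by the 10000 VALID examples.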
def _preprocess_image(
image_bytes: tf.Tensor,
is_training: bool,
image_size: Sequence[int],
eval_preproc: str = 'crop_resize'
) -> tf.Tensor:
"""Returns processed and resized images."""
# NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
# clamping overshoots. This means values returned will be outside the range
# [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]).
if is_training:
image = _decode_and_random_crop(image_bytes, image_size)
image = tf.image.random_flip_left_right(image)
assert image.dtype == tf.uint8
image = tf.image.resize(image, image_size, tf.image.ResizeMethod.BICUBIC)
else:
if eval_preproc == 'crop_resize':
image = _decode_and_center_crop(image_bytes, image_size=image_size)
assert image.dtype == tf.uint8
image = tf.image.resize(image, image_size, tf.image.ResizeMethod.BICUBIC)
elif 'resize_crop' in eval_preproc:
# Pass in crop percent
crop_pct = float(eval_preproc.split('_')[-1])
image = _decode_and_resize_then_crop(image_bytes, image_size=image_size,
crop_pct=crop_pct)
else:
raise ValueError(f'Unknown Eval Preproc {eval_preproc} provided!')
return image
def _augment_image(
image: tf.Tensor,
is_training: bool,
augment_name: Optional[str] = None,
) -> tf.Tensor:
"""Applies AA/RA to an image."""
if is_training and augment_name:
if 'autoaugment' in augment_name or 'randaugment' in augment_name:
input_image_type = image.dtype
image = tf.clip_by_value(image, 0.0, 255.0)
# Autoaugment requires a uint8 image; we cast here and then cast back
image = tf.cast(image, dtype=tf.uint8)
if 'autoaugment' in augment_name:
logging.info(f'Applying AutoAugment policy {augment_name}')
image = autoaugment.distort_image_with_autoaugment(image, 'v0')
elif 'randaugment' in augment_name:
magnitude = int(augment_name.split('_')[-1]) # pytype: disable=attribute-error
# Allow passing in num_layers as a magnitude > 100
if magnitude > 100:
num_layers = magnitude // 100
magnitude = magnitude - int(num_layers * 100)
else:
num_layers = 2
logging.info(f'Applying RA {num_layers} x {magnitude}')
image = autoaugment.distort_image_with_randaugment(
image, num_layers=num_layers, magnitude=magnitude)
image = tf.cast(image, dtype=input_image_type)
return image
def _normalize_image(image: tf.Tensor) -> tf.Tensor:
"""Normalize the image to zero mean and unit variance."""
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def _distorted_bounding_box_crop(
image_bytes: tf.Tensor,
*,
jpeg_shape: tf.Tensor,
bbox: tf.Tensor,
min_object_covered: float,
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
max_attempts: int,
) -> tf.Tensor:
"""Generates cropped_image using one of the bboxes randomly distorted."""
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
jpeg_shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = [offset_y, offset_x, target_height, target_width]
image = crop(image_bytes, crop_window)
return image
def _decode_and_random_crop(image_bytes: tf.Tensor,
image_size: Sequence[int] = (224, 224),
jpeg_shape: Optional[tf.Tensor] = None
) -> tf.Tensor:
"""Make a random crop of chosen size."""
if jpeg_shape is None:
jpeg_shape = get_shape(image_bytes)
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = _distorted_bounding_box_crop(
image_bytes,
jpeg_shape=jpeg_shape,
bbox=bbox,
min_object_covered=0.1,
aspect_ratio_range=(3 / 4, 4 / 3),
area_range=(0.08, 1.0),
max_attempts=10)
if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))):
# If the random crop failed fall back to center crop.
image = _decode_and_center_crop(image_bytes, jpeg_shape, image_size)
return image
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
image_size: Sequence[int] = (224, 224),
) -> tf.Tensor:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
jpeg_shape = get_shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
# Pad the image with at least 32px on the short edge and take a
# crop that maintains aspect ratio.
scale = tf.minimum(tf.cast(image_height, tf.float32) / (image_size[0] + 32),
tf.cast(image_width, tf.float32) / (image_size[1] + 32))
padded_center_crop_height = tf.cast(scale * image_size[0], tf.int32)
padded_center_crop_width = tf.cast(scale * image_size[1], tf.int32)
offset_height = ((image_height - padded_center_crop_height) + 1) // 2
offset_width = ((image_width - padded_center_crop_width) + 1) // 2
crop_window = [offset_height, offset_width,
padded_center_crop_height, padded_center_crop_width]
image = crop(image_bytes, crop_window)
return image
def get_shape(image_bytes):
"""Gets the image shape for jpeg bytes or a uint8 decoded image."""
if image_bytes.dtype == tf.dtypes.string:
image_shape = tf.image.extract_jpeg_shape(image_bytes)
else:
image_shape = tf.shape(image_bytes)
return image_shape
def crop(image_bytes, crop_window):
"""Helper function to crop a jpeg or a decoded image."""
if image_bytes.dtype == tf.dtypes.string:
image = tf.image.decode_and_crop_jpeg(image_bytes,
tf.stack(crop_window),
channels=3)
else:
image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
return image
def _decode_and_resize_then_crop(
image_bytes: tf.Tensor,
image_size: Sequence[int] = (224, 224),
crop_pct: float = 1.0,
) -> tf.Tensor:
"""Rescales an image to image_size / crop_pct, then center crops."""
image = tf.image.decode_jpeg(image_bytes, channels=3)
# Scale image to "scaled size" before taking a center crop
if crop_pct > 1.0: # If crop_pct is >1, treat it as num pad pixels (like VGG)
scale_size = tuple([int(x + crop_pct) for x in image_size])
else:
scale_size = tuple([int(float(x) / crop_pct) for x in image_size])
image = tf.image.resize(image, scale_size, tf.image.ResizeMethod.BICUBIC)
crop_height = tf.cast(image_size[0], tf.int32)
crop_width = tf.cast(image_size[1], tf.int32)
offset_height = ((scale_size[0] - crop_height) + 1) // 2
offset_width = ((scale_size[1] - crop_width) + 1) // 2
crop_window = [offset_height, offset_width, crop_height, crop_width]
image = crop(image, crop_window)
return image
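# A minimal usage sketch (added for illustration): pull one batch of fake data;
# batch_dims follows the (num_devices, bs_per_device) layout described above.
if __name__ == '__main__':
  it = load(Split.TRAIN, is_training=True, batch_dims=[1, 8], fake_data=True)
  batch = next(it)
  print(batch['images'].shape, batch['labels'].shape)  # (1, 8, 224, 224, 3) (1, 8)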
|
deepmind-research-master
|
nfnets/dataset.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Basic Jaxline ImageNet experiment."""
import importlib
import sys
from absl import flags
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import base_config
from jaxline import experiment
from jaxline import platform
from jaxline import utils as jl_utils
from ml_collections import config_dict
import numpy as np
from nfnets import dataset
from nfnets import optim
from nfnets import utils
# pylint: disable=logging-format-interpolation
FLAGS = flags.FLAGS
# We define the experiment launch config in the same file as the experiment to
# keep things self-contained in a single file, but one might consider moving the
# config and/or sweep functions to a separate file, if necessary.
def get_config():
"""Return config object for training."""
config = base_config.get_base_config()
# Experiment config.
train_batch_size = 1024 # Global batch size.
images_per_epoch = 1281167
num_epochs = 90
steps_per_epoch = images_per_epoch / train_batch_size
config.training_steps = ((images_per_epoch * num_epochs) // train_batch_size)
config.random_seed = 0
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
lr=0.1,
num_epochs=num_epochs,
label_smoothing=0.1,
model='ResNet',
image_size=224,
use_ema=False,
              ema_decay=0.9999,  # Four nines.
ema_start=0,
which_ema='tf1_ema',
augment_name=None, # 'mixup_cutmix',
augment_before_mix=True,
eval_preproc='crop_resize',
train_batch_size=train_batch_size,
eval_batch_size=50,
eval_subset='test',
num_classes=1000,
which_dataset='imagenet',
fake_data=False,
which_loss='softmax_cross_entropy', # For now, must be softmax
transpose=True, # Use the double-transpose trick?
bfloat16=False,
lr_schedule=dict(
name='WarmupCosineDecay',
kwargs=dict(
num_steps=config.training_steps,
start_val=0,
min_val=0,
warmup_steps=5 * steps_per_epoch),
),
lr_scale_by_bs=True,
optimizer=dict(
name='SGD',
kwargs={
'momentum': 0.9,
'nesterov': True,
'weight_decay': 1e-4,
},
),
model_kwargs=dict(
width=4,
which_norm='BatchNorm',
norm_kwargs=dict(
create_scale=True,
create_offset=True,
decay_rate=0.9,
), # cross_replica_axis='i'),
variant='ResNet50',
activation='relu',
drop_rate=0.0,
),
),))
# Training loop config: log and checkpoint every minute
config.log_train_data_interval = 60
config.log_tensors_interval = 60
config.save_checkpoint_interval = 60
config.eval_specific_checkpoint_dir = ''
return config
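# Note (illustrative): under jaxline, fields in this ConfigDict can typically
# be overridden from the command line via ml_collections config flags, e.g.
#   --config.experiment_kwargs.config.lr=0.2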
class Experiment(experiment.AbstractExperiment):
"""Imagenet experiment."""
CHECKPOINT_ATTRS = {
'_params': 'params',
'_state': 'state',
'_ema_params': 'ema_params',
'_ema_state': 'ema_state',
'_opt_state': 'opt_state',
}
def __init__(self, mode, config, init_rng):
super().__init__(mode=mode)
self.mode = mode
self.config = config
self.init_rng = init_rng
# Checkpointed experiment state.
self._params = None
self._state = None
self._ema_params = None
self._ema_state = None
self._opt_state = None
# Input pipelines.
self._train_input = None
self._eval_input = None
# Get model, loaded in from the zoo
module_prefix = 'nfnets.'
self.model_module = importlib.import_module(
(module_prefix + self.config.model.lower()))
self.net = hk.transform_with_state(self._forward_fn)
# Assign image sizes
if self.config.get('override_imsize', False):
self.train_imsize = self.config.image_size
self.test_imsize = self.config.get('eval_image_size', self.train_imsize)
else:
variant_dict = getattr(self.model_module, self.config.model).variant_dict
variant_dict = variant_dict[self.config.model_kwargs.variant]
self.train_imsize = variant_dict.get('train_imsize',
self.config.image_size)
# Test imsize defaults to model-specific value, then to config imsize
test_imsize = self.config.get('eval_image_size', self.config.image_size)
self.test_imsize = variant_dict.get('test_imsize', test_imsize)
donate_argnums = (0, 1, 2, 6, 7) if self.config.use_ema else (0, 1, 2)
self.train_fn = jax.pmap(
self._train_fn, axis_name='i', donate_argnums=donate_argnums)
self.eval_fn = jax.pmap(self._eval_fn, axis_name='i')
def _initialize_train(self):
self._train_input = self._build_train_input()
# Initialize net and EMA copy of net if no params available.
if self._params is None:
inputs = next(self._train_input)
init_net = jax.pmap(
lambda *a: self.net.init(*a, is_training=True), axis_name='i')
init_rng = jl_utils.bcast_local_devices(self.init_rng)
self._params, self._state = init_net(init_rng, inputs)
if self.config.use_ema:
self._ema_params, self._ema_state = init_net(init_rng, inputs)
num_params = hk.data_structures.tree_size(self._params)
logging.info(f'Net parameters: {num_params / jax.local_device_count()}')
self._make_opt()
def _make_opt(self):
# Separate conv params and gains/biases
def pred(mod, name, val): # pylint:disable=unused-argument
return (name in ['scale', 'offset', 'b'] or 'gain' in name or
'bias' in name)
gains_biases, weights = hk.data_structures.partition(pred, self._params)
# Lr schedule with batch-based LR scaling
if self.config.lr_scale_by_bs:
max_lr = (self.config.lr * self.config.train_batch_size) / 256
else:
max_lr = self.config.lr
lr_sched_fn = getattr(optim, self.config.lr_schedule.name)
lr_schedule = lr_sched_fn(max_val=max_lr, **self.config.lr_schedule.kwargs)
# Optimizer; no need to broadcast!
opt_kwargs = {key: val for key, val in self.config.optimizer.kwargs.items()}
opt_kwargs['lr'] = lr_schedule
opt_module = getattr(optim, self.config.optimizer.name)
self.opt = opt_module([{
'params': gains_biases,
'weight_decay': None
}, {
'params': weights
}], **opt_kwargs)
if self._opt_state is None:
self._opt_state = self.opt.states()
else:
self.opt.plugin(self._opt_state)
def _forward_fn(self, inputs, is_training):
net_kwargs = {
'num_classes': self.config.num_classes,
**self.config.model_kwargs
}
net = getattr(self.model_module, self.config.model)(**net_kwargs)
if self.config.get('transpose', False):
images = jnp.transpose(inputs['images'], (3, 0, 1, 2)) # HWCN -> NHWC
else:
images = inputs['images']
if self.config.bfloat16 and self.mode == 'train':
images = utils.to_bf16(images)
return net(images, is_training=is_training)['logits']
def _one_hot(self, value):
"""One-hot encoding potentially over a sequence of labels."""
y = jax.nn.one_hot(value, self.config.num_classes)
return y
def _loss_fn(self, params, state, inputs, rng):
logits, state = self.net.apply(params, state, rng, inputs, is_training=True)
y = self._one_hot(inputs['labels'])
if 'mix_labels' in inputs: # Handle cutmix/mixup label mixing
logging.info('Using mixup or cutmix!')
y1 = self._one_hot(inputs['mix_labels'])
y = inputs['ratio'][:, None] * y + (1. - inputs['ratio'][:, None]) * y1
if self.config.label_smoothing > 0: # get smoothy
spositives = 1. - self.config.label_smoothing
snegatives = self.config.label_smoothing / self.config.num_classes
y = spositives * y + snegatives
if self.config.bfloat16: # Cast logits to float32
logits = logits.astype(jnp.float32)
which_loss = getattr(utils, self.config.which_loss)
loss = which_loss(logits, y, reduction='mean')
metrics = utils.topk_correct(logits, inputs['labels'], prefix='train_')
# Average top-1 and top-5 correct labels
metrics = jax.tree_map(jnp.mean, metrics)
metrics['train_loss'] = loss # Metrics will be pmeaned so don't divide here
scaled_loss = loss / jax.device_count() # Grads get psummed so do divide
return scaled_loss, (metrics, state)
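  # Worked example of the smoothing above (hypothetical values): with
  # label_smoothing=0.1 and num_classes=1000, spositives is 0.9 and
  # snegatives is 1e-4, so the true class's target becomes 0.9001 and every
  # other class gets 1e-4; the targets still sum to 1. The loss is divided by
  # jax.device_count() because the gradients are psummed (not pmeaned) across
  # devices in _train_fn, which recovers a global mean.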
def _train_fn(self, params, states, opt_states, inputs, rng, global_step,
ema_params, ema_states):
"""Runs one batch forward + backward and run a single opt step."""
grad_fn = jax.grad(self._loss_fn, argnums=0, has_aux=True)
if self.config.bfloat16:
in_params, states = jax.tree_map(utils.to_bf16, (params, states))
else:
in_params = params
grads, (metrics, states) = grad_fn(in_params, states, inputs, rng)
if self.config.bfloat16:
states, metrics, grads = jax.tree_map(utils.from_bf16,
(states, metrics, grads))
# Sum gradients and average losses for pmap
grads = jax.lax.psum(grads, 'i')
metrics = jax.lax.pmean(metrics, 'i')
# Compute updates and update parameters
metrics['learning_rate'] = self.opt._hyperparameters['lr'](global_step) # pylint: disable=protected-access
params, opt_states = self.opt.step(params, grads, opt_states, global_step)
if ema_params is not None:
ema_fn = getattr(utils, self.config.get('which_ema', 'tf1_ema'))
ema = lambda x, y: ema_fn(x, y, self.config.ema_decay, global_step)
ema_params = jax.tree_map(ema, ema_params, params)
ema_states = jax.tree_map(ema, ema_states, states)
return {
'params': params,
'states': states,
'opt_states': opt_states,
'ema_params': ema_params,
'ema_states': ema_states,
'metrics': metrics
}
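  # Note on the mixed-precision pattern above: the float32 master params are
  # left untouched; a bfloat16 copy feeds the forward/backward pass, and
  # grads, states and metrics are cast back to float32 before the psum, so
  # the cross-device reduction and the optimizer update run in full precision.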
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, global_step, rng, *unused_args, **unused_kwargs):
if self._train_input is None:
self._initialize_train()
inputs = next(self._train_input)
out = self.train_fn(
params=self._params,
states=self._state,
opt_states=self._opt_state,
inputs=inputs,
rng=rng,
global_step=global_step,
ema_params=self._ema_params,
ema_states=self._ema_state)
self._params, self._state = out['params'], out['states']
self._opt_state = out['opt_states']
self._ema_params, self._ema_state = out['ema_params'], out['ema_states']
self.opt.plugin(self._opt_state)
return jl_utils.get_first(out['metrics'])
def _build_train_input(self):
num_devices = jax.device_count()
global_batch_size = self.config.train_batch_size
bs_per_device, ragged = divmod(global_batch_size, num_devices)
if ragged:
raise ValueError(
f'Global batch size {global_batch_size} must be divisible by '
f'num devices {num_devices}')
return dataset.load(
dataset.Split.TRAIN_AND_VALID,
is_training=True,
batch_dims=[jax.local_device_count(), bs_per_device],
transpose=self.config.get('transpose', False),
image_size=(self.train_imsize,) * 2,
augment_name=self.config.augment_name,
augment_before_mix=self.config.get('augment_before_mix', True),
name=self.config.which_dataset,
fake_data=self.config.get('fake_data', False))
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, **unused_args):
metrics = self._eval_epoch(self._params, self._state)
if self.config.use_ema:
ema_metrics = self._eval_epoch(self._ema_params, self._ema_state)
metrics.update({f'ema_{key}': val for key, val in ema_metrics.items()})
logging.info(f'[Step {global_step}] Eval scalars: {metrics}')
return metrics
def _eval_epoch(self, params, state):
"""Evaluates an epoch."""
num_samples = 0.
summed_metrics = None
for inputs in self._build_eval_input():
num_samples += np.prod(inputs['labels'].shape[:2]) # Account for pmaps
metrics = self.eval_fn(params, state, inputs)
# Accumulate the sum of metrics for each step.
metrics = jax.tree_map(lambda x: jnp.sum(x[0], axis=0), metrics)
if summed_metrics is None:
summed_metrics = metrics
else:
summed_metrics = jax.tree_map(jnp.add, summed_metrics, metrics)
mean_metrics = jax.tree_map(lambda x: x / num_samples, summed_metrics)
return jax.device_get(mean_metrics)
def _eval_fn(self, params, state, inputs):
"""Evaluate a single batch and return loss and top-k acc."""
logits, _ = self.net.apply(params, state, None, inputs, is_training=False)
y = self._one_hot(inputs['labels'])
which_loss = getattr(utils, self.config.which_loss)
loss = which_loss(logits, y, reduction=None)
metrics = utils.topk_correct(logits, inputs['labels'], prefix='eval_')
metrics['eval_loss'] = loss
return jax.lax.psum(metrics, 'i')
def _build_eval_input(self):
"""Builds the evaluation input pipeline."""
bs_per_device = (self.config.eval_batch_size // jax.local_device_count())
split = dataset.Split.from_string(self.config.eval_subset)
eval_preproc = self.config.get('eval_preproc', 'crop_resize')
return dataset.load(
split,
is_training=False,
batch_dims=[jax.local_device_count(), bs_per_device],
transpose=self.config.get('transpose', False),
image_size=(self.test_imsize,) * 2,
name=self.config.which_dataset,
eval_preproc=eval_preproc,
fake_data=self.config.get('fake_data', False))
if __name__ == '__main__':
flags.mark_flag_as_required('config')
platform.main(Experiment, sys.argv[1:])
|
deepmind-research-master
|
nfnets/experiment.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""ImageNet experiment with NFNets."""
import sys
from absl import flags
import haiku as hk
from jaxline import platform
from ml_collections import config_dict
from nfnets import experiment
from nfnets import optim
FLAGS = flags.FLAGS
def get_config():
"""Return config object for training."""
config = experiment.get_config()
# Experiment config.
train_batch_size = 4096 # Global batch size.
images_per_epoch = 1281167
num_epochs = 360
steps_per_epoch = images_per_epoch / train_batch_size
config.training_steps = ((images_per_epoch * num_epochs) // train_batch_size)
config.random_seed = 0
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
lr=0.1,
num_epochs=num_epochs,
label_smoothing=0.1,
model='NFNet',
image_size=224,
use_ema=True,
ema_decay=0.99999,
ema_start=0,
augment_name=None,
augment_before_mix=False,
eval_preproc='resize_crop_32',
train_batch_size=train_batch_size,
eval_batch_size=50,
eval_subset='test',
num_classes=1000,
which_dataset='imagenet',
which_loss='softmax_cross_entropy', # One of softmax or sigmoid
bfloat16=True,
lr_schedule=dict(
name='WarmupCosineDecay',
kwargs=dict(num_steps=config.training_steps,
start_val=0,
min_val=0.0,
warmup_steps=5*steps_per_epoch),
),
lr_scale_by_bs=True,
optimizer=dict(
name='SGD_AGC',
kwargs={'momentum': 0.9, 'nesterov': True,
'weight_decay': 2e-5,
'clipping': 0.01, 'eps': 1e-3},
),
model_kwargs=dict(
variant='F0',
width=1.0,
se_ratio=0.5,
alpha=0.2,
stochdepth_rate=0.25,
drop_rate=None, # Use native drop-rate
activation='gelu',
final_conv_mult=2,
final_conv_ch=None,
use_two_convs=True,
),
)))
# Unlike NF-RegNets, use the same weight decay for all, but vary RA levels
variant = config.experiment_kwargs.config.model_kwargs.variant
# RandAugment levels (e.g. 405 = 4 layers, magnitude 5, 205 = 2 layers, mag 5)
augment = {'F0': '405', 'F1': '410', 'F2': '410', 'F3': '415',
'F4': '415', 'F5': '415', 'F6': '415', 'F7': '415'}[variant]
aug_base_name = 'cutmix_mixup_randaugment'
config.experiment_kwargs.config.augment_name = f'{aug_base_name}_{augment}'
return config
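# A worked pass through the numbers above: steps_per_epoch is
# 1281167 / 4096 ~= 312.8, so config.training_steps is
# (1281167 * 360) // 4096 = 112602 and warmup lasts ~1564 steps; with
# lr_scale_by_bs=True the peak learning rate is 0.1 * 4096 / 256 = 1.6.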
class Experiment(experiment.Experiment):
"""Experiment with correct parameter filtering for applying AGC."""
def _make_opt(self):
# Separate conv params and gains/biases
def pred_gb(mod, name, val):
del mod, val
return (name in ['scale', 'offset', 'b']
or 'gain' in name or 'bias' in name)
gains_biases, weights = hk.data_structures.partition(pred_gb, self._params)
def pred_fc(mod, name, val):
del name, val
return 'linear' in mod and 'squeeze_excite' not in mod
fc_weights, weights = hk.data_structures.partition(pred_fc, weights)
# Lr schedule with batch-based LR scaling
if self.config.lr_scale_by_bs:
max_lr = (self.config.lr * self.config.train_batch_size) / 256
else:
max_lr = self.config.lr
lr_sched_fn = getattr(optim, self.config.lr_schedule.name)
lr_schedule = lr_sched_fn(max_val=max_lr, **self.config.lr_schedule.kwargs)
# Optimizer; no need to broadcast!
opt_kwargs = {key: val for key, val in self.config.optimizer.kwargs.items()}
opt_kwargs['lr'] = lr_schedule
opt_module = getattr(optim, self.config.optimizer.name)
self.opt = opt_module([{'params': gains_biases, 'weight_decay': None,},
{'params': fc_weights, 'clipping': None},
{'params': weights}], **opt_kwargs)
if self._opt_state is None:
self._opt_state = self.opt.states()
else:
self.opt.plugin(self._opt_state)
if __name__ == '__main__':
flags.mark_flag_as_required('config')
platform.main(Experiment, sys.argv[1:])
|
deepmind-research-master
|
nfnets/experiment_nfnets.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet model family."""
import functools
import haiku as hk
import jax
import jax.numpy as jnp
from nfnets import base
class ResNet(hk.Module):
"""ResNetv2 Models."""
variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]},
'ResNet101': {'depth': [3, 4, 23, 3]},
'ResNet152': {'depth': [3, 8, 36, 3]},
'ResNet200': {'depth': [3, 24, 36, 3]},
'ResNet288': {'depth': [24, 24, 24, 24]},
'ResNet600': {'depth': [50, 50, 50, 50]},
}
def __init__(self, width, num_classes,
variant='ResNet50',
which_norm='BatchNorm', norm_kwargs=None,
activation='relu', drop_rate=0.0,
fc_init=jnp.zeros, conv_kwargs=None,
preactivation=True, use_se=False, se_ratio=0.25,
name='ResNet'):
super().__init__(name=name)
self.width = width
self.num_classes = num_classes
self.variant = variant
self.depth_pattern = self.variant_dict[variant]['depth']
self.activation = getattr(jax.nn, activation)
self.drop_rate = drop_rate
self.which_norm = getattr(hk, which_norm)
if norm_kwargs is not None:
self.which_norm = functools.partial(self.which_norm, **norm_kwargs)
if conv_kwargs is not None:
self.which_conv = functools.partial(hk.Conv2D, **conv_kwargs)
else:
self.which_conv = hk.Conv2D
self.preactivation = preactivation
# Stem
self.initial_conv = self.which_conv(16 * self.width, kernel_shape=7,
stride=2, padding='SAME',
with_bias=False, name='initial_conv')
if not self.preactivation:
self.initial_bn = self.which_norm(name='initial_bn')
which_block = ResBlockV2 if self.preactivation else ResBlockV1
# Body
self.blocks = []
for multiplier, blocks_per_stage, stride in zip([64, 128, 256, 512],
self.depth_pattern,
[1, 2, 2, 2]):
for block_index in range(blocks_per_stage):
self.blocks += [which_block(multiplier * self.width,
use_projection=block_index == 0,
stride=stride if block_index == 0 else 1,
activation=self.activation,
which_norm=self.which_norm,
which_conv=self.which_conv,
use_se=use_se,
se_ratio=se_ratio)]
# Head
self.final_bn = self.which_norm(name='final_bn')
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
def __call__(self, x, is_training, test_local_stats=False,
return_metrics=False):
"""Return the output of the final layer without any [log-]softmax."""
outputs = {}
# Stem
out = self.initial_conv(x)
if not self.preactivation:
out = self.activation(self.initial_bn(out, is_training, test_local_stats))
out = hk.max_pool(out, window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1), padding='SAME')
if return_metrics:
outputs.update(base.signal_metrics(out, 0))
# Blocks
for i, block in enumerate(self.blocks):
out, res_var = block(out, is_training, test_local_stats)
if return_metrics:
outputs.update(base.signal_metrics(out, i + 1))
outputs[f'res_avg_var_{i}'] = res_var
if self.preactivation:
out = self.activation(self.final_bn(out, is_training, test_local_stats))
# Pool, dropout, classify
pool = jnp.mean(out, axis=[1, 2])
# Return pool before dropout in case we want to regularize it separately.
outputs['pool'] = pool
# Optionally apply dropout
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
outputs['logits'] = self.fc(pool)
return outputs
class ResBlockV2(hk.Module):
"""ResNet preac block, 1x1->3x3->1x1 with strides and shortcut downsample."""
def __init__(self, out_ch, stride=1, use_projection=False,
activation=jax.nn.relu, which_norm=hk.BatchNorm,
which_conv=hk.Conv2D, use_se=False, se_ratio=0.25,
name=None):
super().__init__(name=name)
self.out_ch = out_ch
self.stride = stride
self.use_projection = use_projection
self.activation = activation
self.which_norm = which_norm
self.which_conv = which_conv
self.use_se = use_se
self.se_ratio = se_ratio
self.width = self.out_ch // 4
self.bn0 = which_norm(name='bn0')
self.conv0 = which_conv(self.width, kernel_shape=1, with_bias=False,
padding='SAME', name='conv0')
self.bn1 = which_norm(name='bn1')
self.conv1 = which_conv(self.width, stride=self.stride,
kernel_shape=3, with_bias=False,
padding='SAME', name='conv1')
self.bn2 = which_norm(name='bn2')
self.conv2 = which_conv(self.out_ch, kernel_shape=1, with_bias=False,
padding='SAME', name='conv2')
if self.use_projection:
self.conv_shortcut = which_conv(self.out_ch, stride=stride,
kernel_shape=1, with_bias=False,
padding='SAME', name='conv_shortcut')
if self.use_se:
self.se = base.SqueezeExcite(self.out_ch, self.out_ch, self.se_ratio)
def __call__(self, x, is_training, test_local_stats):
bn_args = (is_training, test_local_stats)
out = self.activation(self.bn0(x, *bn_args))
if self.use_projection:
shortcut = self.conv_shortcut(out)
else:
shortcut = x
out = self.conv0(out)
out = self.conv1(self.activation(self.bn1(out, *bn_args)))
out = self.conv2(self.activation(self.bn2(out, *bn_args)))
if self.use_se:
out = self.se(out) * out
# Get average residual standard deviation for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
return out + shortcut, res_avg_var
class ResBlockV1(ResBlockV2):
"""Post-Ac Residual Block."""
def __call__(self, x, is_training, test_local_stats):
bn_args = (is_training, test_local_stats)
if self.use_projection:
shortcut = self.conv_shortcut(x)
shortcut = self.which_norm(name='shortcut_bn')(shortcut, *bn_args)
else:
shortcut = x
out = self.activation(self.bn0(self.conv0(x), *bn_args))
out = self.activation(self.bn1(self.conv1(out), *bn_args))
out = self.bn2(self.conv2(out), *bn_args)
if self.use_se:
out = self.se(out) * out
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
return self.activation(out + shortcut), res_avg_var
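# A minimal smoke test (an illustrative sketch, not part of the training
# pipeline): builds a small ResNet under Haiku and runs one forward pass.
# The BatchNorm kwargs below are assumptions; the experiment supplies its own.
if __name__ == '__main__':
  def _forward(x, is_training):
    model = ResNet(width=1, num_classes=10, variant='ResNet50',
                   norm_kwargs=dict(create_scale=True, create_offset=True,
                                    decay_rate=0.9))
    return model(x, is_training=is_training)['logits']
  net = hk.transform_with_state(_forward)
  rng = jax.random.PRNGKey(0)
  images = jnp.ones([2, 32, 32, 3])
  params, state = net.init(rng, images, is_training=True)
  logits, _ = net.apply(params, state, rng, images, is_training=True)
  print(logits.shape)  # (2, 10)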
|
deepmind-research-master
|
nfnets/resnet.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils."""
import dill
import jax
import jax.numpy as jnp
import tree
def reduce_fn(x, mode):
"""Reduce fn for various losses."""
if mode == 'none' or mode is None:
return jnp.asarray(x)
elif mode == 'sum':
return jnp.sum(x)
elif mode == 'mean':
return jnp.mean(x)
else:
raise ValueError('Unsupported reduction option.')
def softmax_cross_entropy(logits, labels, reduction='sum'):
"""Computes softmax cross entropy given logits and one-hot class labels.
Args:
logits: Logit output values.
labels: Ground truth one-hot-encoded labels.
reduction: Type of reduction to apply to loss.
Returns:
Loss value. If `reduction` is `none`, this has the same shape as `labels`;
otherwise, it is scalar.
Raises:
ValueError: If the type of `reduction` is unsupported.
"""
loss = -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1)
return reduce_fn(loss, reduction)
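# A quick worked example (hypothetical values): for logits [2., 0., 0.] and
# one-hot label [1., 0., 0.], log_softmax assigns ~-0.24 to class 0, so the
# per-example loss is ~0.24 before the chosen reduction is applied.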
def topk_correct(logits, labels, mask=None, prefix='', topk=(1, 5)):
"""Calculate top-k error for multiple k values."""
metrics = {}
argsorted_logits = jnp.argsort(logits)
for k in topk:
pred_labels = argsorted_logits[..., -k:]
# Get the number of examples where the label is in the top-k predictions
correct = any_in(pred_labels, labels).any(axis=-1).astype(jnp.float32)
if mask is not None:
correct *= mask
metrics[f'{prefix}top_{k}_acc'] = correct
return metrics
@jax.vmap
def any_in(prediction, target):
"""For each row in a and b, checks if any element of a is in b."""
return jnp.isin(prediction, target)
def tf1_ema(ema_value, current_value, decay, step):
"""Implements EMA with TF1-style decay warmup."""
decay = jnp.minimum(decay, (1.0 + step) / (10.0 + step))
return ema_value * decay + current_value * (1 - decay)
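# Worked example of the warmup above: at step 0 the effective decay is
# min(decay, 1/10) = 0.1 and at step 90 it is min(decay, 91/100) = 0.91, so
# the EMA tracks the raw parameters closely early on and only approaches the
# configured decay (e.g. 0.99999) later in training.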
def ema(ema_value, current_value, decay, step):
"""Implements EMA without any warmup."""
del step
return ema_value * decay + current_value * (1 - decay)
to_bf16 = lambda x: x.astype(jnp.bfloat16) if x.dtype == jnp.float32 else x
from_bf16 = lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x
def _replicate(x, devices=None):
"""Replicate an object on each device."""
x = jax.numpy.array(x)
if devices is None:
devices = jax.local_devices()
return jax.device_put_sharded(len(devices) * [x], devices)
def broadcast(obj):
"""Broadcasts an object to all devices."""
if obj is not None and not isinstance(obj, bool):
return _replicate(obj)
else:
return obj
def split_tree(tuple_tree, base_tree, n):
"""Splits tuple_tree with n-tuple leaves into n trees."""
return [tree.map_structure_up_to(base_tree, lambda x: x[i], tuple_tree) # pylint: disable=cell-var-from-loop
for i in range(n)]
def load_haiku_file(filename):
"""Loads a haiku parameter tree, using dill."""
with open(filename, 'rb') as in_file:
output = dill.load(in_file)
return output
def flatten_haiku_tree(haiku_dict):
"""Flattens a haiku parameter tree into a flat dictionary."""
out = {}
for module in haiku_dict.keys():
out_module = module.replace('/~/', '.').replace('/', '.')
for key in haiku_dict[module]:
out_key = f'{out_module}.{key}'
out[out_key] = haiku_dict[module][key]
return out
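# For example (hypothetical module name), a parameter stored under module
# 'res_net/~/initial_conv' with key 'w' is flattened to the single key
# 'res_net.initial_conv.w'.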
|
deepmind-research-master
|
nfnets/utils.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment and RandAugment policies for enhanced image preprocessing.
AutoAugment Reference: https://arxiv.org/abs/1805.09501
RandAugment Reference: https://arxiv.org/abs/1909.13719
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import math
from ml_collections import config_dict
import tensorflow.compat.v1 as tf
from tensorflow_addons import image as contrib_image
# pylint: disable=deprecated-method
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
return policy
def policy_vtest():
"""Autoaugment test policy for debugging."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.to_float(image1)
image2 = tf.to_float(image2)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.to_float(image1) + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
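# Worked example: blend(a, b, 0.5) returns the pixel-wise average of the two
# images, while blend(a, b, 1.5) extrapolates half a difference past image2
# and clips the result to [0, 255].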
def cutout(image, pad_size, replace=0):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
  a random location within `image`. The pixel values filled in will be of
  the value `replace`. The location where the mask will be applied is
  randomly chosen uniformly over the whole image.
Args:
image: An image Tensor of type uint8.
    pad_size: Specifies the size of the zero mask applied to the image; the
      mask will be of size (2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract the pixel from 255.
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
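# Worked example: posterize(image, bits=4) keeps only the top four bits of
# each pixel, so a value of 173 (0b10101101) becomes 160 (0b10100000).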
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by
the rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = contrib_image.rotate(wrap(image), radians)
return unwrap(image, replace)
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
image = contrib_image.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
image = contrib_image.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def shear_x(image, level, replace):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = contrib_image.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image, level, replace):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = contrib_image.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image
# to compute mins and maxes.
lo = tf.to_float(tf.reduce_min(image))
hi = tf.to_float(tf.reduce_max(image))
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.to_float(im) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
with tf.device('/cpu:0'):
# Some augmentation that uses depth-wise conv will cause crashing when
# training on GPU. See (b/156242594) for details.
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', rate=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def invert(image):
"""Inverts the image pixels."""
image = tf.convert_to_tensor(image)
return 255 - image
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
  the other three channels at that spatial position are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
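# Worked example: wrap() appends an all-ones alpha channel, a translate or
# shear leaves alpha 0 in the newly exposed pixels, and unwrap() rewrites
# exactly those pixels with `replace` before dropping the alpha channel.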
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Cutout': cutout,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random_uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level/_MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
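# For example, level 5 maps to (5 / 10) * 30 = 15 degrees of rotation,
# negated with 50% probability.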
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
return ((level/_MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
level = (level/_MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = (level/_MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def level_to_arg(hparams):
return {
'AutoContrast': lambda level: (),
'Equalize': lambda level: (),
'Invert': lambda level: (),
'Rotate': _rotate_level_to_arg,
'Posterize': lambda level: (int((level/_MAX_LEVEL) * 4),),
'Solarize': lambda level: (int((level/_MAX_LEVEL) * 256),),
'SolarizeAdd': lambda level: (int((level/_MAX_LEVEL) * 110),),
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'ShearX': _shear_level_to_arg,
'ShearY': _shear_level_to_arg,
'Cutout': lambda level: (int((level/_MAX_LEVEL) * hparams.cutout_const),),
# pylint:disable=g-long-lambda
'TranslateX': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
'TranslateY': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
# pylint:enable=g-long-lambda
}
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
# Check to see if prob is passed into function. This is used for operations
# where we alter bboxes independently.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getargspec(func)[0]:
args = tuple([prob] + list(args))
# pytype:enable=wrong-arg-types
# Add in replace arg if it is required for the function that is being called.
# pytype:disable=wrong-arg-types
if 'replace' in inspect.getargspec(func)[0]:
# Make sure replace is the final argument
assert 'replace' == inspect.getargspec(func)[0][-1]
args = tuple(list(args) + [replace_value])
# pytype:enable=wrong-arg-types
return (func, prob, args)
def _apply_func_with_prob(func, image, args, prob):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
# If prob is a function argument, then this randomness is being handled
# inside the function, so make sure it is always called.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getargspec(func)[0]:
prob = 1.0
# pytype:enable=wrong-arg-types
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image = tf.cond(
should_apply_op,
lambda: func(image, *args),
lambda: image)
return augmented_image
def select_and_apply_random_policy(policies, image):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image),
lambda: image)
return image
def build_and_apply_nas_policy(policies, image,
augmentation_hparams):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
augmentation_hparams: Hparams associated with the NAS learned policy.
Returns:
A version of image that now has data augmentation applied to it based on
the `policies` pass into the function.
"""
replace_value = [128, 128, 128]
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
    # Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_):
for func, prob, args in tf_policy_:
image_ = _apply_func_with_prob(
func, image_, args, prob)
return image_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
augmented_image = select_and_apply_random_policy(
tf_policies, image)
return augmented_image
def distort_image_with_autoaugment(image, augmentation_name):
"""Applies the AutoAugment policy to `image`.
AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
augmentation_name: The name of the AutoAugment policy to use. The available
      options are `v0` and `test`. `v0` is the policy used for all of the
      results in the AutoAugment paper, and `test` is a single sub-policy
      intended for debugging.
Returns:
    The augmented version of `image`.
"""
available_policies = {'v0': policy_v0,
'test': policy_vtest}
if augmentation_name not in available_policies:
raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
policy = available_policies[augmentation_name]()
# Hparams that will be used for AutoAugment.
augmentation_hparams = config_dict.ConfigDict(dict(
cutout_const=100, translate_const=250))
return build_and_apply_nas_policy(policy, image, augmentation_hparams)
def distort_image_with_randaugment(image, num_layers, magnitude):
"""Applies the RandAugment policy to `image`.
  RandAugment is from the paper https://arxiv.org/abs/1909.13719.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
num_layers: Integer, the number of augmentation transformations to apply
sequentially to an image. Represented as (N) in the paper. Usually best
values will be in the range [1, 3].
magnitude: Integer, shared magnitude across all augmentation operations.
Represented as (M) in the paper. Usually best values are in the range
[5, 30].
Returns:
The augmented version of `image`.
"""
replace_value = [128] * 3
tf.logging.info('Using RandAug.')
augmentation_hparams = config_dict.ConfigDict(dict(
cutout_const=40, translate_const=100))
available_ops = [
'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize',
'Solarize', 'Color', 'Contrast', 'Brightness', 'Sharpness',
'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Cutout', 'SolarizeAdd']
for layer_num in range(num_layers):
op_to_select = tf.random_uniform(
[], maxval=len(available_ops), dtype=tf.int32)
random_magnitude = float(magnitude)
with tf.name_scope('randaug_layer_{}'.format(layer_num)):
for (i, op_name) in enumerate(available_ops):
prob = tf.random_uniform([], minval=0.2, maxval=0.8, dtype=tf.float32)
func, _, args = _parse_policy_info(op_name, prob, random_magnitude,
replace_value, augmentation_hparams)
image = tf.cond(
tf.equal(i, op_to_select),
# pylint:disable=g-long-lambda
lambda selected_func=func, selected_args=args: selected_func(
image, *selected_args),
# pylint:enable=g-long-lambda
lambda: image)
return image
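# A minimal usage sketch (hypothetical input; assumes a uint8 HxWx3 tensor):
#   image = tf.zeros([224, 224, 3], dtype=tf.uint8)
#   augmented = distort_image_with_randaugment(image, num_layers=4, magnitude=5)
# which corresponds to the '405' RandAugment setting used by the F0 config.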
|
deepmind-research-master
|
nfnets/autoaugment.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Norm-Free Residual Networks."""
# pylint: disable=invalid-name
import haiku as hk
import jax
import jax.numpy as jnp
from nfnets import base
class NF_ResNet(hk.Module):
"""Norm-Free preactivation ResNet."""
variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]},
'ResNet101': {'depth': [3, 4, 23, 3]},
'ResNet152': {'depth': [3, 8, 36, 3]},
'ResNet200': {'depth': [3, 24, 36, 3]},
'ResNet288': {'depth': [24, 24, 24, 24]},
'ResNet600': {'depth': [50, 50, 50, 50]},
}
def __init__(self, num_classes, variant='ResNet50', width=4,
alpha=0.2, stochdepth_rate=0.1, drop_rate=None,
activation='relu', fc_init=None, skipinit_gain=jnp.zeros,
use_se=False, se_ratio=0.25,
name='NF_ResNet'):
super().__init__(name=name)
self.num_classes = num_classes
self.variant = variant
self.width = width
# Get variant info
block_params = self.variant_dict[self.variant]
self.width_pattern = [item * self.width for item in [64, 128, 256, 512]]
self.depth_pattern = block_params['depth']
self.activation = base.nonlinearities[activation]
if drop_rate is None:
self.drop_rate = block_params['drop_rate']
else:
self.drop_rate = drop_rate
self.which_conv = base.WSConv2D
# Stem
ch = int(16 * self.width)
self.initial_conv = self.which_conv(ch, kernel_shape=7, stride=2,
padding='SAME', with_bias=False,
name='initial_conv')
# Body
self.blocks = []
expected_std = 1.0
num_blocks = sum(self.depth_pattern)
index = 0 # Overall block index
block_args = (self.width_pattern, self.depth_pattern, [1, 2, 2, 2])
for block_width, stage_depth, stride in zip(*block_args):
for block_index in range(stage_depth):
# Scalar pre-multiplier so each block sees an N(0,1) input at init
        beta = 1. / expected_std
# Block stochastic depth drop-rate
block_stochdepth_rate = stochdepth_rate * index / num_blocks
self.blocks += [NFResBlock(ch, block_width,
stride=stride if block_index == 0 else 1,
beta=beta, alpha=alpha,
activation=self.activation,
which_conv=self.which_conv,
stochdepth_rate=block_stochdepth_rate,
skipinit_gain=skipinit_gain,
use_se=use_se,
se_ratio=se_ratio,
)]
ch = block_width
index += 1
# Reset expected std but still give it 1 block of growth
if block_index == 0:
expected_std = 1.0
        expected_std = (expected_std**2 + alpha**2)**0.5
# Head. By default, initialize with N(0, 0.01)
if fc_init is None:
fc_init = hk.initializers.RandomNormal(0.01, 0)
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
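  # Worked example of the variance bookkeeping above (alpha=0.2): the first
  # block sees expected_std = 1.0 and thus beta = 1.0; the residual branch
  # adds alpha**2 of variance, so expected_std grows to sqrt(1.04) ~= 1.02
  # and the next block uses beta ~= 0.98. expected_std is reset to 1.0 at
  # each transition block (block_index == 0), whose projection shortcut
  # consumes the beta-scaled activation.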
def __call__(self, x, is_training=True, return_metrics=False):
"""Return the output of the final layer without any [log-]softmax."""
# Stem
outputs = {}
out = self.initial_conv(x)
out = hk.max_pool(out, window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1), padding='SAME')
if return_metrics:
outputs.update(base.signal_metrics(out, 0))
# Blocks
for i, block in enumerate(self.blocks):
out, res_avg_var = block(out, is_training=is_training)
if return_metrics:
outputs.update(base.signal_metrics(out, i + 1))
outputs[f'res_avg_var_{i}'] = res_avg_var
# Final-conv->activation, pool, dropout, classify
pool = jnp.mean(self.activation(out), [1, 2])
outputs['pool'] = pool
# Optionally apply dropout
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
outputs['logits'] = self.fc(pool)
return outputs
def count_flops(self, h, w):
flops = []
flops += [base.count_conv_flops(3, self.initial_conv, h, w)]
h, w = h / 2, w / 2
# Body FLOPs
for block in self.blocks:
flops += [block.count_flops(h, w)]
if block.stride > 1:
h, w = h / block.stride, w / block.stride
# Count flops for classifier
flops += [self.blocks[-1].out_ch * self.fc.output_size]
return flops, sum(flops)
class NFResBlock(hk.Module):
"""Normalizer-Free pre-activation ResNet Block."""
def __init__(self, in_ch, out_ch, bottleneck_ratio=0.25,
kernel_size=3, stride=1,
beta=1.0, alpha=0.2,
which_conv=base.WSConv2D, activation=jax.nn.relu,
skipinit_gain=jnp.zeros,
stochdepth_rate=None,
use_se=False, se_ratio=0.25,
name=None):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
self.kernel_size = kernel_size
self.activation = activation
self.beta, self.alpha = beta, alpha
self.skipinit_gain = skipinit_gain
self.use_se, self.se_ratio = use_se, se_ratio
# Bottleneck width
self.width = int(self.out_ch * bottleneck_ratio)
self.stride = stride
# Conv 0 (typically expansion conv)
self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME',
name='conv0')
# Grouped NxN conv
self.conv1 = which_conv(self.width, kernel_shape=kernel_size, stride=stride,
padding='SAME', name='conv1')
# Conv 2, typically projection conv
self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME',
name='conv2')
# Use shortcut conv on channel change or downsample.
self.use_projection = stride > 1 or self.in_ch != self.out_ch
if self.use_projection:
self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1,
stride=stride, padding='SAME',
name='conv_shortcut')
# Are we using stochastic depth?
self._has_stochdepth = (stochdepth_rate is not None and
stochdepth_rate > 0. and stochdepth_rate < 1.0)
if self._has_stochdepth:
self.stoch_depth = base.StochDepth(stochdepth_rate)
if self.use_se:
self.se = base.SqueezeExcite(self.out_ch, self.out_ch, self.se_ratio)
def __call__(self, x, is_training):
out = self.activation(x) * self.beta
shortcut = x
if self.use_projection: # Downsample with conv1x1
shortcut = self.conv_shortcut(out)
out = self.conv0(out)
out = self.conv1(self.activation(out))
out = self.conv2(self.activation(out))
if self.use_se:
out = 2 * self.se(out) * out
# Get average residual standard deviation for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
# Apply stochdepth if applicable.
if self._has_stochdepth:
out = self.stoch_depth(out, is_training)
# SkipInit Gain
out = out * hk.get_parameter('skip_gain', (), out.dtype,
init=self.skipinit_gain)
return out * self.alpha + shortcut, res_avg_var
def count_flops(self, h, w):
# Count conv FLOPs based on input HW
expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w)
# If block is strided we decrease resolution here.
dw_flops = base.count_conv_flops(self.width, self.conv1, h, w)
if self.stride > 1:
h, w = h / self.stride, w / self.stride
if self.use_projection:
sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w)
else:
sc_flops = 0
# SE flops happen on avg-pooled activations
se_flops = self.se.fc0.output_size * self.width
se_flops += self.se.fc0.output_size * self.se.fc1.output_size
contract_flops = base.count_conv_flops(self.width, self.conv2, h, w)
return sum([expand_flops, dw_flops, se_flops, contract_flops, sc_flops])
|
deepmind-research-master
|
nfnets/nf_resnet.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizers and Schedulers, inspired by the PyTorch API."""
from collections import ChainMap # pylint:disable=g-importing-member
from typing import Callable, Mapping
import haiku as hk
import jax
import jax.numpy as jnp
import tree
from nfnets import utils
class Optimizer(object):
"""Optimizer base class."""
def __init__(self, params, defaults):
# Flag indicating if parameters have been broadcasted
self._broadcasted = False
# Optimizer hyperparameters; this is a dict to support using param_groups
self._hyperparameters = {}
# Mapping from model parameters to optimizer hyperparameters
self._params2hyperparams = {}
# Assign defaults
self._hyperparameters = dict(**defaults)
# Prepare parameter groups and mappings
self.create_param_groups(params, defaults)
# Join params at top-level if params is a list of groups
if isinstance(params, list):
if any(_is_non_empty_two_level_mapping(g['params']) for g in params):
params = hk.data_structures.merge(*[g['params'] for g in params])
else:
params = dict(ChainMap(*[g['params'] for g in params]))
# Prepare states
create_buffers = lambda k, v: self.create_buffers('/'.join(k), v)
self._states = tree.map_structure_with_path(create_buffers, params)
def add_hyperparam_group(self, group, suffix, defaults):
"""Adds new hyperparameters to the hyperparams dict."""
# Use default hyperparams unless overridden by group hyperparams
group_dict = {key: key for key in defaults if key not in group}
for key in group:
if key != 'params': # Reserved keyword 'params'
group_dict[key] = '%s_%s' % (key, suffix)
self._hyperparameters[group_dict[key]] = group[key]
# Set up params2hyperparams
def set_p2h(k, _):
self._params2hyperparams['/'.join(k)] = group_dict
tree.map_structure_with_path(set_p2h, group['params'])
def create_param_groups(self, params, defaults):
"""Creates param-hyperparam mappings."""
if isinstance(params, list):
for group_index, group in enumerate(params):
# Add group to hyperparams and get this group's full hyperparameters
self.add_hyperparam_group(group, group_index, defaults)
else:
mapping = {key: key for key in self._hyperparameters}
def set_p2h(k, _):
self._params2hyperparams['/'.join(k)] = mapping
tree.map_structure_with_path(set_p2h, params)
def create_buffers(self, name, params):
"""Method to be overridden by child classes."""
pass
def get_opt_params(self, param_name, itr):
"""Returns hyperparams corresponding to param_name."""
mapping = self._params2hyperparams[param_name]
output = {}
for key in mapping:
hyper = self._hyperparameters[mapping[key]]
# Handle the case where a hyper is a class, for hybrids
if isinstance(hyper, Callable) and not isinstance(hyper, type):
output[key] = hyper(itr)
else:
output[key] = hyper
return output
def get_hyper(self, param_name, hyper_name):
"""Get an individual hyperparam for a given param."""
mapping = self._params2hyperparams[param_name]
return self._hyperparameters[mapping[hyper_name]]
def plugin(self, states):
self._states = states
def states(self):
return self._states
def broadcast(self):
"""Brodcasts all buffers and parameters."""
self._broadcasted = True
for name, state in self._states.items():
self._states[name] = {key: utils.broadcast(state[key]) for key in state}
def gather(self):
"""Gathers state (if broadcasted) for saving."""
states = {}
for name in self._states:
state = self._states[name]
states[name] = {key: state[key] if state[key] is None else state[key][0]
for key in state}
return states
def __setattr__(self, name, value):
"""Overrides the object's set-attribute function to register states, etc."""
if '_hyperparameters' in self.__dict__ and name in self._hyperparameters:
self._hyperparameters[name] = value
elif '_states' in self.__dict__ and name in self._states:
self._states[name] = value
else:
object.__setattr__(self, name, value)
def __getattr__(self, name):
"""Override the object's get-attribute function to return states, etc."""
if '_hyperparameters' in self.__dict__ and name in self._hyperparameters:
return self._hyperparameters[name]
elif '_states' in self.__dict__ and name in self._states:
return self._states[name]
else:
      return object.__getattribute__(self, name)
def step(self, params, grads, states, itr=None):
"""Takes a single optimizer step.
Args:
params: a dict containing the parameters to be updated.
grads: a dict containing the gradients for each parameter in params.
states: a dict containing any optimizer buffers (momentum, etc) for
each parameter in params.
itr: an optional integer indicating the current step, for scheduling.
Returns:
The updated params and optimizer buffers.
"""
get_hyper = lambda k, v: self.get_opt_params('/'.join(k), itr)
hypers = tree.map_structure_with_path(get_hyper, params)
outs = tree.map_structure_up_to(params, self.update_param,
params, grads, states, hypers)
return utils.split_tree(outs, params, 2)
def _is_non_empty_two_level_mapping(obj):
instof = lambda t: lambda v: isinstance(v, t)
# Basically: isinstance(obj, Mapping[str, Mapping[str, Any]]) ...
return (isinstance(obj, Mapping) and all(map(instof(str), obj.keys())) and
all(map(instof(Mapping), obj.values())) and
all(map(lambda v: all(map(instof(str), v.keys())), obj.values())) and
# ... and has at least one leaf.
bool(obj) and any(map(bool, obj.values())))
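# For example, {'conv': {'w': w}} qualifies as a non-empty two-level mapping,
# while {} or {'conv': {}} does not.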
class Schedule(object):
"""Hyperparameter scheduling objects."""
class CosineDecay(Schedule):
"""Cosine decay."""
def __init__(self, min_val, max_val, num_steps):
self.min_val = min_val
self.max_val = max_val
self.num_steps = num_steps
def __call__(self, itr):
cos = (1 + jnp.cos(jnp.pi * itr / self.num_steps))
return 0.5 * (self.max_val - self.min_val) * cos + self.min_val
class WarmupCosineDecay(Schedule):
"""Cosine decay with linear warmup."""
def __init__(self, start_val, min_val, max_val, num_steps, warmup_steps):
self.start_val = start_val
self.min_val = min_val
self.max_val = max_val
self.num_steps = num_steps
self.warmup_steps = warmup_steps
def __call__(self, itr):
warmup_val = ((self.max_val - self.start_val) * (itr / self.warmup_steps)
+ self.start_val)
cos_itr = (itr - self.warmup_steps) / (self.num_steps - self.warmup_steps)
cos = 1 + jnp.cos(jnp.pi * cos_itr)
cos_val = 0.5 * (self.max_val - self.min_val) * cos + self.min_val
# Select warmup_val if itr < warmup, else cosine val
values = jnp.array([warmup_val, cos_val])
index = jnp.sum(jnp.array(self.warmup_steps) < itr)
return jnp.take(values, index)
class WarmupExpDecay(Schedule):
"""Exponential step decay with linear warmup."""
def __init__(self, start_val, max_val, warmup_steps,
decay_factor, decay_interval):
self.start_val = start_val
self.max_val = max_val
self.warmup_steps = warmup_steps
self.decay_factor = decay_factor
self.decay_interval = decay_interval
def __call__(self, itr):
warmup_val = ((self.max_val - self.start_val) * (itr / self.warmup_steps)
+ self.start_val)
# How many decay steps have we taken?
num_decays = jnp.floor((itr - self.warmup_steps) / self.decay_interval)
exp_val = self.max_val * (self.decay_factor ** num_decays)
# Select warmup_val if itr < warmup, else exp_val
values = jnp.array([warmup_val, exp_val])
index = jnp.sum(jnp.array(self.warmup_steps) < itr)
return jnp.take(values, index)
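# Illustrative usage sketch (added for exposition, not part of the original
# file): schedules are plain callables over the integer step counter, so a
# schedule can be passed anywhere a float hyperparameter is expected and
# evaluated eagerly or under jit.
def _example_schedule_usage():
  sched = WarmupCosineDecay(start_val=0.0, min_val=0.0, max_val=1.0,
                            num_steps=1000, warmup_steps=100)
  # ~0.0 at step 0, ~1.0 at the end of warmup, decayed to ~min_val at the end.
  return [sched(0), sched(100), sched(1000)]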
class SGD(Optimizer):
"""Standard SGD with (nesterov) momentum and weight decay.
Attributes:
params: Either a dict mapping param names to JAX tensors, or a list where
each member of the list is a dict containing parameters
and hyperparameters, allowing one to specify param-specific hyperparams.
lr: Learning rate.
weight_decay: Weight decay parameter. Note that this is decay, not L2 reg.
momentum: Momentum parameter
dampening: Dampening parameter
nesterov: Bool indicating this optimizer will use the NAG formulation.
"""
defaults = {'weight_decay': None, 'momentum': None, 'dampening': 0,
'nesterov': None}
def __init__(self, params, lr, weight_decay=None,
momentum=None, dampening=0, nesterov=None):
super().__init__(
params, defaults={'lr': lr, 'weight_decay': weight_decay,
'momentum': momentum, 'dampening': dampening,
'nesterov': nesterov})
def create_buffers(self, name, param):
"""Prepares all momentum buffers for each parameter."""
state = {'step': jnp.zeros(jax.local_device_count())}
if self.get_hyper(name, 'momentum') is not None:
state['momentum'] = jnp.zeros_like(param)
return state
def update_param(self, param, grad, state, opt_params):
"""The actual update step for this optimizer."""
if param is None:
return param, state
# Apply weight decay
if opt_params.get('weight_decay') is not None:
grad = grad + param * opt_params['weight_decay']
# Update momentum buffers if needed
if 'momentum' in state:
state['momentum'] = (opt_params['momentum'] * state['momentum']
+ (1 - opt_params['dampening']) * grad)
if opt_params['nesterov'] is not None:
grad = grad + opt_params['momentum'] * state['momentum']
else:
grad = state['momentum']
state['step'] += 1
return param - opt_params['lr'] * grad, state
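# Illustrative usage sketch (added for exposition; assumes `states` holds
# buffers produced by `create_buffers` and attached via `plugin`, as in the
# surrounding Optimizer plumbing). Any hyperparameter, including `lr`, may
# also be a Schedule instance.
def _example_sgd_step(params, grads, states, itr):
  opt = SGD(params, lr=0.1, momentum=0.9, nesterov=True, weight_decay=1e-4)
  opt.plugin(states)
  new_params, new_states = opt.step(params, grads, states, itr=itr)
  return new_params, new_states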
class Adam(Optimizer):
"""Adam optimizer, Kingma & Ba, arxiv.org/abs/1412.6980.
Args:
params (iterable): nested list of params to optimize
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (default: 0)
use_adamw (bool, optional): If not None, use decoupled weight decay
as in arxiv.org/abs/1711.05101. The paper version adds an additional
"schedule" hyperparameter eta, which we instead just replace with the
learning rate following the PyTorch implementation.
Note that this implementation will not instantiate a buffer if the
beta term for that buffer is passed in as None, thus conserving memory.
"""
defaults = {'beta1': 0.9, 'beta2': 0.999, 'weight_decay': None, 'eps': 1e-8,
'use_adamw': None}
def __init__(self, params, lr, beta1=0.9, beta2=0.999,
eps=1e-8, weight_decay=None, use_adamw=None):
super().__init__(params=params,
defaults={'lr': lr, 'beta1': beta1,
'beta2': beta2, 'eps': eps,
'weight_decay': weight_decay,
'use_adamw': use_adamw})
def create_buffers(self, name, param):
"""Prepare exp_avg and exp_avg_sq buffers."""
state = {'step': jnp.zeros(jax.local_device_count())}
if self.get_hyper(name, 'beta1') is not None:
state['exp_avg'] = jnp.zeros_like(param)
if self.get_hyper(name, 'beta2') is not None:
state['exp_avg_sq'] = jnp.zeros_like(param)
return state
def update_param(self, param, grad, state, opt_params):
"""The actual update step for this optimizer."""
if param is None:
return param, state
state['step'] = state['step'] + 1
# Apply weight decay
if opt_params.get('weight_decay') is not None:
if opt_params.get('use_adamw') is not None:
param = param * (1 - opt_params['lr'] * opt_params['weight_decay'])
else:
grad = grad + param * opt_params['weight_decay']
# First moment
if 'exp_avg' in state:
bias_correction1 = 1 - opt_params['beta1'] ** state['step']
state['exp_avg'] = (state['exp_avg'] * opt_params['beta1']
+ (1 - opt_params['beta1']) * grad)
step_size = opt_params['lr'] * state['exp_avg'] / bias_correction1
else:
step_size = opt_params['lr'] * grad
# Second moment
if 'exp_avg_sq' in state:
bias_correction2 = 1 - opt_params['beta2'] ** state['step']
state['exp_avg_sq'] = (state['exp_avg_sq'] * opt_params['beta2']
+ (1 - opt_params['beta2']) * grad * grad)
denom = jnp.sqrt(state['exp_avg_sq']) * jax.lax.rsqrt(bias_correction2)
denom = denom + opt_params['eps']
else:
denom = jnp.abs(grad) + opt_params['eps'] # Add eps to avoid divide-by-0
return param - step_size / denom, state
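# Note (added for exposition): the bias corrections above matter most early
# in training. At step 1 with beta1=0.9, exp_avg = 0.1 * grad and
# bias_correction1 = 1 - 0.9 = 0.1, so the corrected step is lr * grad
# rather than a 10x-damped update.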
class RMSProp(Optimizer):
"""RMSProp optimizer, Tieleman and Hinton, ref: powerpoint slides.
Implements RMSProp as
rms = decay * rms{t-1} + (1-decay) * gradient ** 2
mom = momentum * mom{t-1} + learning_rate * g_t / sqrt(rms + epsilon)
param -= mom
Note that the rms buffer is initialized with ones as in TF, as opposed to
zeros as in all other implementations.
Args:
params (iterable): nested list of params to optimize
lr (float): learning rate (default: 1e-3)
decay (float): EMA decay rate for running estimate of squared gradient.
momentum (float or None): Use heavy ball momentum instead of instant grad.
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
    weight_decay (float, optional): standard weight decay, not AdamW-style
      decoupled decay (default: 0).
"""
defaults = {'weight_decay': None, 'eps': 1e-8}
def __init__(self, params, lr, decay, momentum, weight_decay=None, eps=1e-8):
super().__init__(params=params,
defaults={'lr': lr, 'decay': decay,
'momentum': momentum, 'eps': eps,
'weight_decay': weight_decay})
def create_buffers(self, name, param):
"""Prepare exp_avg and exp_avg_sq buffers."""
state = {'step': jnp.zeros(jax.local_device_count())}
state['rms'] = jnp.ones_like(param)
if self.get_hyper(name, 'momentum') is not None:
state['momentum'] = jnp.zeros_like(param)
return state
def update_param(self, param, grad, state, opt_params):
"""The actual update step for this optimizer."""
if param is None:
return param, state
state['step'] = state['step'] + 1
# Apply weight decay
if opt_params.get('weight_decay') is not None:
grad = grad + param * opt_params['weight_decay']
# EMA of the squared gradient
state['rms'] = (state['rms'] * opt_params['decay']
+ (1 - opt_params['decay']) * (grad ** 2))
scaled_grad = (opt_params['lr'] * grad
/ (state['rms'] + opt_params['eps']) ** 0.5)
    if 'momentum' in state:  # Buffer exists only if the momentum hyper is set.
state['momentum'] = (state['momentum'] * opt_params['momentum']
+ scaled_grad)
step_size = state['momentum']
else:
step_size = scaled_grad
return param - step_size, state
class Fromage(Optimizer):
"""Fromage optimizer, Bernstein et al. arXiv.org/abs/2002.03432.
This version optionally includes weight decay.
Attributes:
params (iterable): nested list of params to optimize
lr (float): learning rate.
    eps (float, optional): Minimum allowable norm, required in case
      parameters are zero-initialized (default: 1e-5).
weight_decay (float, optional): weight decay (default: 0).
"""
defaults = {'weight_decay': None, 'eps': 1e-5}
def __init__(self, params, lr, weight_decay=None, eps=1e-5):
super().__init__(
params, defaults={'lr': lr, 'weight_decay': weight_decay, 'eps': eps})
def create_buffers(self, name, param): # pylint: disable=unused-argument
"""Prepares all momentum buffers for each parameter."""
return {'step': jnp.zeros(1)}
def update_param(self, param, grad, state, opt_params):
"""The actual update step for this optimizer."""
if param is None:
return param, state
if opt_params['weight_decay'] is not None:
grad = grad + param * opt_params['weight_decay']
grad_norm = jnp.maximum(jnp.linalg.norm(grad), opt_params['eps'])
param_norm = jnp.maximum(jnp.linalg.norm(param), opt_params['eps'])
mult = jax.lax.rsqrt(1 + opt_params['lr'] ** 2)
out = (param - opt_params['lr'] * grad * (param_norm / grad_norm)) * mult
return out, state
def compute_norm(x, axis, keepdims):
"""Returns norm over arbitrary axis."""
norm = jnp.sum(x ** 2, axis=axis, keepdims=keepdims) ** 0.5
return norm
def unitwise_norm(x):
"""Computes norms of each output unit separately, assuming (HW)IO weights."""
if len(jnp.squeeze(x).shape) <= 1: # Scalars and vectors
axis = None
keepdims = False
elif len(x.shape) in [2, 3]: # Linear layers of shape IO
axis = 0
keepdims = True
elif len(x.shape) == 4: # Conv kernels of shape HWIO
axis = [0, 1, 2,]
keepdims = True
else:
    raise ValueError(f'Got a parameter with ndim not in [1, 2, 3, 4]! {x}')
return compute_norm(x, axis, keepdims)
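# Shape illustration (added for exposition): unitwise_norm returns one norm
# per output unit, with output channels last (IO linears, HWIO convs).
def _example_unitwise_norm_shapes():
  linear_w = jnp.ones((64, 128))      # I=64, O=128 -> norms of shape (1, 128)
  conv_w = jnp.ones((3, 3, 64, 128))  # HWIO -> norms of shape (1, 1, 1, 128)
  return unitwise_norm(linear_w).shape, unitwise_norm(conv_w).shape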
class SGD_AGC(Optimizer): # pylint:disable=invalid-name
"""SGD with Unit-Adaptive Gradient-Clipping.
References:
[Brock, Smith, De, Simonyan 2021] High-Performance Large-Scale Image
Recognition Without Normalization.
"""
defaults = {'weight_decay': None, 'momentum': None, 'dampening': 0,
'nesterov': None, 'clipping': 0.01, 'eps': 1e-3}
def __init__(self, params, lr, weight_decay=None,
momentum=None, dampening=0, nesterov=None,
clipping=0.01, eps=1e-3):
super().__init__(
params, defaults={'lr': lr, 'weight_decay': weight_decay,
'momentum': momentum, 'dampening': dampening,
'clipping': clipping, 'nesterov': nesterov,
'eps': eps})
def create_buffers(self, name, param):
return SGD.create_buffers(self, name, param)
def update_param(self, param, grad, state, opt_params):
"""Clips grads if necessary, then applies the optimizer update."""
if param is None:
return param, state
if opt_params['clipping'] is not None:
param_norm = jnp.maximum(unitwise_norm(param), opt_params['eps'])
grad_norm = unitwise_norm(grad)
max_norm = param_norm * opt_params['clipping']
# If grad norm > clipping * param_norm, rescale
trigger = grad_norm > max_norm
# Note the max(||G||, 1e-6) is technically unnecessary here, as
# the clipping shouldn't trigger if the grad norm is zero,
# but we include it in practice as a "just-in-case".
clipped_grad = grad * (max_norm / jnp.maximum(grad_norm, 1e-6))
grad = jnp.where(trigger, clipped_grad, grad)
return SGD.update_param(self, param, grad, state, opt_params)
class Hybrid(Optimizer):
"""Optimizer which permits passing param groups with different base opts.
The API for this class follows the case for any other optimizer where one
specifies a list of dicts with separate hyperparams, but in this case it
requires the user to also specify an 'opt' key for each group, such as e.g.
[{'params': params0, 'opt': optim.Adam, 'lr': 0.1}].
  The user must also provide values for any arg in the selected optimizers
  which does not have an associated default value.
  """
def __init__(self, param_groups):
if any(['opt' not in group for group in param_groups]):
raise ValueError('All parameter groups must have an opt key!')
self.defaults = ChainMap(*[group['opt'].defaults for group in param_groups])
super().__init__(param_groups, defaults=dict(self.defaults))
def create_buffers(self, name, param):
return self.get_hyper(name, 'opt').create_buffers(self, name, param)
def update_param(self, param, grad, state, opt_params):
return opt_params['opt'].update_param(self, param, grad, state, opt_params)
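# Illustrative usage sketch (added for exposition, hypothetical parameter
# groups): convolutional weights trained with SGD, classifier weights with
# Adam. Each group needs an 'opt' key plus any of that optimizer's arguments
# that lack defaults (here, 'lr').
def _example_hybrid(conv_params, fc_params):
  return Hybrid([
      {'params': conv_params, 'opt': SGD, 'lr': 0.1, 'momentum': 0.9},
      {'params': fc_params, 'opt': Adam, 'lr': 1e-3},
  ])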
|
deepmind-research-master
|
nfnets/optim.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Architecture definitions for different models."""
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
# Model settings for NF-RegNets
nf_regnet_params = {
'B0': {'width': [48, 104, 208, 440], 'depth': [1, 3, 6, 6],
'train_imsize': 192, 'test_imsize': 224,
'drop_rate': 0.2},
'B1': {'width': [48, 104, 208, 440], 'depth': [2, 4, 7, 7],
'train_imsize': 224, 'test_imsize': 256,
'drop_rate': 0.2},
'B2': {'width': [56, 112, 232, 488], 'depth': [2, 4, 8, 8],
'train_imsize': 240, 'test_imsize': 272,
'drop_rate': 0.3},
'B3': {'width': [56, 128, 248, 528], 'depth': [2, 5, 9, 9],
'train_imsize': 288, 'test_imsize': 320,
'drop_rate': 0.3},
'B4': {'width': [64, 144, 288, 616], 'depth': [2, 6, 11, 11],
'train_imsize': 320, 'test_imsize': 384,
'drop_rate': 0.4},
'B5': {'width': [80, 168, 336, 704], 'depth': [3, 7, 14, 14],
'train_imsize': 384, 'test_imsize': 456,
'drop_rate': 0.4},
'B6': {'width': [88, 184, 376, 792], 'depth': [3, 8, 16, 16],
'train_imsize': 448, 'test_imsize': 528,
'drop_rate': 0.5},
'B7': {'width': [96, 208, 416, 880], 'depth': [4, 10, 19, 19],
'train_imsize': 512, 'test_imsize': 600,
'drop_rate': 0.5},
'B8': {'width': [104, 232, 456, 968], 'depth': [4, 11, 22, 22],
'train_imsize': 600, 'test_imsize': 672,
'drop_rate': 0.5},
}
nfnet_params = {}
# F-series models
nfnet_params.update(**{
'F0': {
'width': [256, 512, 1536, 1536], 'depth': [1, 2, 6, 3],
'train_imsize': 192, 'test_imsize': 256,
'RA_level': '405', 'drop_rate': 0.2},
'F1': {
'width': [256, 512, 1536, 1536], 'depth': [2, 4, 12, 6],
'train_imsize': 224, 'test_imsize': 320,
'RA_level': '410', 'drop_rate': 0.3},
'F2': {
'width': [256, 512, 1536, 1536], 'depth': [3, 6, 18, 9],
'train_imsize': 256, 'test_imsize': 352,
'RA_level': '410', 'drop_rate': 0.4},
'F3': {
'width': [256, 512, 1536, 1536], 'depth': [4, 8, 24, 12],
'train_imsize': 320, 'test_imsize': 416,
'RA_level': '415', 'drop_rate': 0.4},
'F4': {
'width': [256, 512, 1536, 1536], 'depth': [5, 10, 30, 15],
'train_imsize': 384, 'test_imsize': 512,
'RA_level': '415', 'drop_rate': 0.5},
'F5': {
'width': [256, 512, 1536, 1536], 'depth': [6, 12, 36, 18],
'train_imsize': 416, 'test_imsize': 544,
'RA_level': '415', 'drop_rate': 0.5},
'F6': {
'width': [256, 512, 1536, 1536], 'depth': [7, 14, 42, 21],
'train_imsize': 448, 'test_imsize': 576,
'RA_level': '415', 'drop_rate': 0.5},
'F7': {
'width': [256, 512, 1536, 1536], 'depth': [8, 16, 48, 24],
'train_imsize': 480, 'test_imsize': 608,
'RA_level': '415', 'drop_rate': 0.5},
})
# Minor variants FN+, slightly wider
nfnet_params.update(**{
**{f'{key}+': {**nfnet_params[key], 'width': [384, 768, 2048, 2048],}
for key in nfnet_params}
})
# Nonlinearities with magic constants (gamma) baked in.
# Note that not all nonlinearities will be stable, especially if they are
# not perfectly monotonic. Good choices include relu, silu, and gelu.
nonlinearities = {
'identity': lambda x: x,
'celu': lambda x: jax.nn.celu(x) * 1.270926833152771,
'elu': lambda x: jax.nn.elu(x) * 1.2716004848480225,
'gelu': lambda x: jax.nn.gelu(x) * 1.7015043497085571,
'glu': lambda x: jax.nn.glu(x) * 1.8484294414520264,
'leaky_relu': lambda x: jax.nn.leaky_relu(x) * 1.70590341091156,
'log_sigmoid': lambda x: jax.nn.log_sigmoid(x) * 1.9193484783172607,
'log_softmax': lambda x: jax.nn.log_softmax(x) * 1.0002083778381348,
'relu': lambda x: jax.nn.relu(x) * 1.7139588594436646,
'relu6': lambda x: jax.nn.relu6(x) * 1.7131484746932983,
'selu': lambda x: jax.nn.selu(x) * 1.0008515119552612,
'sigmoid': lambda x: jax.nn.sigmoid(x) * 4.803835391998291,
'silu': lambda x: jax.nn.silu(x) * 1.7881293296813965,
'soft_sign': lambda x: jax.nn.soft_sign(x) * 2.338853120803833,
'softplus': lambda x: jax.nn.softplus(x) * 1.9203323125839233,
'tanh': lambda x: jnp.tanh(x) * 1.5939117670059204,
}
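# The magic constants above rescale each nonlinearity so its output has
# roughly unit variance for a unit Gaussian input. A sketch of how such a
# gamma can be estimated empirically (added for exposition, following the
# Normalizer-Free papers; not code from the original file):
def _estimate_gamma(fn, n=1024, seed=0):
  x = jax.random.normal(jax.random.PRNGKey(seed), (n, n))
  return 1. / jnp.std(fn(x))  # e.g. _estimate_gamma(jax.nn.gelu) ~= 1.7015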
class WSConv2D(hk.Conv2D):
"""2D Convolution with Scaled Weight Standardization and affine gain+bias."""
@hk.transparent
def standardize_weight(self, weight, eps=1e-4):
"""Apply scaled WS with affine gain."""
mean = jnp.mean(weight, axis=(0, 1, 2), keepdims=True)
var = jnp.var(weight, axis=(0, 1, 2), keepdims=True)
fan_in = np.prod(weight.shape[:-1])
# Get gain
gain = hk.get_parameter('gain', shape=(weight.shape[-1],),
dtype=weight.dtype, init=jnp.ones)
# Manually fused normalization, eq. to (w - mean) * gain / sqrt(N * var)
scale = jax.lax.rsqrt(jnp.maximum(var * fan_in, eps)) * gain
shift = mean * scale
return weight * scale - shift
def __call__(self, inputs: jnp.ndarray, eps: float = 1e-4) -> jnp.ndarray:
w_shape = self.kernel_shape + (
inputs.shape[self.channel_index] // self.feature_group_count,
self.output_channels)
# Use fan-in scaled init, but WS is largely insensitive to this choice.
w_init = hk.initializers.VarianceScaling(1.0, 'fan_in', 'normal')
w = hk.get_parameter('w', w_shape, inputs.dtype, init=w_init)
weight = self.standardize_weight(w, eps)
out = jax.lax.conv_general_dilated(
inputs, weight, window_strides=self.stride, padding=self.padding,
lhs_dilation=self.lhs_dilation, rhs_dilation=self.kernel_dilation,
dimension_numbers=self.dimension_numbers,
feature_group_count=self.feature_group_count)
# Always add bias
bias_shape = (self.output_channels,)
bias = hk.get_parameter('bias', bias_shape, inputs.dtype, init=jnp.zeros)
return out + bias
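# Minimal usage sketch (added for exposition): like any hk.Module, WSConv2D
# must be constructed and applied inside an hk.transform.
def _example_wsconv():
  def forward(x):
    return WSConv2D(output_channels=16, kernel_shape=3, padding='SAME')(x)
  fwd = hk.transform(forward)
  x = jnp.zeros((8, 32, 32, 3))  # NHWC
  params = fwd.init(jax.random.PRNGKey(0), x)
  return fwd.apply(params, None, x)  # -> (8, 32, 32, 16)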
def signal_metrics(x, i):
"""Things to measure about a NCHW tensor activation."""
metrics = {}
# Average channel-wise mean-squared
metrics[f'avg_sq_mean_{i}'] = jnp.mean(jnp.mean(x, axis=[0, 1, 2])**2)
# Average channel variance
metrics[f'avg_var_{i}'] = jnp.mean(jnp.var(x, axis=[0, 1, 2]))
return metrics
def count_conv_flops(in_ch, conv, h, w):
"""For a conv layer with in_ch inputs, count the FLOPS."""
# How many outputs are we producing? Note this is wrong for VALID padding.
output_shape = conv.output_channels * (h * w) / np.prod(conv.stride)
# At each OHW location we do computation equal to (I//G) * kh * kw
flop_per_loc = (in_ch / conv.feature_group_count)
flop_per_loc *= np.prod(conv.kernel_shape)
return output_shape * flop_per_loc
class SqueezeExcite(hk.Module):
"""Simple Squeeze+Excite module."""
def __init__(self, in_ch, out_ch, se_ratio=0.5,
hidden_ch=None, activation=jax.nn.relu,
name=None):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
if se_ratio is None:
if hidden_ch is None:
raise ValueError('Must provide one of se_ratio or hidden_ch')
self.hidden_ch = hidden_ch
else:
self.hidden_ch = max(1, int(self.in_ch * se_ratio))
self.activation = activation
self.fc0 = hk.Linear(self.hidden_ch, with_bias=True)
self.fc1 = hk.Linear(self.out_ch, with_bias=True)
def __call__(self, x):
h = jnp.mean(x, axis=[1, 2]) # Mean pool over HW extent
h = self.fc1(self.activation(self.fc0(h)))
h = jax.nn.sigmoid(h)[:, None, None] # Broadcast along H, W
return h
class StochDepth(hk.Module):
"""Batchwise Dropout used in EfficientNet, optionally sans rescaling."""
def __init__(self, drop_rate, scale_by_keep=False, name=None):
super().__init__(name=name)
self.drop_rate = drop_rate
self.scale_by_keep = scale_by_keep
def __call__(self, x, is_training) -> jnp.ndarray:
if not is_training:
return x
batch_size = x.shape[0]
r = jax.random.uniform(hk.next_rng_key(), [batch_size, 1, 1, 1],
dtype=x.dtype)
keep_prob = 1. - self.drop_rate
binary_tensor = jnp.floor(keep_prob + r)
if self.scale_by_keep:
x = x / keep_prob
return x * binary_tensor
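# Note (added for exposition): binary_tensor is 1 with probability keep_prob
# for each example, so with scale_by_keep=True the layer is the identity in
# expectation: E[x * b / keep_prob] = x.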
|
deepmind-research-master
|
nfnets/base.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Norm-Free Nets."""
# pylint: disable=unused-import
# pylint: disable=invalid-name
import functools
import haiku as hk
import jax
import jax.numpy as jnp
import jax.random as jrandom
import numpy as np
from nfnets import base
class NFNet(hk.Module):
"""Normalizer-Free Networks with an improved architecture.
References:
[Brock, Smith, De, Simonyan 2021] High-Performance Large-Scale Image
Recognition Without Normalization.
"""
variant_dict = base.nfnet_params
def __init__(self, num_classes, variant='F0',
width=1.0, se_ratio=0.5,
alpha=0.2, stochdepth_rate=0.1, drop_rate=None,
activation='gelu', fc_init=None,
final_conv_mult=2, final_conv_ch=None,
use_two_convs=True,
name='NFNet'):
super().__init__(name=name)
self.num_classes = num_classes
self.variant = variant
self.width = width
self.se_ratio = se_ratio
# Get variant info
block_params = self.variant_dict[self.variant]
self.train_imsize = block_params['train_imsize']
self.test_imsize = block_params['test_imsize']
self.width_pattern = block_params['width']
self.depth_pattern = block_params['depth']
self.bneck_pattern = block_params.get('expansion', [0.5] * 4)
self.group_pattern = block_params.get('group_width', [128] * 4)
self.big_pattern = block_params.get('big_width', [True] * 4)
self.activation = base.nonlinearities[activation]
if drop_rate is None:
self.drop_rate = block_params['drop_rate']
else:
self.drop_rate = drop_rate
self.which_conv = base.WSConv2D
# Stem
ch = self.width_pattern[0] // 2
self.stem = hk.Sequential([
self.which_conv(16, kernel_shape=3, stride=2,
padding='SAME', name='stem_conv0'),
self.activation,
self.which_conv(32, kernel_shape=3, stride=1,
padding='SAME', name='stem_conv1'),
self.activation,
self.which_conv(64, kernel_shape=3, stride=1,
padding='SAME', name='stem_conv2'),
self.activation,
self.which_conv(ch, kernel_shape=3, stride=2,
padding='SAME', name='stem_conv3'),
])
# Body
self.blocks = []
expected_std = 1.0
num_blocks = sum(self.depth_pattern)
index = 0 # Overall block index
stride_pattern = [1, 2, 2, 2]
block_args = zip(self.width_pattern, self.depth_pattern, self.bneck_pattern,
self.group_pattern, self.big_pattern, stride_pattern)
for (block_width, stage_depth, expand_ratio,
group_size, big_width, stride) in block_args:
for block_index in range(stage_depth):
# Scalar pre-multiplier so each block sees an N(0,1) input at init
        beta = 1. / expected_std
# Block stochastic depth drop-rate
block_stochdepth_rate = stochdepth_rate * index / num_blocks
out_ch = (int(block_width * self.width))
self.blocks += [NFBlock(ch, out_ch,
expansion=expand_ratio, se_ratio=se_ratio,
group_size=group_size,
stride=stride if block_index == 0 else 1,
beta=beta, alpha=alpha,
activation=self.activation,
which_conv=self.which_conv,
stochdepth_rate=block_stochdepth_rate,
big_width=big_width,
use_two_convs=use_two_convs,
)]
ch = out_ch
index += 1
# Reset expected std but still give it 1 block of growth
if block_index == 0:
expected_std = 1.0
        expected_std = (expected_std ** 2 + alpha ** 2) ** 0.5
# Head
if final_conv_mult is None:
if final_conv_ch is None:
raise ValueError('Must provide one of final_conv_mult or final_conv_ch')
ch = final_conv_ch
else:
ch = int(final_conv_mult * ch)
self.final_conv = self.which_conv(ch, kernel_shape=1,
padding='SAME', name='final_conv')
# By default, initialize with N(0, 0.01)
if fc_init is None:
fc_init = hk.initializers.RandomNormal(mean=0, stddev=0.01)
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
def __call__(self, x, is_training=True, return_metrics=False):
"""Return the output of the final layer without any [log-]softmax."""
# Stem
outputs = {}
out = self.stem(x)
if return_metrics:
outputs.update(base.signal_metrics(out, 0))
# Blocks
for i, block in enumerate(self.blocks):
out, res_avg_var = block(out, is_training=is_training)
if return_metrics:
outputs.update(base.signal_metrics(out, i + 1))
outputs[f'res_avg_var_{i}'] = res_avg_var
# Final-conv->activation, pool, dropout, classify
out = self.activation(self.final_conv(out))
pool = jnp.mean(out, [1, 2])
outputs['pool'] = pool
# Optionally apply dropout
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
outputs['logits'] = self.fc(pool)
return outputs
def count_flops(self, h, w):
flops = []
ch = 3
for module in self.stem.layers:
if isinstance(module, hk.Conv2D):
flops += [base.count_conv_flops(ch, module, h, w)]
if any([item > 1 for item in module.stride]):
h, w = h / module.stride[0], w / module.stride[1]
ch = module.output_channels
# Body FLOPs
for block in self.blocks:
flops += [block.count_flops(h, w)]
if block.stride > 1:
h, w = h / block.stride, w / block.stride
# Head module FLOPs
out_ch = self.blocks[-1].out_ch
flops += [base.count_conv_flops(out_ch, self.final_conv, h, w)]
# Count flops for classifier
flops += [self.final_conv.output_channels * self.fc.output_size]
return flops, sum(flops)
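# Minimal forward-pass sketch (added for exposition, not part of the
# original file). Dropout and stochastic depth draw from Haiku's RNG, so an
# rng key is required in apply() when is_training=True.
def _example_nfnet_forward():
  def forward(x):
    return NFNet(num_classes=1000, variant='F0')(x, is_training=True)
  fwd = hk.transform(forward)
  x = jnp.zeros((2, 192, 192, 3))  # NHWC, at the F0 train resolution.
  params = fwd.init(jax.random.PRNGKey(0), x)
  return fwd.apply(params, jax.random.PRNGKey(1), x)['logits']  # (2, 1000)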
class NFBlock(hk.Module):
"""Normalizer-Free Net Block."""
def __init__(self, in_ch, out_ch, expansion=0.5, se_ratio=0.5,
kernel_shape=3, group_size=128, stride=1,
beta=1.0, alpha=0.2,
which_conv=base.WSConv2D, activation=jax.nn.gelu,
big_width=True, use_two_convs=True,
stochdepth_rate=None, name=None):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
self.expansion = expansion
self.se_ratio = se_ratio
self.kernel_shape = kernel_shape
self.activation = activation
self.beta, self.alpha = beta, alpha
# Mimic resnet style bigwidth scaling?
width = int((self.out_ch if big_width else self.in_ch) * expansion)
    # Round the expanded width down to a multiple of the group size
self.groups = width // group_size
self.width = group_size * self.groups
self.stride = stride
self.use_two_convs = use_two_convs
# Conv 0 (typically expansion conv)
self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME',
name='conv0')
# Grouped NxN conv
self.conv1 = which_conv(self.width, kernel_shape=kernel_shape,
stride=stride, padding='SAME',
feature_group_count=self.groups, name='conv1')
if self.use_two_convs:
self.conv1b = which_conv(self.width, kernel_shape=kernel_shape,
stride=1, padding='SAME',
feature_group_count=self.groups, name='conv1b')
# Conv 2, typically projection conv
self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME',
name='conv2')
# Use shortcut conv on channel change or downsample.
self.use_projection = stride > 1 or self.in_ch != self.out_ch
if self.use_projection:
self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1,
padding='SAME', name='conv_shortcut')
# Squeeze + Excite Module
self.se = base.SqueezeExcite(self.out_ch, self.out_ch, self.se_ratio)
# Are we using stochastic depth?
self._has_stochdepth = (stochdepth_rate is not None and
stochdepth_rate > 0. and stochdepth_rate < 1.0)
if self._has_stochdepth:
self.stoch_depth = base.StochDepth(stochdepth_rate)
def __call__(self, x, is_training):
out = self.activation(x) * self.beta
if self.stride > 1: # Average-pool downsample.
shortcut = hk.avg_pool(out, window_shape=(1, 2, 2, 1),
strides=(1, 2, 2, 1), padding='SAME')
if self.use_projection:
shortcut = self.conv_shortcut(shortcut)
elif self.use_projection:
shortcut = self.conv_shortcut(out)
else:
shortcut = x
out = self.conv0(out)
out = self.conv1(self.activation(out))
if self.use_two_convs:
out = self.conv1b(self.activation(out))
out = self.conv2(self.activation(out))
out = (self.se(out) * 2) * out # Multiply by 2 for rescaling
# Get average residual standard deviation for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
# Apply stochdepth if applicable.
if self._has_stochdepth:
out = self.stoch_depth(out, is_training)
# SkipInit Gain
out = out * hk.get_parameter('skip_gain', (), out.dtype, init=jnp.zeros)
return out * self.alpha + shortcut, res_avg_var
def count_flops(self, h, w):
# Count conv FLOPs based on input HW
expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w)
# If block is strided we decrease resolution here.
dw_flops = base.count_conv_flops(self.width, self.conv1, h, w)
if self.stride > 1:
h, w = h / self.stride, w / self.stride
if self.use_two_convs:
dw_flops += base.count_conv_flops(self.width, self.conv1b, h, w)
if self.use_projection:
sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w)
else:
sc_flops = 0
# SE flops happen on avg-pooled activations
se_flops = self.se.fc0.output_size * self.out_ch
se_flops += self.se.fc0.output_size * self.se.fc1.output_size
contract_flops = base.count_conv_flops(self.width, self.conv2, h, w)
return sum([expand_flops, dw_flops, se_flops, contract_flops, sc_flops])
|
deepmind-research-master
|
nfnets/nfnet.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config, focused on model evaluation."""
from ml_collections import config_dict
def get_config(filter_time_intervals=None):
"""Return config object for training."""
config = config_dict.ConfigDict()
config.eval_strategy = config_dict.ConfigDict()
config.eval_strategy.class_name = 'OneDeviceConfig'
config.eval_strategy.kwargs = config_dict.ConfigDict(
dict(device_type='v100'))
## Experiment config.
config.experiment_kwargs = config_dict.ConfigDict(dict(
resnet_kwargs=dict(
blocks_per_group_list=[3, 4, 6, 3], # This choice is ResNet50.
bn_config=dict(
decay_rate=0.9,
eps=1e-5),
resnet_v2=False,
additional_features_mode='mlp',
),
optimizer_config=dict(
class_name='Momentum',
kwargs={'momentum': 0.9},
# Set up the learning rate schedule.
lr_init=0.025,
lr_factor=0.1,
lr_schedule=(50e3, 100e3, 150e3),
gradient_clip=5.,
),
l2_regularization=1e-4,
total_train_batch_size=128,
train_net_args={'is_training': True},
eval_batch_size=128,
eval_net_args={'is_training': True},
data_config=dict(
# dataset loading
dataset_path=None,
num_val_splits=10,
val_split=0,
# image cropping
image_size=(80, 80, 7),
train_crop_type='crop_fixed',
test_crop_type='crop_fixed',
n_crop_repeat=1,
train_augmentations=dict(
rotation_and_flip=True,
rescaling=True,
translation=True,
),
test_augmentations=dict(
rotation_and_flip=False,
rescaling=False,
translation=False,
),
test_time_ensembling='sum',
num_eval_buckets=5,
eval_confidence_interval=95,
task='grounded_unnormalized_regression',
loss_config=dict(
loss='mse',
mse_normalize=False,
),
model_uncertainty=True,
additional_features='',
time_filter_intervals=filter_time_intervals,
class_boundaries={
'0': [[-1., 0]],
'1': [[0, 1.]]
},
frequencies_to_use='all',
),
n_train_epochs=100
))
return config
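# Illustrative usage (added for exposition; the interval values below are
# hypothetical): the returned ConfigDict can be overridden before launch.
def _example_config_override():
  config = get_config(filter_time_intervals=[[-1.0, 1.0]])
  config.experiment_kwargs.total_train_batch_size = 256
  config.experiment_kwargs.data_config.val_split = 1
  return config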
|
deepmind-research-master
|
galaxy_mergers/config.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers to visualize gradients and other interpretability analysis."""
import numpy as np
import tensorflow.compat.v2 as tf
def rotate_by_right_angle_multiple(image, rot=90):
"""Rotate an image by right angles."""
if rot not in [0, 90, 180, 270]:
raise ValueError(f"Cannot rotate by non-90 degree angle {rot}")
if rot in [90, -270]:
image = np.transpose(image, (1, 0, 2))
image = image[::-1]
elif rot in [180, -180]:
image = image[::-1, ::-1]
elif rot in [270, -90]:
image = np.transpose(image, (1, 0, 2))
image = image[:, ::-1]
return image
def compute_gradient(images, evaluator, is_training=False):
inputs = tf.Variable(images[None], dtype=tf.float32)
with tf.GradientTape() as tape:
tape.watch(inputs)
time_sigma = evaluator.model(inputs, None, is_training)
grad_time = tape.gradient(time_sigma[:, 0], inputs)
return grad_time, time_sigma
def compute_grads_for_rotations(images, evaluator, is_training=False):
test_gradients, test_outputs = [], []
for rotation in np.arange(0, 360, 90):
images_rot = rotate_by_right_angle_multiple(images, rotation)
grads, time_sigma = compute_gradient(images_rot, evaluator, is_training)
grads = np.squeeze(grads.numpy())
inv_grads = rotate_by_right_angle_multiple(grads, -rotation)
test_gradients.append(inv_grads)
test_outputs.append(time_sigma.numpy())
return np.squeeze(test_gradients), np.squeeze(test_outputs)
def compute_grads_for_rotations_and_flips(images, evaluator):
grads, time_sigma = compute_grads_for_rotations(images, evaluator)
grads_f, time_sigma_f = compute_grads_for_rotations(images[::-1], evaluator)
grads_f = grads_f[:, ::-1]
all_grads = np.concatenate([grads, grads_f], 0)
model_outputs = np.concatenate((time_sigma, time_sigma_f), 0)
return all_grads, model_outputs
|
deepmind-research-master
|
galaxy_mergers/interpretability_helpers.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fork of a generic ResNet to incorporate additional cosmological features."""
from typing import Mapping, Optional, Sequence, Text
import sonnet.v2 as snt
import tensorflow.compat.v2 as tf
class ResNet(snt.Module):
"""ResNet model."""
def __init__(self,
n_repeats: int,
blocks_per_group_list: Sequence[int],
num_classes: int,
bn_config: Optional[Mapping[Text, float]] = None,
resnet_v2: bool = False,
channels_per_group_list: Sequence[int] = (256, 512, 1024, 2048),
use_additional_features: bool = False,
additional_features_mode: Optional[Text] = "per_block",
name: Optional[Text] = None):
"""Constructs a ResNet model.
Args:
n_repeats: The batch dimension for the input is expected to have the form
`B = b * n_repeats`. After the conv stack, the logits for the
`n_repeats` replicas are reduced, leading to an output batch dimension
of `b`.
blocks_per_group_list: A sequence of length 4 that indicates the number of
blocks created in each group.
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers. By default the `decay_rate` is
`0.9` and `eps` is `1e-5`.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
False.
channels_per_group_list: A sequence of length 4 that indicates the number
of channels used for each block in each group.
use_additional_features: If true, additional vector features will be
concatenated to the residual stack before logits are computed.
additional_features_mode: Mode for processing additional features.
Supported modes: 'mlp' and 'per_block'.
name: Name of the module.
"""
super(ResNet, self).__init__(name=name)
self._n_repeats = n_repeats
if bn_config is None:
bn_config = {"decay_rate": 0.9, "eps": 1e-5}
self._bn_config = bn_config
self._resnet_v2 = resnet_v2
# Number of blocks in each group for ResNet.
if len(blocks_per_group_list) != 4:
raise ValueError(
"`blocks_per_group_list` must be of length 4 not {}".format(
len(blocks_per_group_list)))
self._blocks_per_group_list = blocks_per_group_list
# Number of channels in each group for ResNet.
if len(channels_per_group_list) != 4:
raise ValueError(
"`channels_per_group_list` must be of length 4 not {}".format(
len(channels_per_group_list)))
self._channels_per_group_list = channels_per_group_list
self._use_additional_features = use_additional_features
self._additional_features_mode = additional_features_mode
self._initial_conv = snt.Conv2D(
output_channels=64,
kernel_shape=7,
stride=2,
with_bias=False,
padding="SAME",
name="initial_conv")
if not self._resnet_v2:
self._initial_batchnorm = snt.BatchNorm(
create_scale=True,
create_offset=True,
name="initial_batchnorm",
**bn_config)
self._block_groups = []
strides = [1, 2, 2, 2]
for i in range(4):
self._block_groups.append(
snt.nets.resnet.BlockGroup(
channels=self._channels_per_group_list[i],
num_blocks=self._blocks_per_group_list[i],
stride=strides[i],
bn_config=bn_config,
resnet_v2=resnet_v2,
name="block_group_%d" % (i)))
if self._resnet_v2:
self._final_batchnorm = snt.BatchNorm(
create_scale=True,
create_offset=True,
name="final_batchnorm",
**bn_config)
self._logits = snt.Linear(
output_size=num_classes,
w_init=snt.initializers.VarianceScaling(scale=2.0), name="logits")
if self._use_additional_features:
self._embedding = LinearBNReLU(output_size=16, name="embedding",
**bn_config)
if self._additional_features_mode == "mlp":
self._feature_repr = LinearBNReLU(
output_size=self._channels_per_group_list[-1], name="features_repr",
**bn_config)
elif self._additional_features_mode == "per_block":
self._feature_repr = []
for i, ch in enumerate(self._channels_per_group_list):
self._feature_repr.append(
LinearBNReLU(output_size=ch, name=f"features_{i}", **bn_config))
else:
raise ValueError(f"Unsupported addiitonal features mode: "
f"{additional_features_mode}")
def __call__(self, inputs, features, is_training):
net = inputs
net = self._initial_conv(net)
if not self._resnet_v2:
net = self._initial_batchnorm(net, is_training=is_training)
net = tf.nn.relu(net)
net = tf.nn.max_pool2d(
net, ksize=3, strides=2, padding="SAME", name="initial_max_pool")
if self._use_additional_features:
assert features is not None
features = self._embedding(features, is_training=is_training)
for i, block_group in enumerate(self._block_groups):
net = block_group(net, is_training)
if (self._use_additional_features and
self._additional_features_mode == "per_block"):
features_i = self._feature_repr[i](features, is_training=is_training)
# support for n_repeats > 1
features_i = tf.repeat(features_i, self._n_repeats, axis=0)
        net += features_i[:, None, None, :]  # expand to spatial resolution
if self._resnet_v2:
net = self._final_batchnorm(net, is_training=is_training)
net = tf.nn.relu(net)
net = tf.reduce_mean(net, axis=[1, 2], name="final_avg_pool")
# Re-split the batch dimension
net = tf.reshape(net, [-1, self._n_repeats] + net.shape.as_list()[1:])
# Average over the various repeats of the input (e.g. those could have
# corresponded to different crops).
net = tf.reduce_mean(net, axis=1)
if (self._use_additional_features and
self._additional_features_mode == "mlp"):
net += self._feature_repr(features, is_training=is_training)
return self._logits(net)
class LinearBNReLU(snt.Module):
"""Wrapper class for Linear layer with Batch Norm and ReLU activation."""
def __init__(self, output_size=64,
w_init=snt.initializers.VarianceScaling(scale=2.0),
name="linear", **bn_config):
"""Constructs a LinearBNReLU module.
Args:
output_size: Output dimension.
w_init: weight Initializer for snt.Linear.
name: Name of the module.
**bn_config: Optional parameters to be passed to snt.BatchNorm.
"""
super(LinearBNReLU, self).__init__(name=name)
self._linear = snt.Linear(output_size=output_size, w_init=w_init,
name=f"{name}_linear")
self._bn = snt.BatchNorm(create_scale=True, create_offset=True,
name=f"{name}_bn", **bn_config)
def __call__(self, x, is_training):
x = self._linear(x)
x = self._bn(x, is_training=is_training)
return tf.nn.relu(x)
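# Minimal usage sketch (added for exposition): the batch dimension is
# b * n_repeats and logits are averaged over the repeats, so 8 input images
# with n_repeats=2 yield 4 rows of logits.
def _example_resnet_forward():
  model = ResNet(n_repeats=2, blocks_per_group_list=[3, 4, 6, 3],
                 num_classes=2)
  images = tf.zeros([8, 80, 80, 7])  # 4 examples x 2 repeats, NHWC.
  return model(images, features=None, is_training=True)  # -> [4, 2]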
|
deepmind-research-master
|
galaxy_mergers/model.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers to pre-process Antennae galaxy images."""
import collections
import os
from astropy.io import fits
import numpy as np
from scipy import ndimage
import tensorflow.compat.v2 as tf
def norm_antennae_images(images, scale=1000):
return tf.math.asinh(images/scale)
def renorm_antennae(images):
median = np.percentile(images.numpy().flatten(), 50)
img_range = np.ptp(images.numpy().flatten())
return (images - median) / (img_range / 2)
def get_antennae_images(antennae_fits_dir):
"""Load the raw Antennae galaxy images."""
all_fits_files = [
os.path.join(antennae_fits_dir, f)
for f in os.listdir(antennae_fits_dir)
]
freq_mapping = {'red': 160, 'blue': 850}
paired_fits_files = collections.defaultdict(list)
for f in all_fits_files:
redshift = float(f[-8:-5])
paired_fits_files[redshift].append(f)
for redshift, files in paired_fits_files.items():
paired_fits_files[redshift] = sorted(
files, key=lambda f: freq_mapping[f.split('/')[-1].split('_')[0]])
print('Reading files:', paired_fits_files)
print('Redshifts:', sorted(paired_fits_files.keys()))
galaxy_views = collections.defaultdict(list)
for redshift in paired_fits_files:
for view_path in paired_fits_files[redshift]:
with open(view_path, 'rb') as f:
fits_data = fits.open(f)
galaxy_views[redshift].append(np.array(fits_data[0].data))
batched_images = []
for redshift in paired_fits_files:
img = tf.constant(np.array(galaxy_views[redshift]))
img = tf.transpose(img, (1, 2, 0))
img = tf.image.resize(img, size=(60, 60))
batched_images.append(img)
return tf.stack(batched_images)
def preprocess_antennae_images(antennae_images):
"""Pre-process the Antennae galaxy images into a reasonable range."""
rotated_antennae_images = [
ndimage.rotate(img, 10, reshape=True, cval=-1)[10:-10, 10:-10]
for img in antennae_images
]
rotated_antennae_images = [
np.clip(img, 0, 1e9) for img in rotated_antennae_images
]
rotated_antennae_images = tf.stack(rotated_antennae_images)
normed_antennae_images = norm_antennae_images(rotated_antennae_images)
normed_antennae_images = tf.clip_by_value(normed_antennae_images, 1, 4.5)
renormed_antennae_images = renorm_antennae(normed_antennae_images)
return renormed_antennae_images
|
deepmind-research-master
|
galaxy_mergers/antennae_helpers.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pre-processing functions for input data."""
import functools
from absl import logging
import tensorflow.compat.v2 as tf
from galaxy_mergers import losses
CROP_TYPE_NONE = 'crop_none'
CROP_TYPE_FIXED = 'crop_fixed'
CROP_TYPE_RANDOM = 'crop_random'
DATASET_FREQUENCY_MEAN = 4.0
DATASET_FREQUENCY_RANGE = 8.0
PHYSICAL_FEATURES_MIN_MAX = {
'redshift': (0.572788, 2.112304),
'mass': (9.823963, 10.951282)
}
ALL_FREQUENCIES = [105, 125, 160, 435, 606, 775, 850]
VALID_ADDITIONAL_FEATURES = ['redshift', 'sequence_average_redshift', 'mass']
def _make_padding_sizes(pad_size, random_centering):
if random_centering:
pad_size_left = tf.random.uniform(
shape=[], minval=0, maxval=pad_size+1, dtype=tf.int32)
else:
pad_size_left = pad_size // 2
pad_size_right = pad_size - pad_size_left
return pad_size_left, pad_size_right
def resize_and_pad(image, target_size, random_centering):
"""Resize image to target_size (<= image.size) and pad to original size."""
original_shape = image.shape
size = tf.reshape(target_size, [1])
size = tf.concat([size, size], axis=0)
image = tf.image.resize(image, size=size)
pad_size = original_shape[1] - target_size
pad_size_left, pad_size_right = _make_padding_sizes(
pad_size, random_centering)
padding = [[pad_size_left, pad_size_right],
[pad_size_left, pad_size_right], [0, 0]]
if len(original_shape) == 4:
padding = [[0, 0]] + padding
image = tf.pad(image, padding)
image.set_shape(original_shape)
return image
def resize_and_extract(image, target_size, random_centering):
"""Upscale image to target_size (>image.size), extract original size crop."""
original_shape = image.shape
size = tf.reshape(target_size, [1])
size = tf.concat([size, size], axis=0)
image = tf.image.resize(image, size=size)
pad_size = target_size - original_shape[1]
pad_size_left, pad_size_right = _make_padding_sizes(
pad_size, random_centering)
if len(original_shape) == 3:
image = tf.expand_dims(image, 0)
image = tf.cond(pad_size_right > 0,
lambda: image[:, pad_size_left:-pad_size_right, :, :],
lambda: image[:, pad_size_left:, :, :])
image = tf.cond(pad_size_right > 0,
lambda: image[:, :, pad_size_left:-pad_size_right, :],
lambda: image[:, :, pad_size_left:, :])
if len(original_shape) == 3:
image = tf.squeeze(image, 0)
image.set_shape(original_shape)
return image
def resize_and_center(image, target_size, random_centering):
return tf.cond(
tf.math.less_equal(target_size, image.shape[1]),
lambda: resize_and_pad(image, target_size, random_centering),
lambda: resize_and_extract(image, target_size, random_centering))
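# Note (added for exposition): resize_and_center keeps the spatial shape
# fixed while changing the apparent object scale. For an 80x80 input,
# target_size=40 shrinks and zero-pads back to 80x80, while target_size=120
# upsamples and extracts a central (or randomly centered) 80x80 crop.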
def random_rotation_and_flip(image):
angle = tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32)
return tf.image.random_flip_left_right(tf.image.rot90(image, angle))
def get_all_rotations_and_flips(images):
assert isinstance(images, list)
new_images = []
for image in images:
for rotation in range(4):
new_images.append(tf.image.rot90(image, rotation))
flipped_image = tf.image.flip_left_right(image)
new_images.append(tf.image.rot90(flipped_image, rotation))
return new_images
def random_rescaling(image, random_centering):
assert image.shape.as_list()[0] == image.shape.as_list()[1]
original_size = image.shape.as_list()[1]
min_size = 2 * (original_size // 4)
max_size = original_size * 2
target_size = tf.random.uniform(
shape=[], minval=min_size, maxval=max_size // 2,
dtype=tf.int32) * 2
return resize_and_center(image, target_size, random_centering)
def get_all_rescalings(images, image_width, random_centering):
"""Get a uniform sample of rescalings of all images in input."""
assert isinstance(images, list)
min_size = 2 * (image_width // 4)
max_size = image_width * 2
delta_size = (max_size + 2 - min_size) // 5
sizes = range(min_size, max_size + 2, delta_size)
new_images = []
for image in images:
for size in sizes:
new_images.append(resize_and_center(image, size, random_centering))
return new_images
def move_repeats_to_batch(image, n_repeats):
width, height, n_channels = image.shape.as_list()[1:]
image = tf.reshape(image, [-1, width, height, n_channels, n_repeats])
image = tf.transpose(image, [0, 4, 1, 2, 3]) # [B, repeats, x, y, c]
return tf.reshape(image, [-1, width, height, n_channels])
def get_classification_label(dataset_row, class_boundaries):
merge_time = dataset_row['grounded_normalized_time']
label = tf.dtypes.cast(0, tf.int64)
for category, intervals in class_boundaries.items():
for interval in intervals:
if merge_time > interval[0] and merge_time < interval[1]:
label = tf.dtypes.cast(int(category), tf.int64)
return label
def get_regression_label(dataset_row, task_type):
"""Returns time-until-merger regression target given desired modeling task."""
if task_type == losses.TASK_NORMALIZED_REGRESSION:
return tf.dtypes.cast(dataset_row['normalized_time'], tf.float32)
elif task_type == losses.TASK_GROUNDED_UNNORMALIZED_REGRESSION:
return tf.dtypes.cast(dataset_row['grounded_normalized_time'], tf.float32)
elif task_type == losses.TASK_UNNORMALIZED_REGRESSION:
return tf.dtypes.cast(dataset_row['unnormalized_time'], tf.float32)
elif task_type == losses.TASK_CLASSIFICATION:
return tf.dtypes.cast(dataset_row['grounded_normalized_time'], tf.float32)
else:
    raise ValueError(f'Unknown task type: {task_type}')
def get_normalized_time_target(dataset_row):
return tf.dtypes.cast(dataset_row['normalized_time'], tf.float32)
def apply_time_filter(dataset_row, time_interval):
"""Returns True if data is within the given time intervals."""
merge_time = dataset_row['grounded_normalized_time']
lower_time, upper_time = time_interval
return merge_time > lower_time and merge_time < upper_time
def normalize_physical_feature(name, dataset_row):
min_feat, max_feat = PHYSICAL_FEATURES_MIN_MAX[name]
value = getattr(dataset_row, name)
return 2 * (value - min_feat) / (max_feat - min_feat) - 1
def prepare_dataset(ds, target_size, crop_type, n_repeats, augmentations,
task_type, additional_features, class_boundaries,
time_intervals=None, frequencies_to_use='all',
additional_lambdas=None):
"""Prepare a zipped dataset of image, classification/regression labels."""
def _prepare_image(dataset_row):
"""Transpose, crop and cast an image."""
image = tf.dtypes.cast(dataset_row['image'], tf.float32)
image = tf.reshape(image, tf.cast(dataset_row['image_shape'], tf.int32))
image = tf.transpose(image, perm=[1, 2, 0]) # Convert to NHWC
freqs = ALL_FREQUENCIES if frequencies_to_use == 'all' else frequencies_to_use
idxs_to_keep = [ALL_FREQUENCIES.index(f) for f in freqs]
image = tf.gather(params=image, indices=idxs_to_keep, axis=-1)
# Based on offline computation on the empirical frequency range:
# Converts [0, 8.] ~~> [-1, 1]
image = (image - DATASET_FREQUENCY_MEAN)/(DATASET_FREQUENCY_RANGE/2.0)
def crop(image):
if crop_type == CROP_TYPE_FIXED:
crop_loc = tf.cast(dataset_row['proposed_crop'][0], tf.int32)
crop_size = tf.cast(dataset_row['proposed_crop'][1], tf.int32)
image = image[
crop_loc[0]:crop_loc[0] + crop_size[0],
crop_loc[1]:crop_loc[1] + crop_size[1], :]
image = tf.image.resize(image, target_size[0:2])
image.set_shape([target_size[0], target_size[1], target_size[2]])
elif crop_type == CROP_TYPE_RANDOM:
image = tf.image.random_crop(image, target_size)
image.set_shape([target_size[0], target_size[1], target_size[2]])
elif crop_type != CROP_TYPE_NONE:
raise NotImplementedError
return image
repeated_images = []
for _ in range(n_repeats):
repeated_images.append(crop(image))
image = tf.concat(repeated_images, axis=-1)
if augmentations['rotation_and_flip']:
image = random_rotation_and_flip(image)
if augmentations['rescaling']:
image = random_rescaling(image, augmentations['translation'])
return image
def get_regression_label_wrapper(dataset_row):
return get_regression_label(dataset_row, task_type=task_type)
def get_classification_label_wrapper(dataset_row):
return get_classification_label(dataset_row,
class_boundaries=class_boundaries)
if time_intervals:
for time_interval in time_intervals:
filter_fn = functools.partial(apply_time_filter,
time_interval=time_interval)
ds = ds.filter(filter_fn)
datasets = [ds.map(_prepare_image)]
if additional_features:
additional_features = additional_features.split(',')
assert all([f in VALID_ADDITIONAL_FEATURES for f in additional_features])
logging.info('Running with additional features: %s.',
', '.join(additional_features))
def _prepare_additional_features(dataset_row):
features = []
for f in additional_features:
features.append(normalize_physical_feature(f, dataset_row))
features = tf.convert_to_tensor(features, dtype=tf.float32)
features.set_shape([len(additional_features)])
return features
datasets += [ds.map(_prepare_additional_features)]
datasets += [
ds.map(get_classification_label_wrapper),
ds.map(get_regression_label_wrapper),
ds.map(get_normalized_time_target)]
if additional_lambdas:
for process_fn in additional_lambdas:
datasets += [ds.map(process_fn)]
return tf.data.Dataset.zip(tuple(datasets))
|
deepmind-research-master
|
galaxy_mergers/preprocessing.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers to compute loss metrics."""
import scipy.stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
TASK_CLASSIFICATION = 'classification'
TASK_NORMALIZED_REGRESSION = 'normalized_regression'
TASK_UNNORMALIZED_REGRESSION = 'unnormalized_regression'
TASK_GROUNDED_UNNORMALIZED_REGRESSION = 'grounded_unnormalized_regression'
REGRESSION_TASKS = [TASK_NORMALIZED_REGRESSION, TASK_UNNORMALIZED_REGRESSION,
TASK_GROUNDED_UNNORMALIZED_REGRESSION]
ALL_TASKS = [TASK_CLASSIFICATION] + REGRESSION_TASKS
LOSS_MSE = 'mse'
LOSS_SOFTMAX_CROSS_ENTROPY = 'softmax_cross_entropy'
ALL_LOSSES = [LOSS_SOFTMAX_CROSS_ENTROPY, LOSS_MSE]
def normalize_regression_loss(regression_loss, predictions):
# Normalize loss such that:
# 1) E_{x uniform}[loss(x, prediction)] does not depend on prediction
# 2) E_{x uniform, prediction uniform}[loss(x, prediction)] is as before.
# Divides MSE regression loss by E[(prediction-x)^2]; assumes x=[-1,1]
normalization = 2./3.
normalized_loss = regression_loss / ((1./3 + predictions**2) / normalization)
return normalized_loss
def equal32(x, y):
return tf.cast(tf.equal(x, y), tf.float32)
def mse_loss(predicted, targets):
return (predicted - targets) ** 2
def get_std_factor_from_confidence_percent(percent):
dec = percent/100.
inv_dec = 1 - dec
return scipy.stats.norm.ppf(dec+inv_dec/2)
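# Worked example (added for exposition): for a 95% two-sided interval,
# ppf(0.95 + 0.05 / 2) = ppf(0.975) ~= 1.96, the familiar Gaussian factor,
# giving bounds mu +/- 1.96 * sigma.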
def get_all_metric_names(task_type, model_uncertainty, loss_config, # pylint: disable=unused-argument
mode='eval', return_dict=True):
"""Get all the scalar fields produced by compute_loss_and_metrics."""
names = ['regularization_loss', 'prediction_accuracy', str(mode)+'_loss']
if task_type == TASK_CLASSIFICATION:
names += ['classification_loss']
else:
names += ['regression_loss', 'avg_mu', 'var_mu']
if model_uncertainty:
names += ['uncertainty_loss', 'scaled_regression_loss',
'uncertainty_plus_scaled_regression',
'avg_sigma', 'var_sigma',
'percent_in_conf_interval', 'error_sigma_correlation',
'avg_prob']
if return_dict:
return {name: 0. for name in names}
else:
return names
def compute_loss_and_metrics(mu, log_sigma_sq,
regression_targets, labels,
task_type, model_uncertainty, loss_config,
regularization_loss=0., confidence_interval=95,
mode='train'):
"""Computes loss statistics and other metrics."""
scalars_to_log = dict()
vectors_to_log = dict()
scalars_to_log['regularization_loss'] = regularization_loss
vectors_to_log['mu'] = mu
if task_type == TASK_CLASSIFICATION:
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=mu, labels=labels, name='cross_entropy')
classification_loss = tf.reduce_mean(cross_entropy, name='class_loss')
total_loss = classification_loss
sigma = None
scalars_to_log['classification_loss'] = classification_loss
predicted_labels = tf.argmax(mu, axis=1)
correct_predictions = equal32(predicted_labels, labels)
else:
regression_loss = mse_loss(mu, regression_targets)
if 'mse_normalize' in loss_config and loss_config['mse_normalize']:
assert task_type in [TASK_GROUNDED_UNNORMALIZED_REGRESSION,
TASK_NORMALIZED_REGRESSION]
regression_loss = normalize_regression_loss(regression_loss, mu)
avg_regression_loss = tf.reduce_mean(regression_loss)
vectors_to_log['regression_loss'] = regression_loss
scalars_to_log['regression_loss'] = avg_regression_loss
scalars_to_log['avg_mu'] = tf.reduce_mean(mu)
scalars_to_log['var_mu'] = tf.reduce_mean(mse_loss(mu, tf.reduce_mean(mu)))
predicted_labels = tf.cast(mu > 0, tf.int64)
correct_predictions = equal32(predicted_labels, labels)
if model_uncertainty:
# This implements Eq. (1) in https://arxiv.org/pdf/1612.01474.pdf
inv_sigma_sq = tf.math.exp(-log_sigma_sq)
scaled_regression_loss = regression_loss * inv_sigma_sq
scaled_regression_loss = tf.reduce_mean(scaled_regression_loss)
uncertainty_loss = tf.reduce_mean(log_sigma_sq)
total_loss = uncertainty_loss + scaled_regression_loss
scalars_to_log['uncertainty_loss'] = uncertainty_loss
scalars_to_log['scaled_regression_loss'] = scaled_regression_loss
scalars_to_log['uncertainty_plus_scaled_regression'] = total_loss
sigma = tf.math.exp(log_sigma_sq / 2.)
vectors_to_log['sigma'] = sigma
scalars_to_log['avg_sigma'] = tf.reduce_mean(sigma)
var_sigma = tf.reduce_mean(mse_loss(sigma, tf.reduce_mean(sigma)))
scalars_to_log['var_sigma'] = var_sigma
# Compute # of labels that fall into the confidence interval.
std_factor = get_std_factor_from_confidence_percent(confidence_interval)
lower_bound = mu - std_factor * sigma
upper_bound = mu + std_factor * sigma
preds = tf.logical_and(tf.greater(regression_targets, lower_bound),
tf.less(regression_targets, upper_bound))
percent_in_conf_interval = tf.reduce_mean(tf.cast(preds, tf.float32))
scalars_to_log['percent_in_conf_interval'] = percent_in_conf_interval*100
error_sigma_corr = tfp.stats.correlation(x=regression_loss,
y=sigma, event_axis=None)
scalars_to_log['error_sigma_correlation'] = error_sigma_corr
dists = tfp.distributions.Normal(mu, sigma)
probs = dists.prob(regression_targets)
scalars_to_log['avg_prob'] = tf.reduce_mean(probs)
else:
total_loss = avg_regression_loss
loss_name = str(mode)+'_loss'
total_loss = tf.add(total_loss, regularization_loss, name=loss_name)
scalars_to_log[loss_name] = total_loss
vectors_to_log['correct_predictions'] = correct_predictions
scalars_to_log['prediction_accuracy'] = tf.reduce_mean(correct_predictions)
  # Validate that the metrics produced are exactly those expected.
expected = get_all_metric_names(task_type, model_uncertainty,
loss_config, mode, False)
assert set(expected) == set(scalars_to_log.keys())
return scalars_to_log, vectors_to_log
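# Illustrative usage (added; all values are hypothetical): computing metrics
# for a small regression batch with modelled uncertainty. Assumes TF2 eager
# execution.
def _example_compute_loss_and_metrics():
  mu = tf.constant([0.1, -0.4, 0.3])
  log_sigma_sq = tf.constant([-2.0, -1.5, -2.5])
  regression_targets = tf.constant([0.0, -0.5, 0.2])
  labels = tf.constant([1, 0, 1], dtype=tf.int64)
  scalars, vectors = compute_loss_and_metrics(
      mu, log_sigma_sq, regression_targets, labels,
      task_type=TASK_UNNORMALIZED_REGRESSION, model_uncertainty=True,
      loss_config={}, mode='eval')
  print(scalars['eval_loss'].numpy(), vectors['sigma'].numpy())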
|
deepmind-research-master
|
galaxy_mergers/losses.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to model evaluation on a checkpoint and dataset."""
import ast
from absl import app
from absl import flags
from absl import logging
from galaxy_mergers import evaluator
flags.DEFINE_string('checkpoint_path', '', 'Path to TF2 checkpoint to eval.')
flags.DEFINE_string('data_path', '', 'Path to TFRecord(s) with data.')
flags.DEFINE_string('filter_time_intervals', None,
'Merger time intervals on which to perform regression.'
'Specify None for the default time interval [-1,1], or'
' a custom list of intervals, e.g. [[-0.2,0], [0.5,1]].')
FLAGS = flags.FLAGS
def main(_) -> None:
if FLAGS.filter_time_intervals is not None:
filter_time_intervals = ast.literal_eval(FLAGS.filter_time_intervals)
else:
filter_time_intervals = None
config, ds, experiment = evaluator.get_config_dataset_evaluator(
filter_time_intervals,
FLAGS.checkpoint_path,
config_override={
'experiment_kwargs.data_config.dataset_path': FLAGS.data_path,
})
metrics, _, _ = evaluator.run_model_on_dataset(experiment, ds, config)
logging.info('Evaluation complete. Metrics: %s', metrics)
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
galaxy_mergers/main.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for a galaxy merger model evaluation."""
import glob
import os
from astropy import cosmology
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import tensorflow.compat.v2 as tf
def restore_checkpoint(checkpoint_dir, experiment):
  """Restores the latest checkpoint in `checkpoint_dir` into the experiment."""
  checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
global_step = tf.Variable(
0, dtype=tf.int32, trainable=False, name='global_step')
checkpoint = tf.train.Checkpoint(
_global_step_=global_step, **experiment.checkpoint_items)
checkpoint.restore(checkpoint_path)
def sum_average_transformed_mu_and_sigma(mu, log_sigma_sq):
"""Computes <mu>, var(mu) + <var> in transformed representation.
This corresponds to assuming that the output distribution is a sum of
  Gaussians and computing the mean and variance of the resulting (non-Gaussian)
distribution.
Args:
mu: Tensor of shape [B, ...] representing the means of the input
distributions.
log_sigma_sq: Tensor of shape [B, ...] representing log(sigma**2) of the
input distributions. Can be None, in which case the variance is assumed
to be zero.
Returns:
mu: Tensor of shape [...] representing the means of the output
distributions.
log_sigma_sq: Tensor of shape [...] representing log(sigma**2) of the
output distributions.
"""
av_mu = tf.reduce_mean(mu, axis=0)
var_mu = tf.math.reduce_std(mu, axis=0)**2
if log_sigma_sq is None:
return av_mu, tf.math.log(var_mu)
max_log_sigma_sq = tf.reduce_max(log_sigma_sq, axis=0)
log_sigma_sq -= max_log_sigma_sq
# (sigma/sigma_0)**2
sigma_sq = tf.math.exp(log_sigma_sq)
# (<sigma**2>)/sigma_0**2 (<1)
av_sigma_sq = tf.reduce_mean(sigma_sq, axis=0)
# (<sigma**2> + var(mu))/sigma_0**2
av_sigma_sq += var_mu * tf.math.exp(-max_log_sigma_sq)
# log(<sigma**2> + var(mu))
log_av_sigma_sq = tf.math.log(av_sigma_sq) + max_log_sigma_sq
return av_mu, log_av_sigma_sq
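# Illustrative check (added): for a toy 3-member ensemble, the numerically
# stable computation above matches the direct formula
# log(var(mu) + <sigma**2>). Assumes TF2 eager execution.
def _example_sum_average():
  mu = tf.constant([[0.1], [0.3], [0.2]])
  log_sigma_sq = tf.math.log(tf.constant([[0.04], [0.09], [0.01]]))
  av_mu, log_av_sigma_sq = sum_average_transformed_mu_and_sigma(
      mu, log_sigma_sq)
  direct = tf.math.log(tf.math.reduce_std(mu, axis=0)**2 +
                       tf.reduce_mean(tf.math.exp(log_sigma_sq), axis=0))
  print(av_mu.numpy(), log_av_sigma_sq.numpy(), direct.numpy())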
def aggregate_regression_ensemble(logits_or_times, ensemble_size,
use_uncertainty, test_time_ensembling):
"""Aggregate output of model ensemble."""
out_shape = logits_or_times.shape.as_list()[1:]
logits_or_times = tf.reshape(logits_or_times, [ensemble_size, -1] + out_shape)
mus = logits_or_times[..., 0]
log_sigma_sqs = logits_or_times[..., -1] if use_uncertainty else None
if test_time_ensembling == 'sum':
mu, log_sigma_sq = sum_average_transformed_mu_and_sigma(mus, log_sigma_sqs)
elif test_time_ensembling == 'none':
mu = mus[0]
log_sigma_sq = log_sigma_sqs[0] if use_uncertainty else None
else:
raise ValueError('Unexpected test_time_ensembling')
return mu, log_sigma_sq
def aggregate_classification_ensemble(logits_or_times, ensemble_size,
test_time_ensembling):
"""Averages the output logits across models in the ensemble."""
out_shape = logits_or_times.shape.as_list()[1:]
logits = tf.reshape(logits_or_times, [ensemble_size, -1] + out_shape)
if test_time_ensembling == 'sum':
logits = tf.reduce_mean(logits, axis=0)
return logits, None
elif test_time_ensembling == 'none':
return logits, None
else:
raise ValueError('Unexpected test_time_ensembling')
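# Illustrative example (added): averaging the logits of a hypothetical 2-model
# ensemble over a batch of 3 examples with 4 classes.
def _example_aggregate_classification():
  logits = tf.random.normal([2 * 3, 4])  # Ensemble stacked into the batch.
  mean_logits, _ = aggregate_classification_ensemble(
      logits, ensemble_size=2, test_time_ensembling='sum')
  print(mean_logits.shape)  # (3, 4)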
def unpack_evaluator_output(data, return_seq_info=False, return_redshift=False):
"""Unpack evaluator.run_model_on_dataset output."""
mus = np.array(data[1]['mu']).flatten()
sigmas = np.array(data[1]['sigma']).flatten()
regression_targets = np.array(data[1]['regression_targets']).flatten()
outputs = [mus, sigmas, regression_targets]
if return_seq_info:
seq_ids = np.array(data[2][0]).flatten()
seq_ids = np.array([seq_id.decode('UTF-8') for seq_id in seq_ids])
time_idxs = np.array(data[2][1]).flatten()
axes = np.array(data[2][2]).flatten()
outputs += [seq_ids, axes, time_idxs]
if return_redshift:
redshifts = np.array(data[2][6]).flatten()
outputs += [redshifts]
return outputs
def process_data_into_myrs(redshifts, *data_lists):
"""Converts normalized time to virial time using Planck cosmology."""
  # Small hack: access the class via getattr to avoid build tools choking on
  # the non-standard trickery done in the astropy library:
  # https://github.com/astropy/astropy/blob/master/astropy/cosmology/core.py#L3290
  # which dynamically generates and imports new classes.
  planck13 = getattr(cosmology, 'Planck13')
hubble_constants = planck13.H(redshifts) # (km/s)/megaparsec
inv_hubble_constants = 1/hubble_constants # (megaparsec*s) / km
megaparsec_to_km = 1e19*3.1
seconds_to_gigayears = 1e-15/31.556
conversion_factor = megaparsec_to_km * seconds_to_gigayears
hubble_time_gigayears = conversion_factor * inv_hubble_constants
hubble_to_virial_time = 0.14 # approximate simulation-based conversion factor
virial_dyn_time = hubble_to_virial_time*hubble_time_gigayears.value
return [data_list*virial_dyn_time for data_list in data_lists]
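# Illustrative example (added): rough scale of the conversion -- at redshift
# z ~ 0 the Hubble time is ~14.5 Gyr, so one virial dynamical time is ~2 Gyr
# and a normalized time of 0.5 maps to roughly 1 Gyr.
def _example_time_conversion():
  normalized_times = np.array([-0.5, 0.0, 0.5])
  redshifts = np.zeros(3)
  (times_gyr,) = process_data_into_myrs(redshifts, normalized_times)
  print(times_gyr)  # Approximately [-1.0, 0.0, 1.0].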
def print_rmse_and_class_accuracy(mus, regression_targets, redshifts):
"""Convert to virial dynamical time and print stats."""
time_pred, time_gt = process_data_into_myrs(
redshifts, mus, regression_targets)
time_sq_errors = (time_pred-time_gt)**2
rmse = np.sqrt(np.mean(time_sq_errors))
labels = regression_targets > 0
class_preds = mus > 0
accuracy = sum((labels == class_preds).astype(np.int8)) / len(class_preds)
print(f'95% Error: {np.percentile(np.sqrt(time_sq_errors), 95)}')
print(f'RMSE: {rmse}')
print(f'Classification Accuracy: {accuracy}')
def print_stats(vec, do_print=True):
fvec = vec.flatten()
if do_print:
print(len(fvec), min(fvec), np.mean(fvec), np.median(fvec), max(fvec))
return (len(fvec), min(fvec), np.mean(fvec), np.median(fvec), max(fvec))
def get_image_from_fits(base_dir, seq='475_31271', time='497', axis=2):
"""Read *.fits galaxy image from directory."""
axis_map = {0: 'x', 1: 'y', 2: 'z'}
fits_glob = f'{base_dir}/{seq}/fits_of_flux_psf/{time}/*_{axis_map[axis]}_*.fits'
def get_freq_from_path(p):
return int(p.split('/')[-1].split('_')[2][1:])
fits_image_paths = sorted(glob.glob(fits_glob), key=get_freq_from_path)
assert len(fits_image_paths) == 7
combined_frequencies = []
for fit_path in fits_image_paths:
with open(fit_path, 'rb') as f:
fits_data = np.array(fits.open(f)[0].data.astype(np.float32))
combined_frequencies.append(fits_data)
fits_image = np.transpose(np.array(combined_frequencies), (1, 2, 0))
return fits_image
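# Illustrative usage (added; the directory below is hypothetical): loading one
# 7-frequency galaxy observation and checking its layout.
def _example_load_fits():
  image = get_image_from_fits('/tmp/galaxy_fits', seq='475_31271',
                              time='497', axis=2)
  print(image.shape)  # (height, width, 7)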
def stack_desired_galaxy_images(base_dir, seq, n_time_slices):
"""Searth through galaxy image directory gathering images."""
fits_sequence_dir = os.path.join(base_dir, seq, 'fits_of_flux_psf')
all_times_for_seq = os.listdir(fits_sequence_dir)
hop = (len(all_times_for_seq)-1)//(n_time_slices-1)
desired_time_idxs = [k*hop for k in range(n_time_slices)]
all_imgs = []
for j in desired_time_idxs:
time = all_times_for_seq[j]
img = get_image_from_fits(base_dir=base_dir, seq=seq, time=time, axis=2)
all_imgs.append(img)
min_img_size = min([img.shape[0] for img in all_imgs])
return all_imgs, min_img_size
def draw_galaxy_image(image, target_size=None, color_map='viridis'):
normalized_image = image / max(image.flatten())
color_map = plt.get_cmap(color_map)
colored_image = color_map(normalized_image)[:, :, :3]
colored_image = (colored_image * 255).astype(np.uint8)
colored_image = Image.fromarray(colored_image, mode='RGB')
if target_size:
colored_image = colored_image.resize(target_size, Image.ANTIALIAS)
return colored_image
def collect_merger_sequence(ds, seq=b'370_11071', n_examples_to_sift=5000):
  """Gathers images, targets and redshifts for a single merger sequence."""
  images, targets, redshifts = [], [], []
for i, all_inputs in enumerate(ds):
if all_inputs[4][0].numpy() == seq:
images.append(all_inputs[0][0].numpy())
targets.append(all_inputs[2][0].numpy())
redshifts.append(all_inputs[10][0].numpy())
if i > n_examples_to_sift: break
return np.squeeze(images), np.squeeze(targets), np.squeeze(redshifts)
def take_samples(sample_idxs, *data_lists):
return [np.take(l, sample_idxs, axis=0) for l in data_lists]
|
deepmind-research-master
|
galaxy_mergers/helpers.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation runner."""
import collections
from absl import logging
import tensorflow.compat.v2 as tf
from galaxy_mergers import config as tp_config
from galaxy_mergers import helpers
from galaxy_mergers import losses
from galaxy_mergers import model
from galaxy_mergers import preprocessing
class GalaxyMergeClassifierEvaluator():
"""Galaxy Merge Rate Prediction Evaluation Runner."""
def __init__(self, strategy, optimizer_config, total_train_batch_size,
train_net_args, eval_batch_size, eval_net_args,
l2_regularization, data_config, resnet_kwargs, n_train_epochs):
"""Initializes evaluator/experiment."""
logging.info('Initializing evaluator...')
self._strategy = strategy
self._data_config = data_config
self._use_additional_features = bool(data_config['additional_features'])
self._eval_batch_size = eval_batch_size
self._eval_net_args = eval_net_args
self._num_buckets = data_config['num_eval_buckets']
self._n_repeats = data_config['n_crop_repeat']
self._image_size = data_config['image_size']
self._task_type = data_config['task']
self._loss_config = data_config['loss_config']
self._model_uncertainty = data_config['model_uncertainty']
del l2_regularization, optimizer_config, train_net_args
del total_train_batch_size, n_train_epochs
logging.info('Creating model...')
num_classes = 2 if self._model_uncertainty else 1
if self._task_type == losses.TASK_CLASSIFICATION:
num_classes = len(self._data_config['class_boundaries'])
self.model = model.ResNet(
n_repeats=self._data_config['n_crop_repeat'], num_classes=num_classes,
use_additional_features=self._use_additional_features, **resnet_kwargs)
self._eval_input = None
def build_eval_input(self, additional_lambdas=None):
"""Create the galaxy merger evaluation dataset."""
def decode_fn(record_bytes):
parsed_example = tf.io.parse_single_example(
record_bytes,
{
'image':
tf.io.VarLenFeature(tf.float32),
'image_shape':
tf.io.FixedLenFeature([3], dtype=tf.int64),
'axis':
tf.io.FixedLenFeature([], dtype=tf.int64),
'proposed_crop':
tf.io.FixedLenFeature([2, 2], dtype=tf.int64),
'normalized_time':
tf.io.FixedLenFeature([], dtype=tf.float32),
'unnormalized_time':
tf.io.FixedLenFeature([], dtype=tf.float32),
'grounded_normalized_time':
tf.io.FixedLenFeature([], dtype=tf.float32),
'redshift':
tf.io.FixedLenFeature([], dtype=tf.float32),
'sequence_average_redshift':
tf.io.FixedLenFeature([], dtype=tf.float32),
'mass':
tf.io.FixedLenFeature([], dtype=tf.float32),
'time_index':
tf.io.FixedLenFeature([], dtype=tf.int64),
'sequence_id':
tf.io.FixedLenFeature([], dtype=tf.string),
})
parsed_example['image'] = tf.sparse.to_dense(
parsed_example['image'], default_value=0)
dataset_row = parsed_example
return dataset_row
def build_eval_pipeline(_):
"""Generate the processed input evaluation data."""
logging.info('Building evaluation input pipeline...')
ds_path = self._data_config['dataset_path']
ds = tf.data.TFRecordDataset([ds_path]).map(decode_fn)
augmentations = dict(
rotation_and_flip=False,
rescaling=False,
translation=False
)
ds = preprocessing.prepare_dataset(
ds=ds, target_size=self._image_size,
crop_type=self._data_config['test_crop_type'],
n_repeats=self._n_repeats,
augmentations=augmentations,
task_type=self._task_type,
additional_features=self._data_config['additional_features'],
class_boundaries=self._data_config['class_boundaries'],
time_intervals=self._data_config['time_filter_intervals'],
frequencies_to_use=self._data_config['frequencies_to_use'],
additional_lambdas=additional_lambdas)
batched_ds = ds.cache().batch(self._eval_batch_size).prefetch(128)
logging.info('Finished building input pipeline...')
return batched_ds
return self._strategy.experimental_distribute_datasets_from_function(
build_eval_pipeline)
def run_test_model_ensemble(self, images, physical_features, augmentations):
"""Run evaluation on input images."""
image_variations = [images]
image_shape = images.shape.as_list()
if augmentations['rotation_and_flip']:
image_variations = preprocessing.get_all_rotations_and_flips(
image_variations)
if augmentations['rescaling']:
image_variations = preprocessing.get_all_rescalings(
image_variations, image_shape[1], augmentations['translation'])
# Put all augmented images into the batch: batch * num_augmented
augmented_images = tf.stack(image_variations, axis=0)
augmented_images = tf.reshape(augmented_images, [-1] + image_shape[1:])
if self._use_additional_features:
physical_features = tf.concat(
[physical_features] * len(image_variations), axis=0)
n_reps = self._data_config['n_crop_repeat']
augmented_images = preprocessing.move_repeats_to_batch(augmented_images,
n_reps)
logits_or_times = self.model(augmented_images, physical_features,
**self._eval_net_args)
if self._task_type == losses.TASK_CLASSIFICATION:
mu, log_sigma_sq = helpers.aggregate_classification_ensemble(
logits_or_times, len(image_variations),
self._data_config['test_time_ensembling'])
else:
assert self._task_type in losses.REGRESSION_TASKS
mu, log_sigma_sq = helpers.aggregate_regression_ensemble(
logits_or_times, len(image_variations),
self._model_uncertainty,
self._data_config['test_time_ensembling'])
return mu, log_sigma_sq
@property
def checkpoint_items(self):
return {'model': self.model}
def run_model_on_dataset(evaluator, dataset, config, n_batches=16):
"""Runs the model against a dataset, aggregates model output."""
scalar_metrics_to_log = collections.defaultdict(list)
model_outputs_to_log = collections.defaultdict(list)
dataset_features_to_log = collections.defaultdict(list)
batch_count = 1
for all_inputs in dataset:
if config.experiment_kwargs.data_config['additional_features']:
images = all_inputs[0]
physical_features = all_inputs[1]
labels, regression_targets, _ = all_inputs[2:5]
other_dataset_features = all_inputs[5:]
else:
images, physical_features = all_inputs[0], None
labels, regression_targets, _ = all_inputs[1:4]
other_dataset_features = all_inputs[4:]
mu, log_sigma_sq = evaluator.run_test_model_ensemble(
images, physical_features,
config.experiment_kwargs.data_config['test_augmentations'])
loss_config = config.experiment_kwargs.data_config['loss_config']
task_type = config.experiment_kwargs.data_config['task']
uncertainty = config.experiment_kwargs.data_config['model_uncertainty']
conf = config.experiment_kwargs.data_config['eval_confidence_interval']
scalar_metrics, vector_metrics = losses.compute_loss_and_metrics(
mu, log_sigma_sq, regression_targets, labels,
task_type, uncertainty, loss_config, 0, conf, mode='eval')
for i, dataset_feature in enumerate(other_dataset_features):
dataset_features_to_log[i].append(dataset_feature.numpy())
for scalar_metric in scalar_metrics:
v = scalar_metrics[scalar_metric]
val = v if isinstance(v, int) or isinstance(v, float) else v.numpy()
scalar_metrics_to_log[scalar_metric].append(val)
for vector_metric in vector_metrics:
val = vector_metrics[vector_metric].numpy()
model_outputs_to_log[vector_metric].append(val)
regression_targets_np = regression_targets.numpy()
labels_np = labels.numpy()
model_outputs_to_log['regression_targets'].append(regression_targets_np)
model_outputs_to_log['labels'].append(labels_np)
model_outputs_to_log['model_input_images'].append(images.numpy())
if n_batches and batch_count >= n_batches:
break
batch_count += 1
return scalar_metrics_to_log, model_outputs_to_log, dataset_features_to_log
def get_config_dataset_evaluator(filter_time_intervals,
ckpt_path,
config_override=None,
setup_dataset=True):
"""Set-up a default config, evaluation dataset, and evaluator."""
config = tp_config.get_config(filter_time_intervals=filter_time_intervals)
if config_override:
with config.ignore_type():
config.update_from_flattened_dict(config_override)
strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0')
experiment = GalaxyMergeClassifierEvaluator(
strategy=strategy, **config.experiment_kwargs)
helpers.restore_checkpoint(ckpt_path, experiment)
if setup_dataset:
additional_lambdas = [
lambda ds: ds['sequence_id'],
lambda ds: ds['time_index'],
lambda ds: ds['axis'],
lambda ds: ds['normalized_time'],
lambda ds: ds['grounded_normalized_time'],
lambda ds: ds['unnormalized_time'],
lambda ds: ds['redshift'],
lambda ds: ds['mass']
]
ds = experiment.build_eval_input(additional_lambdas=additional_lambdas)
else:
ds = None
return config, ds, experiment
|
deepmind-research-master
|
galaxy_mergers/evaluator.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A prop-carry task that transition between multiple phases."""
import collections
import colorsys
import enum
from absl import logging
from dm_control import composer
from dm_control import mjcf
from dm_control.composer.observation import observable
from dm_control.locomotion.arenas import floors
from dm_control.locomotion.mocap import loader as mocap_loader
from dm_control.mujoco.wrapper import mjbindings
import numpy as np
from catch_carry import arm_opener
from catch_carry import mocap_data
from catch_carry import props
from catch_carry import trajectories
_PHYSICS_TIMESTEP = 0.005
# Maximum number of physics steps to run when settling props onto pedestals
# during episode initialization.
_MAX_SETTLE_STEPS = 1000
# Maximum velocity for prop to be considered settled.
# Used during episode initialization only.
_SETTLE_QVEL_TOL = 1e-5
# Magnitude of the sparse reward.
_SPARSE_REWARD = 1.0
# Maximum distance for walkers to be considered to be "near" a pedestal/target.
_TARGET_TOL = 0.65
# Defines how pedestals are placed around the arena.
# Pedestals are placed at constant angle intervals around the arena's center.
_BASE_PEDESTAL_DIST = 3 # Base distance from center.
_PEDESTAL_DIST_DELTA = 0.5 # Maximum variation on the base distance.
# Base hue-luminosity-saturation of the pedestal colors.
# We rotate through the hue for each pedestal created in the environment.
_BASE_PEDESTAL_H = 0.1
_BASE_PEDESTAL_L = 0.3
_BASE_PEDESTAL_S = 0.7
# Pedestal luminosity when active.
_ACTIVATED_PEDESTAL_L = 0.8
_PEDESTAL_SIZE = (0.2, 0.2, 0.02)
_SINGLE_PEDESTAL_COLOR = colorsys.hls_to_rgb(.3, .15, .35) + (1.0,)
WALKER_PEDESTAL = 'walker_pedestal'
WALKER_PROP = 'walker_prop'
PROP_PEDESTAL = 'prop_pedestal'
TARGET_STATE = 'target_state/'
CURRENT_STATE = 'meta/current_state/'
def _is_same_state(state_1, state_2):
if state_1.keys() != state_2.keys():
return False
for k in state_1:
if not np.all(state_1[k] == state_2[k]):
return False
return True
def _singleton_or_none(iterable):
iterator = iter(iterable)
try:
return next(iterator)
except StopIteration:
return None
def _generate_pedestal_colors(num_pedestals):
"""Function to get colors for pedestals."""
colors = []
for i in range(num_pedestals):
h = _BASE_PEDESTAL_H + i / num_pedestals
while h > 1:
h -= 1
colors.append(
colorsys.hls_to_rgb(h, _BASE_PEDESTAL_L, _BASE_PEDESTAL_S) + (1.0,))
return colors
InitializationParameters = collections.namedtuple(
'InitializationParameters', ('clip_segment', 'prop_id', 'pedestal_id'))
def _rotate_vector_by_quaternion(vec, quat):
result = np.empty(3)
mjbindings.mjlib.mju_rotVecQuat(result, np.asarray(vec), np.asarray(quat))
return result
@enum.unique
class WarehousePhase(enum.Enum):
TERMINATED = 0
GOTOTARGET = 1
PICKUP = 2
CARRYTOTARGET = 3
PUTDOWN = 4
def _find_random_free_pedestal_id(target_state, random_state):
free_pedestals = (
np.where(np.logical_not(np.any(target_state, axis=0)))[0])
return random_state.choice(free_pedestals)
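# Illustrative example (added): with a 2-prop x 3-pedestal occupancy matrix
# where props target pedestals 0 and 1, pedestal 2 is the only free choice.
def _example_find_free_pedestal():
  rng = np.random.RandomState(0)
  prop_pedestal_state = np.array([[1, 0, 0],
                                  [0, 1, 0]])
  print(_find_random_free_pedestal_id(prop_pedestal_state, rng))  # 2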
def _find_random_occupied_pedestal_id(target_state, random_state):
occupied_pedestals = (
np.where(np.any(target_state, axis=0))[0])
return random_state.choice(occupied_pedestals)
def one_hot(values, num_unique):
return np.squeeze(np.eye(num_unique)[np.array(values).reshape(-1)])
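# Illustrative example (added): `one_hot` turns a phase id into an indicator
# vector of fixed width, e.g. phase 2 of 5 -> [0, 0, 1, 0, 0].
def _example_one_hot():
  print(one_hot(2, num_unique=5))             # [0. 0. 1. 0. 0.]
  print(one_hot([0, 3], num_unique=5).shape)  # (2, 5)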
class SinglePropFourPhases(object):
"""A phase manager that transitions between four phases for a single prop."""
def __init__(self, fixed_initialization_phase=None):
self._phase = WarehousePhase.TERMINATED
self._fixed_initialization_phase = fixed_initialization_phase
def initialize_episode(self, target_state, random_state):
"""Randomly initializes an episode into one of the four phases."""
if self._fixed_initialization_phase is None:
self._phase = random_state.choice([
WarehousePhase.GOTOTARGET, WarehousePhase.PICKUP,
WarehousePhase.CARRYTOTARGET, WarehousePhase.PUTDOWN
])
else:
self._phase = self._fixed_initialization_phase
self._prop_id = random_state.randint(len(target_state[PROP_PEDESTAL]))
self._pedestal_id = np.nonzero(
target_state[PROP_PEDESTAL][self._prop_id])[0][0]
pedestal_id_for_initialization = self._pedestal_id
if self._phase == WarehousePhase.GOTOTARGET:
clip_segment = trajectories.ClipSegment.APPROACH
target_state[WALKER_PROP][:] = 0
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
elif self._phase == WarehousePhase.PICKUP:
clip_segment = trajectories.ClipSegment.PICKUP
target_state[WALKER_PROP][self._prop_id] = 1
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
# Set self._pedestal_id to the next pedestal after pickup is successful.
self._pedestal_id = _find_random_free_pedestal_id(
target_state[PROP_PEDESTAL], random_state)
target_state[PROP_PEDESTAL][self._prop_id, :] = 0
elif self._phase == WarehousePhase.CARRYTOTARGET:
clip_segment = random_state.choice([
trajectories.ClipSegment.CARRY1, trajectories.ClipSegment.CARRY2])
self._pedestal_id = _find_random_free_pedestal_id(
target_state[PROP_PEDESTAL], random_state)
if clip_segment == trajectories.ClipSegment.CARRY2:
pedestal_id_for_initialization = self._pedestal_id
target_state[WALKER_PROP][self._prop_id] = 1
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
target_state[PROP_PEDESTAL][self._prop_id, :] = 0
elif self._phase == WarehousePhase.PUTDOWN:
clip_segment = trajectories.ClipSegment.PUTDOWN
target_state[WALKER_PROP][:] = 0
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
return InitializationParameters(
clip_segment, self._prop_id, pedestal_id_for_initialization)
def on_success(self, target_state, random_state):
"""Transitions into the next phase upon success of current phase."""
if self._phase == WarehousePhase.GOTOTARGET:
if self._prop_id is not None:
self._phase = WarehousePhase.PICKUP
# Set self._pedestal_id to the next pedestal after pickup is successful.
self._pedestal_id = (
_find_random_free_pedestal_id(
target_state[PROP_PEDESTAL], random_state))
target_state[WALKER_PROP][self._prop_id] = 1
target_state[PROP_PEDESTAL][self._prop_id, :] = 0
else:
# If you go to an empty pedestal, go to pedestal with a prop.
self._pedestal_id = (
_find_random_occupied_pedestal_id(
target_state[PROP_PEDESTAL], random_state))
target_state[WALKER_PEDESTAL][:] = 0
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
self._prop_id = np.argwhere(
target_state[PROP_PEDESTAL][:, self._pedestal_id])[0, 0]
elif self._phase == WarehousePhase.PICKUP:
self._phase = WarehousePhase.CARRYTOTARGET
target_state[WALKER_PEDESTAL][:] = 0
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
elif self._phase == WarehousePhase.CARRYTOTARGET:
self._phase = WarehousePhase.PUTDOWN
target_state[WALKER_PROP][:] = 0
target_state[PROP_PEDESTAL][self._prop_id, self._pedestal_id] = 1
elif self._phase == WarehousePhase.PUTDOWN:
self._phase = WarehousePhase.GOTOTARGET
# Set self._pedestal_id to the next pedestal after putdown is successful.
self._pedestal_id = (
_find_random_free_pedestal_id(
target_state[PROP_PEDESTAL], random_state))
self._prop_id = None
target_state[WALKER_PEDESTAL][:] = 0
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
return self._phase
@property
def phase(self):
return self._phase
@property
def prop_id(self):
return self._prop_id
@property
def pedestal_id(self):
return self._pedestal_id
class PhasedBoxCarry(composer.Task):
"""A prop-carry task that transitions between multiple phases."""
def __init__(
self,
walker,
num_props,
num_pedestals,
proto_modifier=None,
transition_class=SinglePropFourPhases,
min_prop_gap=0.05,
pedestal_height_range=(0.45, 0.75),
log_transitions=False,
negative_reward_on_failure_termination=True,
use_single_pedestal_color=True,
priority_friction=False,
fixed_initialization_phase=None):
"""Initialize phased/instructed box-carrying ("warehouse") task.
Args:
walker: the walker to be used in this task.
num_props: the number of props in the task scene.
num_pedestals: the number of floating shelves (pedestals) in the task
scene.
proto_modifier: function to modify trajectory proto.
transition_class: the object that handles the transition logic.
min_prop_gap: arms are automatically opened to leave a gap around the prop
to avoid problematic collisions upon initialization.
pedestal_height_range: range of heights for the pedestal.
log_transitions: logging/printing of transitions.
negative_reward_on_failure_termination: boolean for whether to provide
negative sparse rewards on failure termination.
use_single_pedestal_color: boolean option for pedestals being the same
color or different colors.
priority_friction: sets friction priority thereby making prop objects have
higher friction.
fixed_initialization_phase: an instance of the `WarehousePhase` enum that
specifies the phase in which to always initialize the task, or `None` if
the initial task phase should be chosen randomly for each episode.
"""
self._num_props = num_props
self._num_pedestals = num_pedestals
self._proto_modifier = proto_modifier
self._transition_manager = transition_class(
fixed_initialization_phase=fixed_initialization_phase)
self._min_prop_gap = min_prop_gap
self._pedestal_height_range = pedestal_height_range
self._log_transitions = log_transitions
self._target_state = collections.OrderedDict([
(WALKER_PEDESTAL, np.zeros(num_pedestals)),
(WALKER_PROP, np.zeros(num_props)),
(PROP_PEDESTAL, np.zeros([num_props, num_pedestals]))
])
self._current_state = collections.OrderedDict([
(WALKER_PEDESTAL, np.zeros(num_pedestals)),
(WALKER_PROP, np.zeros(num_props)),
(PROP_PEDESTAL, np.zeros([num_props, num_pedestals]))
])
self._negative_reward_on_failure_termination = (
negative_reward_on_failure_termination)
self._priority_friction = priority_friction
clips = sorted(
set(mocap_data.medium_pedestal())
& (set(mocap_data.small_box()) | set(mocap_data.large_box())))
loader = mocap_loader.HDF5TrajectoryLoader(
mocap_data.H5_PATH, trajectories.SinglePropCarrySegmentedTrajectory)
self._trajectories = [
loader.get_trajectory(clip.clip_identifier) for clip in clips]
self._arena = floors.Floor()
self._walker = walker
self._feet_geoms = (
walker.mjcf_model.find('body', 'lfoot').find_all('geom') +
walker.mjcf_model.find('body', 'rfoot').find_all('geom'))
self._lhand_geoms = (
walker.mjcf_model.find('body', 'lhand').find_all('geom'))
self._rhand_geoms = (
walker.mjcf_model.find('body', 'rhand').find_all('geom'))
self._trajectories[0].configure_walkers([self._walker])
walker.create_root_joints(self._arena.attach(walker))
control_timestep = self._trajectories[0].dt
for i, trajectory in enumerate(self._trajectories):
if trajectory.dt != control_timestep:
raise ValueError(
'Inconsistent control timestep: '
'trajectories[{}].dt == {} but trajectories[0].dt == {}'
.format(i, trajectory.dt, control_timestep))
self.set_timesteps(control_timestep, _PHYSICS_TIMESTEP)
if use_single_pedestal_color:
self._pedestal_colors = [_SINGLE_PEDESTAL_COLOR] * num_pedestals
else:
self._pedestal_colors = _generate_pedestal_colors(num_pedestals)
self._pedestals = [props.Pedestal(_PEDESTAL_SIZE, rgba)
for rgba in self._pedestal_colors]
for pedestal in self._pedestals:
self._arena.attach(pedestal)
self._props = [
self._trajectories[0].create_props(
priority_friction=self._priority_friction)[0]
for _ in range(num_props)
]
for prop in self._props:
self._arena.add_free_entity(prop)
self._task_observables = collections.OrderedDict()
self._task_observables['target_phase'] = observable.Generic(
lambda _: one_hot(self._transition_manager.phase.value, num_unique=5))
def ego_prop_xpos(physics):
prop_id = self._focal_prop_id
if prop_id is None:
return np.zeros((3,))
prop = self._props[prop_id]
prop_xpos, _ = prop.get_pose(physics)
walker_xpos = physics.bind(self._walker.root_body).xpos
return self._walker.transform_vec_to_egocentric_frame(
physics, prop_xpos - walker_xpos)
self._task_observables['target_prop/xpos'] = (
observable.Generic(ego_prop_xpos))
def prop_zaxis(physics):
prop_id = self._focal_prop_id
if prop_id is None:
return np.zeros((3,))
prop = self._props[prop_id]
prop_xmat = physics.bind(
mjcf.get_attachment_frame(prop.mjcf_model)).xmat
return prop_xmat[[2, 5, 8]]
self._task_observables['target_prop/zaxis'] = (
observable.Generic(prop_zaxis))
def ego_pedestal_xpos(physics):
pedestal_id = self._focal_pedestal_id
if pedestal_id is None:
return np.zeros((3,))
pedestal = self._pedestals[pedestal_id]
pedestal_xpos, _ = pedestal.get_pose(physics)
walker_xpos = physics.bind(self._walker.root_body).xpos
return self._walker.transform_vec_to_egocentric_frame(
physics, pedestal_xpos - walker_xpos)
self._task_observables['target_pedestal/xpos'] = (
observable.Generic(ego_pedestal_xpos))
for obs in (self._walker.observables.proprioception +
self._walker.observables.kinematic_sensors +
self._walker.observables.dynamic_sensors +
list(self._task_observables.values())):
obs.enabled = True
self._focal_prop_id = None
self._focal_pedestal_id = None
@property
def root_entity(self):
return self._arena
@property
def task_observables(self):
return self._task_observables
@property
def name(self):
return 'warehouse'
def initialize_episode_mjcf(self, random_state):
self._reward = 0.0
self._discount = 1.0
self._should_terminate = False
self._before_step_success = False
for target_value in self._target_state.values():
target_value[:] = 0
for pedestal_id, pedestal in enumerate(self._pedestals):
angle = 2 * np.pi * pedestal_id / len(self._pedestals)
dist = (_BASE_PEDESTAL_DIST +
_PEDESTAL_DIST_DELTA * random_state.uniform(-1, 1))
height = random_state.uniform(*self._pedestal_height_range)
pedestal_pos = [dist * np.cos(angle), dist * np.sin(angle),
height - pedestal.geom.size[2]]
mjcf.get_attachment_frame(pedestal.mjcf_model).pos = pedestal_pos
for prop in self._props:
prop.detach()
self._props = []
self._trajectory_for_prop = []
for prop_id in range(self._num_props):
trajectory = random_state.choice(self._trajectories)
if self._proto_modifier:
trajectory = trajectory.get_modified_trajectory(
self._proto_modifier, random_state=random_state)
prop = trajectory.create_props(
priority_friction=self._priority_friction)[0]
prop.mjcf_model.model = 'prop_{}'.format(prop_id)
self._arena.add_free_entity(prop)
self._props.append(prop)
self._trajectory_for_prop.append(trajectory)
def _settle_props(self, physics):
prop_freejoints = [mjcf.get_attachment_frame(prop.mjcf_model).freejoint
for prop in self._props]
physics.bind(prop_freejoints).qvel = 0
physics.forward()
for _ in range(_MAX_SETTLE_STEPS):
self._update_current_state(physics)
success = self._evaluate_target_state()
stopped = max(abs(physics.bind(prop_freejoints).qvel)) < _SETTLE_QVEL_TOL
if success and stopped:
break
else:
physics.step()
physics.data.time = 0
def initialize_episode(self, physics, random_state):
self._ground_geomid = physics.bind(
self._arena.mjcf_model.worldbody.geom[0]).element_id
self._feet_geomids = set(physics.bind(self._feet_geoms).element_id)
self._lhand_geomids = set(physics.bind(self._lhand_geoms).element_id)
self._rhand_geomids = set(physics.bind(self._rhand_geoms).element_id)
for prop_id in range(len(self._props)):
pedestal_id = _find_random_free_pedestal_id(
self._target_state[PROP_PEDESTAL], random_state)
pedestal = self._pedestals[pedestal_id]
self._target_state[PROP_PEDESTAL][prop_id, pedestal_id] = 1
for prop_id, prop in enumerate(self._props):
trajectory = self._trajectory_for_prop[prop_id]
pedestal_id = np.nonzero(
self._target_state[PROP_PEDESTAL][prop_id])[0][0]
pedestal = self._pedestals[pedestal_id]
pedestal_pos, _ = pedestal.get_pose(physics)
pedestal_delta = np.array(
pedestal_pos - trajectory.infer_pedestal_positions()[0])
pedestal_delta[2] += pedestal.geom.size[2]
prop_timestep = trajectory.get_timestep_data(0).props[0]
prop_pos = prop_timestep.position + np.array(pedestal_delta)
prop_quat = prop_timestep.quaternion
prop_pos[:2] += random_state.uniform(
-pedestal.geom.size[:2] / 2, pedestal.geom.size[:2] / 2)
prop.set_pose(physics, prop_pos, prop_quat)
self._settle_props(physics)
init_params = self._transition_manager.initialize_episode(
self._target_state, random_state)
if self._log_transitions:
logging.info(init_params)
self._on_transition(physics)
init_prop = self._props[init_params.prop_id]
init_pedestal = self._pedestals[init_params.pedestal_id]
self._init_prop_id = init_params.prop_id
self._init_pedestal_id = init_params.pedestal_id
init_trajectory = self._trajectory_for_prop[init_params.prop_id]
init_timestep = init_trajectory.get_random_timestep_in_segment(
init_params.clip_segment, random_state)
trajectory_pedestal_pos = init_trajectory.infer_pedestal_positions()[0]
init_pedestal_pos = np.array(init_pedestal.get_pose(physics)[0])
delta_pos = init_pedestal_pos - trajectory_pedestal_pos
delta_pos[2] = 0
delta_angle = np.pi + np.arctan2(init_pedestal_pos[1], init_pedestal_pos[0])
delta_quat = (np.cos(delta_angle / 2), 0, 0, np.sin(delta_angle / 2))
trajectory_pedestal_to_walker = (
init_timestep.walkers[0].position - trajectory_pedestal_pos)
rotated_pedestal_to_walker = _rotate_vector_by_quaternion(
trajectory_pedestal_to_walker, delta_quat)
self._walker.set_pose(
physics,
position=trajectory_pedestal_pos + rotated_pedestal_to_walker,
quaternion=init_timestep.walkers[0].quaternion)
self._walker.set_velocity(
physics, velocity=init_timestep.walkers[0].velocity,
angular_velocity=init_timestep.walkers[0].angular_velocity)
self._walker.shift_pose(
physics, position=delta_pos, quaternion=delta_quat,
rotate_velocity=True)
physics.bind(self._walker.mocap_joints).qpos = (
init_timestep.walkers[0].joints)
physics.bind(self._walker.mocap_joints).qvel = (
init_timestep.walkers[0].joints_velocity)
if init_params.clip_segment in (trajectories.ClipSegment.CARRY1,
trajectories.ClipSegment.CARRY2,
trajectories.ClipSegment.PUTDOWN):
trajectory_pedestal_to_prop = (
init_timestep.props[0].position - trajectory_pedestal_pos)
rotated_pedestal_to_prop = _rotate_vector_by_quaternion(
trajectory_pedestal_to_prop, delta_quat)
init_prop.set_pose(
physics,
position=trajectory_pedestal_pos + rotated_pedestal_to_prop,
quaternion=init_timestep.props[0].quaternion)
init_prop.set_velocity(
physics, velocity=init_timestep.props[0].velocity,
angular_velocity=init_timestep.props[0].angular_velocity)
init_prop.shift_pose(
physics, position=delta_pos,
quaternion=delta_quat, rotate_velocity=True)
# If we have moved the pedestal upwards during height initialization,
# the prop may now be lodged inside it. We fix that here.
if init_pedestal_pos[2] > trajectory_pedestal_pos[2]:
init_prop_geomid = physics.bind(init_prop.geom).element_id
init_pedestal_geomid = physics.bind(init_pedestal.geom).element_id
disallowed_contact = sorted((init_prop_geomid, init_pedestal_geomid))
def has_disallowed_contact():
physics.forward()
for contact in physics.data.contact:
if sorted((contact.geom1, contact.geom2)) == disallowed_contact:
return True
return False
while has_disallowed_contact():
init_prop.shift_pose(physics, (0, 0, 0.001))
self._move_arms_if_necessary(physics)
self._update_current_state(physics)
self._previous_step_success = self._evaluate_target_state()
self._focal_prop_id = self._init_prop_id
self._focal_pedestal_id = self._init_pedestal_id
def _move_arms_if_necessary(self, physics):
if self._min_prop_gap is not None:
for entity in self._props + self._pedestals:
try:
arm_opener.open_arms_for_prop(
physics, self._walker.left_arm_root, self._walker.right_arm_root,
entity.mjcf_model, self._min_prop_gap)
except RuntimeError as e:
raise composer.EpisodeInitializationError(e)
def after_step(self, physics, random_state):
# First we check for failure termination.
for contact in physics.data.contact:
if ((contact.geom1 == self._ground_geomid and
contact.geom2 not in self._feet_geomids) or
(contact.geom2 == self._ground_geomid and
contact.geom1 not in self._feet_geomids)):
if self._negative_reward_on_failure_termination:
self._reward = -_SPARSE_REWARD
else:
self._reward = 0.0
self._should_terminate = True
self._discount = 0.0
return
# Then check for normal reward and state transitions.
self._update_current_state(physics)
success = self._evaluate_target_state()
if success and not self._previous_step_success:
self._reward = _SPARSE_REWARD
new_phase = (
self._transition_manager.on_success(self._target_state, random_state))
self._should_terminate = (new_phase == WarehousePhase.TERMINATED)
self._on_transition(physics)
self._previous_step_success = self._evaluate_target_state()
else:
self._reward = 0.0
def _on_transition(self, physics):
self._focal_prop_id = self._transition_manager.prop_id
self._focal_pedestal_id = self._transition_manager.pedestal_id
if self._log_transitions:
logging.info('target_state:\n%s', self._target_state)
for pedestal_id, pedestal_active in enumerate(
self._target_state[WALKER_PEDESTAL]):
r, g, b, a = self._pedestal_colors[pedestal_id]
if pedestal_active:
h, _, s = colorsys.rgb_to_hls(r, g, b)
r, g, b = colorsys.hls_to_rgb(h, _ACTIVATED_PEDESTAL_L, s)
physics.bind(self._pedestals[pedestal_id].geom).rgba = (r, g, b, a)
def get_reward(self, physics):
return self._reward
def get_discount(self, physics):
return self._discount
def should_terminate_episode(self, physics):
return self._should_terminate
def _update_current_state(self, physics):
for current_state_value in self._current_state.values():
current_state_value[:] = 0
# Check if the walker is near each pedestal.
walker_pos, _ = self._walker.get_pose(physics)
for pedestal_id, pedestal in enumerate(self._pedestals):
target_pos, _ = pedestal.get_pose(physics)
walker_to_target_dist = np.linalg.norm(walker_pos[:2] - target_pos[:2])
if walker_to_target_dist <= _TARGET_TOL:
self._current_state[WALKER_PEDESTAL][pedestal_id] = 1
prop_geomids = {
physics.bind(prop.geom).element_id: prop_id
for prop_id, prop in enumerate(self._props)}
pedestal_geomids = {
physics.bind(pedestal.geom).element_id: pedestal_id
for pedestal_id, pedestal in enumerate(self._pedestals)}
prop_pedestal_contact_counts = np.zeros(
[self._num_props, self._num_pedestals])
prop_lhand_contact = [False] * self._num_props
prop_rhand_contact = [False] * self._num_props
for contact in physics.data.contact:
prop_id = prop_geomids.get(contact.geom1, prop_geomids.get(contact.geom2))
pedestal_id = pedestal_geomids.get(
contact.geom1, pedestal_geomids.get(contact.geom2))
has_lhand = (contact.geom1 in self._lhand_geomids or
contact.geom2 in self._lhand_geomids)
has_rhand = (contact.geom1 in self._rhand_geomids or
contact.geom2 in self._rhand_geomids)
if prop_id is not None and pedestal_id is not None:
prop_pedestal_contact_counts[prop_id, pedestal_id] += 1
if prop_id is not None and has_lhand:
prop_lhand_contact[prop_id] = True
if prop_id is not None and has_rhand:
prop_rhand_contact[prop_id] = True
for prop_id in range(self._num_props):
if prop_lhand_contact[prop_id] and prop_rhand_contact[prop_id]:
self._current_state[WALKER_PROP][prop_id] = 1
pedestal_contact_counts = prop_pedestal_contact_counts[prop_id]
for pedestal_id in range(self._num_pedestals):
if pedestal_contact_counts[pedestal_id] >= 4:
self._current_state[PROP_PEDESTAL][prop_id, pedestal_id] = 1
def _evaluate_target_state(self):
return _is_same_state(self._current_state, self._target_state)
|
deepmind-research-master
|
catch_carry/warehouse.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for opening arms until they are not in contact with a prop."""
import contextlib
from dm_control.mujoco.wrapper import mjbindings
import numpy as np
_MAX_IK_ATTEMPTS = 100
_IK_MAX_CORRECTION_WEIGHT = 0.1
_JOINT_LIMIT_TOLERANCE = 1e-4
_GAP_TOLERANCE = 0.1
class _ArmPropContactRemover(object):
"""Helper class for removing contacts between an arm and a prop via IK."""
def __init__(self, physics, arm_root, prop, gap):
arm_geoms = arm_root.find_all('geom')
self._arm_geom_ids = set(physics.bind(arm_geoms).element_id)
arm_joints = arm_root.find_all('joint')
self._arm_joint_ids = list(physics.bind(arm_joints).element_id)
self._arm_qpos_indices = physics.model.jnt_qposadr[self._arm_joint_ids]
self._arm_dof_indices = physics.model.jnt_dofadr[self._arm_joint_ids]
self._prop_geoms = prop.find_all('geom')
self._prop_geom_ids = set(physics.bind(self._prop_geoms).element_id)
self._arm_joint_min = np.full(len(self._arm_joint_ids), float('-inf'),
dtype=physics.model.jnt_range.dtype)
self._arm_joint_max = np.full(len(self._arm_joint_ids), float('inf'),
dtype=physics.model.jnt_range.dtype)
for i, joint_id in enumerate(self._arm_joint_ids):
if physics.model.jnt_limited[joint_id]:
self._arm_joint_min[i], self._arm_joint_max[i] = (
physics.model.jnt_range[joint_id])
self._gap = gap
def _contact_pair_is_relevant(self, contact):
set1 = self._arm_geom_ids
set2 = self._prop_geom_ids
return ((contact.geom1 in set1 and contact.geom2 in set2) or
(contact.geom2 in set1 and contact.geom1 in set2))
def _forward_and_find_next_contact(self, physics):
"""Forwards the physics and finds the next contact to handle."""
physics.forward()
next_contact = None
for contact in physics.data.contact:
if (self._contact_pair_is_relevant(contact) and
(next_contact is None or contact.dist < next_contact.dist)):
next_contact = contact
return next_contact
def _remove_contact_ik_iteration(self, physics, contact):
"""Performs one linearized IK iteration to remove the specified contact."""
if contact.geom1 in self._arm_geom_ids:
sign = -1
geom_id = contact.geom1
else:
sign = 1
geom_id = contact.geom2
body_id = physics.model.geom_bodyid[geom_id]
normal = sign * contact.frame[:3]
jac_dtype = physics.data.qpos.dtype
jac = np.empty((6, physics.model.nv), dtype=jac_dtype)
jac_pos, jac_rot = jac[:3], jac[3:]
mjbindings.mjlib.mj_jacPointAxis(
physics.model.ptr, physics.data.ptr,
jac_pos, jac_rot,
contact.pos + (contact.dist / 2) * normal, normal, body_id)
# Calculate corrections w.r.t. all joints, disregarding joint limits.
delta_xpos = normal * max(0, self._gap - contact.dist)
jac_all_joints = jac_pos[:, self._arm_dof_indices]
update_unfiltered = np.linalg.lstsq(
jac_all_joints, delta_xpos, rcond=None)[0]
# Filter out joints at limit that are corrected in the "wrong" direction.
initial_qpos = np.array(physics.data.qpos[self._arm_qpos_indices])
min_filter = np.logical_and(
initial_qpos - self._arm_joint_min < _JOINT_LIMIT_TOLERANCE,
update_unfiltered < 0)
max_filter = np.logical_and(
self._arm_joint_max - initial_qpos < _JOINT_LIMIT_TOLERANCE,
update_unfiltered > 0)
active_joints = np.where(
np.logical_not(np.logical_or(min_filter, max_filter)))[0]
# Calculate corrections w.r.t. valid joints only.
active_dof_indices = self._arm_dof_indices[active_joints]
jac_joints = jac_pos[:, active_dof_indices]
update_filtered = np.linalg.lstsq(jac_joints, delta_xpos, rcond=None)[0]
update_nv = np.zeros(physics.model.nv, dtype=jac_dtype)
update_nv[active_dof_indices] = update_filtered
# Calculate maximum correction weight that does not violate joint limits.
weights = np.full_like(update_filtered, _IK_MAX_CORRECTION_WEIGHT)
active_initial_qpos = initial_qpos[active_joints]
active_joint_min = self._arm_joint_min[active_joints]
active_joint_max = self._arm_joint_max[active_joints]
for i in range(len(weights)):
proposed_update = update_filtered[i]
if proposed_update > 0:
max_allowed_update = active_joint_max[i] - active_initial_qpos[i]
weights[i] = min(max_allowed_update / proposed_update, weights[i])
elif proposed_update < 0:
min_allowed_update = active_joint_min[i] - active_initial_qpos[i]
weights[i] = min(min_allowed_update / proposed_update, weights[i])
weight = min(weights)
# Integrate the correction into `qpos`.
mjbindings.mjlib.mj_integratePos(
physics.model.ptr, physics.data.qpos, update_nv, weight)
# "Paranoid" clip the modified joint `qpos` to within joint limits.
active_qpos_indices = self._arm_qpos_indices[active_joints]
physics.data.qpos[active_qpos_indices] = np.clip(
physics.data.qpos[active_qpos_indices],
active_joint_min, active_joint_max)
@contextlib.contextmanager
def _override_margins_and_gaps(self, physics):
"""Context manager that overrides geom margins and gaps to `self._gap`."""
prop_geom_bindings = physics.bind(self._prop_geoms)
original_margins = np.array(prop_geom_bindings.margin)
original_gaps = np.array(prop_geom_bindings.gap)
prop_geom_bindings.margin = self._gap * (1 - _GAP_TOLERANCE)
prop_geom_bindings.gap = self._gap * (1 - _GAP_TOLERANCE)
yield
prop_geom_bindings.margin = original_margins
prop_geom_bindings.gap = original_gaps
physics.forward()
def remove_contacts(self, physics):
with self._override_margins_and_gaps(physics):
for _ in range(_MAX_IK_ATTEMPTS):
contact = self._forward_and_find_next_contact(physics)
if contact is None:
return
self._remove_contact_ik_iteration(physics, contact)
contact = self._forward_and_find_next_contact(physics)
if contact and contact.dist < 0:
raise RuntimeError(
'Failed to remove contact with prop after {} iterations. '
'Final contact distance is {}.'.format(
_MAX_IK_ATTEMPTS, contact.dist))
def open_arms_for_prop(physics, left_arm_root, right_arm_root, prop, gap):
"""Opens left and right arms so as to leave a specified gap with the prop."""
left_arm_opener = _ArmPropContactRemover(physics, left_arm_root, prop, gap)
left_arm_opener.remove_contacts(physics)
right_arm_opener = _ArmPropContactRemover(physics, right_arm_root, prop, gap)
right_arm_opener.remove_contacts(physics)
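# Illustrative sketch (added; simplified, NumPy only): the core of the IK
# iteration above -- solve a linearized least-squares problem for the joint
# corrections that move the contact point along the contact normal.
def _example_linearized_ik_step():
  jac_joints = np.array([[0.0, 1.0],
                         [1.0, 0.0],
                         [0.0, 0.0]])  # Toy 3x2 positional Jacobian.
  delta_xpos = np.array([0.02, 0.0, 0.0])  # Open a 2 cm gap along the normal.
  update = np.linalg.lstsq(jac_joints, delta_xpos, rcond=None)[0]
  print(update)  # Joint-space correction: [0.   0.02]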
|
deepmind-research-master
|
catch_carry/arm_opener.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metadata for mocap clips that correspond to a walker carrying a prop."""
import collections
import enum
import os
from dm_control.locomotion.mocap import loader as mocap_loader
from catch_carry import trajectories
H5_DIR = os.path.dirname(__file__)
H5_PATH = os.path.join(H5_DIR, 'mocap_data.h5')
IDENTIFIER_PREFIX = 'DeepMindCatchCarry'
IDENTIFIER_TEMPLATE = IDENTIFIER_PREFIX + '-{:03d}'
ClipInfo = collections.namedtuple(
'ClipInfo', ('clip_identifier', 'num_steps', 'dt', 'flags'))
class Flag(enum.IntEnum):
BOX = 1 << 0
BALL = 1 << 1
LIGHT_PROP = 1 << 2
HEAVY_PROP = 1 << 3
SMALL_PROP = 1 << 4
LARGE_PROP = 1 << 5
FLOOR_LEVEL = 1 << 6
MEDIUM_PEDESTAL = 1 << 7
HIGH_PEDESTAL = 1 << 8
_ALL_CLIPS = None
def _get_clip_info(loader, clip_number, flags):
clip = loader.get_trajectory(IDENTIFIER_TEMPLATE.format(clip_number))
return ClipInfo(
clip_identifier=clip.identifier,
num_steps=clip.num_steps,
dt=clip.dt,
flags=flags)
def _get_all_clip_infos_if_necessary():
"""Creates the global _ALL_CLIPS list if it has not already been created."""
global _ALL_CLIPS
if _ALL_CLIPS is None:
loader = mocap_loader.HDF5TrajectoryLoader(
H5_PATH, trajectories.WarehouseTrajectory)
clip_numbers = (1, 2, 3, 4, 5, 6, 9, 10,
11, 12, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 42, 43, 44, 45,
46, 47, 48, 49, 50, 51, 52, 53)
clip_infos = []
for i, clip_number in enumerate(clip_numbers):
flags = 0
if i in _FLOOR_LEVEL:
flags |= Flag.FLOOR_LEVEL
elif i in _MEDIUM_PEDESTAL:
flags |= Flag.MEDIUM_PEDESTAL
elif i in _HIGH_PEDESTAL:
flags |= Flag.HIGH_PEDESTAL
if i in _LIGHT_PROP:
flags |= Flag.LIGHT_PROP
elif i in _HEAVY_PROP:
flags |= Flag.HEAVY_PROP
if i in _SMALL_BOX:
flags |= Flag.SMALL_PROP
flags |= Flag.BOX
elif i in _LARGE_BOX:
flags |= Flag.LARGE_PROP
flags |= Flag.BOX
elif i in _SMALL_BALL:
flags |= Flag.SMALL_PROP
flags |= Flag.BALL
elif i in _LARGE_BALL:
flags |= Flag.LARGE_PROP
flags |= Flag.BALL
clip_infos.append(_get_clip_info(loader, clip_number, flags))
_ALL_CLIPS = tuple(clip_infos)
def _assert_partitions_all_clips(*args):
"""Asserts that a given set of subcollections partitions ALL_CLIPS."""
sets = tuple(set(arg) for arg in args)
# Check that the union of all the sets is ALL_CLIPS.
union = set()
for subset in sets:
union = union | set(subset)
assert union == set(range(48))
# Check that the sets are pairwise disjoint.
for i in range(len(sets)):
for j in range(i + 1, len(sets)):
assert sets[i] & sets[j] == set()
_FLOOR_LEVEL = tuple(range(0, 16))
_MEDIUM_PEDESTAL = tuple(range(16, 32))
_HIGH_PEDESTAL = tuple(range(32, 48))
_assert_partitions_all_clips(_FLOOR_LEVEL, _MEDIUM_PEDESTAL, _HIGH_PEDESTAL)
_LIGHT_PROP = (0, 1, 2, 3, 8, 9, 12, 13, 16, 17, 18, 19, 24,
25, 26, 27, 34, 35, 38, 39, 42, 43, 46, 47)
_HEAVY_PROP = (4, 5, 6, 7, 10, 11, 14, 15, 20, 21, 22, 23, 28,
29, 30, 31, 32, 33, 36, 37, 40, 41, 44, 45)
_assert_partitions_all_clips(_LIGHT_PROP, _HEAVY_PROP)
_SMALL_BOX = (0, 1, 4, 5, 16, 17, 20, 21, 34, 35, 36, 37)
_LARGE_BOX = (2, 3, 6, 7, 18, 19, 22, 23, 32, 33, 38, 39)
_SMALL_BALL = (8, 9, 10, 11, 24, 25, 30, 31, 40, 41, 46, 47)
_LARGE_BALL = (12, 13, 14, 15, 26, 27, 28, 29, 42, 43, 44, 45)
_assert_partitions_all_clips(_SMALL_BOX, _LARGE_BOX, _SMALL_BALL, _LARGE_BALL)
def all_clips():
_get_all_clip_infos_if_necessary()
return _ALL_CLIPS
def floor_level():
clips = all_clips()
return tuple(clips[i] for i in _FLOOR_LEVEL)
def medium_pedestal():
clips = all_clips()
return tuple(clips[i] for i in _MEDIUM_PEDESTAL)
def high_pedestal():
clips = all_clips()
return tuple(clips[i] for i in _HIGH_PEDESTAL)
def light_prop():
clips = all_clips()
return tuple(clips[i] for i in _LIGHT_PROP)
def heavy_prop():
clips = all_clips()
return tuple(clips[i] for i in _HEAVY_PROP)
def small_box():
clips = all_clips()
return tuple(clips[i] for i in _SMALL_BOX)
def large_box():
clips = all_clips()
return tuple(clips[i] for i in _LARGE_BOX)
def small_ball():
clips = all_clips()
return tuple(clips[i] for i in _SMALL_BALL)
def large_ball():
clips = all_clips()
return tuple(clips[i] for i in _LARGE_BALL)
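# Illustrative example (added; requires mocap_data.h5 alongside this module):
# selecting clip subsets by intersecting the flag-based accessors, mirroring
# how warehouse.py chooses its training clips.
def _example_clip_selection():
  clips = sorted(set(medium_pedestal()) & set(small_box()))
  print(len(clips), clips[0].clip_identifier)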
|
deepmind-research-master
|
catch_carry/mocap_data.py
|
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
deepmind-research-master
|
catch_carry/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['absl-py', 'dm_control', 'numpy']
setup(
name='catch_carry',
version='0.1',
description='Whole-body object manipulation tasks and motion capture data.',
url='https://github.com/deepmind/deepmind-research/catch_carry',
author='DeepMind',
author_email='stunya@google.com',
# Contained modules and scripts.
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
platforms=['any'],
license='Apache 2.0',
)
|
deepmind-research-master
|
catch_carry/setup.py
|
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A ball-tossing task."""
import collections
from dm_control import composer
from dm_control import mjcf
from dm_control.composer import variation
from dm_control.composer.observation import observable
from dm_control.locomotion.arenas import floors
from dm_control.locomotion.mocap import loader as mocap_loader
import numpy as np
from catch_carry import mocap_data
from catch_carry import props
from catch_carry import trajectories
_PHYSICS_TIMESTEP = 0.005
_BUCKET_SIZE = (0.2, 0.2, 0.02)
# Magnitude of the sparse reward.
_SPARSE_REWARD = 1.0
class BallToss(composer.Task):
"""A task involving catching and throwing a ball."""
def __init__(self, walker,
proto_modifier=None,
negative_reward_on_failure_termination=True,
priority_friction=False,
bucket_offset=1.,
y_range=0.5,
toss_delay=0.5,
randomize_init=False,
):
"""Initialize ball tossing task.
Args:
walker: the walker to be used in this task.
      proto_modifier: function to modify the trajectory proto.
      negative_reward_on_failure_termination: flag to provide a negative reward
        when the task fails.
      priority_friction: sets friction priority, thereby giving prop objects
        higher friction.
      bucket_offset: distance in meters to push the bucket away from the
        walker.
y_range: range (uniformly sampled) of distance in meters the ball is
thrown left/right of the walker.
toss_delay: time in seconds to delay after catching before changing reward
to encourage throwing the ball.
randomize_init: flag to randomize initial pose.
"""
self._proto_modifier = proto_modifier
self._negative_reward_on_failure_termination = (
negative_reward_on_failure_termination)
self._priority_friction = priority_friction
self._bucket_rewarded = False
self._bucket_offset = bucket_offset
self._y_range = y_range
self._toss_delay = toss_delay
self._randomize_init = randomize_init
# load a clip to grab a ball prop and initializations
loader = mocap_loader.HDF5TrajectoryLoader(
mocap_data.H5_PATH, trajectories.WarehouseTrajectory)
clip_number = 54
self._trajectory = loader.get_trajectory(
mocap_data.IDENTIFIER_TEMPLATE.format(clip_number))
# create the floor arena
self._arena = floors.Floor()
self._walker = walker
self._walker_geoms = tuple(self._walker.mjcf_model.find_all('geom'))
self._feet_geoms = (
walker.mjcf_model.find('body', 'lfoot').find_all('geom') +
walker.mjcf_model.find('body', 'rfoot').find_all('geom'))
self._lhand_geoms = (
walker.mjcf_model.find('body', 'lhand').find_all('geom'))
self._rhand_geoms = (
walker.mjcf_model.find('body', 'rhand').find_all('geom'))
# resize the humanoid based on the motion capture data subject
self._trajectory.configure_walkers([self._walker])
walker.create_root_joints(self._arena.attach(walker))
control_timestep = self._trajectory.dt
self.set_timesteps(control_timestep, _PHYSICS_TIMESTEP)
# build and attach the bucket to the arena
self._bucket = props.Bucket(_BUCKET_SIZE)
self._arena.attach(self._bucket)
self._prop = self._trajectory.create_props(
priority_friction=self._priority_friction)[0]
self._arena.add_free_entity(self._prop)
self._task_observables = collections.OrderedDict()
# define feature based observations (agent may or may not use these)
def ego_prop_xpos(physics):
prop_xpos, _ = self._prop.get_pose(physics)
walker_xpos = physics.bind(self._walker.root_body).xpos
return self._walker.transform_vec_to_egocentric_frame(
physics, prop_xpos - walker_xpos)
self._task_observables['prop_{}/xpos'.format(0)] = (
observable.Generic(ego_prop_xpos))
def prop_zaxis(physics):
prop_xmat = physics.bind(
mjcf.get_attachment_frame(self._prop.mjcf_model)).xmat
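      # xmat is the prop frame's rotation matrix flattened in row-major order;
      # indices 2, 5 and 8 select its third column, i.e. the prop's z-axis
      # expressed in the world frame.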
return prop_xmat[[2, 5, 8]]
self._task_observables['prop_{}/zaxis'.format(0)] = (
observable.Generic(prop_zaxis))
def ego_bucket_xpos(physics):
bucket_xpos, _ = self._bucket.get_pose(physics)
walker_xpos = physics.bind(self._walker.root_body).xpos
return self._walker.transform_vec_to_egocentric_frame(
physics, bucket_xpos - walker_xpos)
self._task_observables['bucket_{}/xpos'.format(0)] = (
observable.Generic(ego_bucket_xpos))
for obs in (self._walker.observables.proprioception +
self._walker.observables.kinematic_sensors +
self._walker.observables.dynamic_sensors +
list(self._task_observables.values())):
obs.enabled = True
@property
def root_entity(self):
return self._arena
@property
def task_observables(self):
return self._task_observables
@property
def name(self):
return 'ball_toss'
def initialize_episode_mjcf(self, random_state):
self._reward = 0.0
self._discount = 1.0
self._should_terminate = False
self._prop.detach()
if self._proto_modifier:
trajectory = self._trajectory.get_modified_trajectory(
self._proto_modifier)
self._prop = trajectory.create_props(
priority_friction=self._priority_friction)[0]
self._arena.add_free_entity(self._prop)
# set the bucket position for this episode
bucket_distance = 1.*random_state.rand()+self._bucket_offset
mjcf.get_attachment_frame(self._bucket.mjcf_model).pos = [bucket_distance,
0, 0]
def initialize_episode(self, physics, random_state):
self._ground_geomid = physics.bind(
self._arena.mjcf_model.worldbody.geom[0]).element_id
self._feet_geomids = set(physics.bind(self._feet_geoms).element_id)
self._lhand_geomids = set(physics.bind(self._lhand_geoms).element_id)
self._rhand_geomids = set(physics.bind(self._rhand_geoms).element_id)
self._walker_geomids = set(physics.bind(self._walker_geoms).element_id)
self._bucket_rewarded = False
if self._randomize_init:
timestep_ind = random_state.randint(
len(self._trajectory._proto.timesteps)) # pylint: disable=protected-access
else:
timestep_ind = 0
walker_init_timestep = self._trajectory._proto.timesteps[timestep_ind] # pylint: disable=protected-access
prop_init_timestep = self._trajectory._proto.timesteps[0] # pylint: disable=protected-access
self._walker.set_pose(
physics,
position=walker_init_timestep.walkers[0].position,
quaternion=walker_init_timestep.walkers[0].quaternion)
self._walker.set_velocity(
physics, velocity=walker_init_timestep.walkers[0].velocity,
angular_velocity=walker_init_timestep.walkers[0].angular_velocity)
physics.bind(self._walker.mocap_joints).qpos = (
walker_init_timestep.walkers[0].joints)
physics.bind(self._walker.mocap_joints).qvel = (
walker_init_timestep.walkers[0].joints_velocity)
initial_prop_pos = np.copy(prop_init_timestep.props[0].position)
initial_prop_pos[0] += 1. # move ball (from mocap) relative to origin
initial_prop_pos[1] = 0 # align ball with walker along y-axis
self._prop.set_pose(
physics,
position=initial_prop_pos,
quaternion=prop_init_timestep.props[0].quaternion)
# specify the distributions of ball velocity componentwise
x_vel_mag = 4.5*random_state.rand()+1.5 # m/s
x_dist = 3 # approximate initial distance from walker to ball
self._t_dist = x_dist/x_vel_mag # target time at which to hit the humanoid
z_offset = .4*random_state.rand()+.1 # height at which to hit person
# compute velocity to satisfy desired projectile trajectory
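    # A sketch of the derivation (assuming gravity g = 9.8 m/s^2): the ball
    # should end up z_offset above its launch height after t_dist seconds, so
    # z_offset = v_z * t_dist - 0.5 * g * t_dist**2, which rearranges to
    # v_z = (4.9 * t_dist**2 + z_offset) / t_dist.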
z_vel_mag = (4.9*(self._t_dist**2) + z_offset)/self._t_dist
y_range = variation.evaluate(self._y_range, random_state=random_state)
y_vel_mag = y_range*random_state.rand()-y_range/2
trans_vel = [-x_vel_mag, y_vel_mag, z_vel_mag]
ang_vel = 1.5*random_state.rand(3)-0.75
self._prop.set_velocity(
physics,
velocity=trans_vel,
angular_velocity=ang_vel)
def after_step(self, physics, random_state):
# First we check for failure termination (walker or ball touches ground).
ground_failure = False
for contact in physics.data.contact:
if ((contact.geom1 == self._ground_geomid and
contact.geom2 not in self._feet_geomids) or
(contact.geom2 == self._ground_geomid and
contact.geom1 not in self._feet_geomids)):
ground_failure = True
break
contact_features = self._evaluate_contacts(physics)
prop_lhand, prop_rhand, bucket_prop, bucket_walker, walker_prop = contact_features
# or also fail if walker hits bucket
if ground_failure or bucket_walker:
if self._negative_reward_on_failure_termination:
self._reward = -_SPARSE_REWARD
else:
self._reward = 0.0
self._should_terminate = True
self._discount = 0.0
return
self._reward = 0.0
# give reward if prop is in bucket (prop touching bottom surface of bucket)
if bucket_prop:
self._reward += _SPARSE_REWARD/10
# shaping reward for being closer to bucket
if physics.data.time > (self._t_dist + self._toss_delay):
bucket_xy = physics.bind(self._bucket.geom).xpos[0][:2]
prop_xy = self._prop.get_pose(physics)[0][:2]
xy_dist = np.sum(np.array(np.abs(bucket_xy - prop_xy)))
self._reward += np.exp(-xy_dist/3.)*_SPARSE_REWARD/50
else:
# bonus for hands touching ball
if prop_lhand:
self._reward += _SPARSE_REWARD/100
if prop_rhand:
self._reward += _SPARSE_REWARD/100
# combined with penalty for other body parts touching the ball
if walker_prop:
self._reward -= _SPARSE_REWARD/100
def get_reward(self, physics):
return self._reward
def get_discount(self, physics):
return self._discount
def should_terminate_episode(self, physics):
return self._should_terminate
def _evaluate_contacts(self, physics):
prop_elem_id = physics.bind(self._prop.geom).element_id
bucket_bottom_elem_id = physics.bind(self._bucket.geom[0]).element_id
bucket_any_elem_id = set(physics.bind(self._bucket.geom).element_id)
prop_lhand_contact = False
prop_rhand_contact = False
bucket_prop_contact = False
bucket_walker_contact = False
walker_prop_contact = False
for contact in physics.data.contact:
has_prop = (contact.geom1 == prop_elem_id or
contact.geom2 == prop_elem_id)
has_bucket_bottom = (contact.geom1 == bucket_bottom_elem_id or
contact.geom2 == bucket_bottom_elem_id)
has_bucket_any = (contact.geom1 in bucket_any_elem_id or
contact.geom2 in bucket_any_elem_id)
has_lhand = (contact.geom1 in self._lhand_geomids or
contact.geom2 in self._lhand_geomids)
has_rhand = (contact.geom1 in self._rhand_geomids or
contact.geom2 in self._rhand_geomids)
has_walker = (contact.geom1 in self._walker_geomids or
contact.geom2 in self._walker_geomids)
if has_prop and has_bucket_bottom:
bucket_prop_contact = True
if has_walker and has_bucket_any:
bucket_walker_contact = True
if has_walker and has_prop:
walker_prop_contact = True
if has_prop and has_lhand:
prop_lhand_contact = True
if has_prop and has_rhand:
prop_rhand_contact = True
return (prop_lhand_contact, prop_rhand_contact, bucket_prop_contact,
bucket_walker_contact, walker_prop_contact)
|
deepmind-research-master
|
catch_carry/ball_toss.py
|
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to launch viewer with an example environment."""
from absl import app
from absl import flags
from dm_control import viewer
from catch_carry import task_examples
FLAGS = flags.FLAGS
flags.DEFINE_enum('task', 'warehouse', ['warehouse', 'toss'],
'The task to visualize.')
TASKS = {
'warehouse': task_examples.build_vision_warehouse,
'toss': task_examples.build_vision_toss,
}
def main(unused_argv):
viewer.launch(environment_loader=TASKS[FLAGS.task])
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
catch_carry/explore.py
|
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A rectangular pedestal."""
from dm_control import composer
from dm_control import mjcf
class Pedestal(composer.Entity):
"""A rectangular pedestal."""
def _build(self, size=(.2, .3, .05), rgba=(0, .5, 0, 1), name='pedestal'):
self._mjcf_root = mjcf.RootElement(model=name)
self._geom = self._mjcf_root.worldbody.add(
'geom', type='box', size=size, name='geom', rgba=rgba)
@property
def mjcf_model(self):
return self._mjcf_root
@property
def geom(self):
return self._geom
def after_compile(self, physics, unused_random_state):
super(Pedestal, self).after_compile(physics, unused_random_state)
self._body_geom_ids = set(
physics.bind(geom).element_id
for geom in self.mjcf_model.find_all('geom'))
@property
def body_geom_ids(self):
return self._body_geom_ids
class Bucket(composer.Entity):
"""A rectangular bucket."""
def _build(self, size=(.2, .3, .05), rgba=(0, .5, 0, 1), name='pedestal'):
self._mjcf_root = mjcf.RootElement(model=name)
self._geoms = []
self._geoms.append(self._mjcf_root.worldbody.add(
'geom', type='box', size=size, name='geom_bottom', rgba=rgba))
self._geoms.append(self._mjcf_root.worldbody.add(
'geom', type='box', size=(size[2], size[1], size[0]), name='geom_s1',
rgba=rgba, pos=[size[0], 0, size[0]]))
self._geoms.append(self._mjcf_root.worldbody.add(
'geom', type='box', size=(size[2], size[1], size[0]), name='geom_s2',
rgba=rgba, pos=[-size[0], 0, size[0]]))
self._geoms.append(self._mjcf_root.worldbody.add(
'geom', type='box', size=(size[0], size[2], size[0]), name='geom_s3',
rgba=rgba, pos=[0, size[1], size[0]]))
self._geoms.append(self._mjcf_root.worldbody.add(
'geom', type='box', size=(size[0], size[2], size[0]), name='geom_s4',
rgba=rgba, pos=[0, -size[1], size[0]]))
@property
def mjcf_model(self):
return self._mjcf_root
@property
def geom(self):
return self._geoms
def after_compile(self, physics, unused_random_state):
super(Bucket, self).after_compile(physics, unused_random_state)
self._body_geom_ids = set(
physics.bind(geom).element_id
for geom in self.mjcf_model.find_all('geom'))
@property
def body_geom_ids(self):
return self._body_geom_ids
|
deepmind-research-master
|
catch_carry/props.py
|
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mocap trajectory that assumes props start stationary on pedestals."""
import copy
import enum
import itertools
from dm_control.locomotion.mocap import mocap_pb2
from dm_control.locomotion.mocap import trajectory
from dm_control.utils import transformations
import numpy as np
_PEDESTAL_SIZE = (0.2, 0.2, 0.02)
_MAX_SETTLE_STEPS = 100
@enum.unique
class ClipSegment(enum.Enum):
"""Annotations for subsegments within a warehouse clips."""
# Clip segment corresponding to a walker approaching an object
APPROACH = 1
# Clip segment corresponding to a walker picking up an object.
PICKUP = 2
# Clip segment corresponding to the "first half" of the walker carrying an
# object, beginning from the walker backing away from a pedestal with
  # the object in hand.
CARRY1 = 3
# Clip segment corresponding to the "second half" of the walker carrying an
  # object, ending with the walker approaching a pedestal, object in hand.
CARRY2 = 4
# Clip segment corresponding to a walker putting down an object on a pedestal.
PUTDOWN = 5
# Clip segment corresponding to a walker backing off after successfully
# placing an object on a pedestal.
BACKOFF = 6
def _get_rotated_bounding_box(size, quaternion):
"""Calculates the bounding box of a rotated 3D box.
Args:
size: An array of length 3 specifying the half-lengths of a box.
quaternion: A unit quaternion specifying the box's orientation.
Returns:
An array of length 3 specifying the half-lengths of the bounding box of
the rotated box.
"""
corners = ((size[0], size[1], size[2]),
(size[0], size[1], -size[2]),
(size[0], -size[1], size[2]),
(-size[0], size[1], size[2]))
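  # The remaining four corners are the negations of these four, so taking the
  # elementwise maximum of the absolute rotated coordinates below covers all
  # eight corners of the box.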
rotated_corners = tuple(
transformations.quat_rotate(quaternion, corner) for corner in corners)
return np.amax(np.abs(rotated_corners), axis=0)
def _get_prop_z_extent(prop_proto, quaternion):
"""Calculates the "z-extent" of the prop in given orientation.
This is the distance from the centre of the prop to its lowest point in the
world frame, taking into account the prop's orientation.
Args:
prop_proto: A `mocap_pb2.Prop` protocol buffer defining a prop.
quaternion: A unit quaternion specifying the prop's orientation.
Returns:
the distance from the centre of the prop to its lowest point in the
world frame in the specified orientation.
"""
if prop_proto.shape == mocap_pb2.Prop.BOX:
return _get_rotated_bounding_box(prop_proto.size, quaternion)[2]
elif prop_proto.shape == mocap_pb2.Prop.SPHERE:
return prop_proto.size[0]
else:
raise NotImplementedError(
'Unsupported prop shape: {}'.format(prop_proto.shape))
class WarehouseTrajectory(trajectory.Trajectory):
"""Mocap trajectory that assumes props start stationary on pedestals."""
def infer_pedestal_positions(self, num_averaged_steps=30,
ground_height_tolerance=0.1,
proto_modifier=None):
proto = self._proto
if proto_modifier is not None:
proto = copy.copy(proto)
proto_modifier(proto)
if not proto.props:
return []
positions = []
for timestep in itertools.islice(proto.timesteps, num_averaged_steps):
positions_for_timestep = []
for prop_proto, prop_timestep in zip(proto.props, timestep.props):
z_extent = _get_prop_z_extent(prop_proto, prop_timestep.quaternion)
positions_for_timestep.append([prop_timestep.position[0],
prop_timestep.position[1],
prop_timestep.position[2] - z_extent])
positions.append(positions_for_timestep)
median_positions = np.median(positions, axis=0)
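    # Snap pedestal heights that are within tolerance of the ground plane to
    # exactly zero, i.e. treat those props as starting at floor level.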
median_positions[:, 2][median_positions[:, 2] < ground_height_tolerance] = 0
return median_positions
def get_props_z_extent(self, physics):
timestep = self._proto.timesteps[self._get_step_id(physics.time())]
out = []
for prop_proto, prop_timestep in zip(self._proto.props, timestep.props):
z_extent = _get_prop_z_extent(prop_proto, prop_timestep.quaternion)
out.append(z_extent)
return out
class SinglePropCarrySegmentedTrajectory(WarehouseTrajectory):
"""A mocap trajectory class that automatically segments prop-carry clips.
The algorithm implemented in the class only works if the trajectory consists
  of exactly one walker and one prop. The value of `pedestal_zone_distance` and
  the exact nature of zone crossings were determined empirically from the
  DeepMindCatchCarry dataset, and are unlikely to work well outside of this
setting.
"""
def __init__(self,
proto,
start_time=None,
end_time=None,
pedestal_zone_distance=0.65,
start_step=None,
end_step=None,
zero_out_velocities=True):
super(SinglePropCarrySegmentedTrajectory, self).__init__(
proto, start_time, end_time, start_step=start_step, end_step=end_step,
zero_out_velocities=zero_out_velocities)
self._pedestal_zone_distance = pedestal_zone_distance
self._generate_segments()
def _generate_segments(self):
pedestal_position = self.infer_pedestal_positions()[0]
# First we find the timesteps at which the walker cross the pedestal's
# vicinity zone. This should happen exactly 4 times: enter it to pick up,
# leave it, enter it again to put down, and leave it again.
was_in_pedestal_zone = False
crossings = []
for i, timestep in enumerate(self._proto.timesteps):
pedestal_dist = np.linalg.norm(
timestep.walkers[0].position[:2] - pedestal_position[:2])
if pedestal_dist > self._pedestal_zone_distance and was_in_pedestal_zone:
crossings.append(i)
was_in_pedestal_zone = False
elif (pedestal_dist <= self._pedestal_zone_distance and
not was_in_pedestal_zone):
crossings.append(i)
was_in_pedestal_zone = True
if len(crossings) < 3:
raise RuntimeError(
'Failed to segment the given trajectory: '
'walker should cross the pedestal zone\'s boundary >= 3 times '
'but got {}'.format(len(crossings)))
elif len(crossings) == 3:
crossings.append(len(self._proto.timesteps) - 1)
elif len(crossings) > 4:
crossings = [crossings[0], crossings[1], crossings[-2], crossings[-1]]
# Identify the pick up event during the first in-zone interval.
start_position = np.array(self._proto.timesteps[0].props[0].position)
end_position = np.array(self._proto.timesteps[-1].props[0].position)
pick_up_step = crossings[1] - 1
while pick_up_step > crossings[0]:
prev_position = self._proto.timesteps[pick_up_step - 1].props[0].position
if np.linalg.norm(start_position[2] - prev_position[2]) < 0.001:
break
pick_up_step -= 1
# Identify the put down event during the second in-zone interval.
put_down_step = crossings[2]
while put_down_step <= crossings[3]:
next_position = self._proto.timesteps[put_down_step + 1].props[0].position
if np.linalg.norm(end_position[2] - next_position[2]) < 0.001:
break
put_down_step += 1
carry_halfway_step = int((crossings[1] + crossings[2]) / 2)
self._segment_intervals = {
ClipSegment.APPROACH: (0, crossings[0]),
ClipSegment.PICKUP: (crossings[0], pick_up_step),
ClipSegment.CARRY1: (pick_up_step, carry_halfway_step),
ClipSegment.CARRY2: (carry_halfway_step, crossings[2]),
ClipSegment.PUTDOWN: (crossings[2], put_down_step),
ClipSegment.BACKOFF: (put_down_step, len(self._proto.timesteps))
}
def segment_interval(self, segment):
start_step, end_step = self._segment_intervals[segment]
return (start_step * self._proto.dt, (end_step - 1) * self._proto.dt)
def get_random_timestep_in_segment(self, segment, random_step):
return self._proto.timesteps[
random_step.randint(*self._segment_intervals[segment])]
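# Example usage (a sketch; assumes `proto` is a loaded mocap protocol buffer
# containing exactly one walker and one prop):
#   traj = SinglePropCarrySegmentedTrajectory(proto)
#   start_time, end_time = traj.segment_interval(ClipSegment.PICKUP)
#   timestep = traj.get_random_timestep_in_segment(
#       ClipSegment.CARRY1, np.random.RandomState(0))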
|
deepmind-research-master
|
catch_carry/trajectories.py
|
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that build representative tasks."""
from dm_control import composer
from dm_control.composer.variation import distributions
from dm_control.locomotion.mocap import loader as mocap_loader
from dm_control.locomotion.walkers import cmu_humanoid
from catch_carry import ball_toss
from catch_carry import warehouse
def build_vision_warehouse(random_state=None):
"""Build canonical 4-pedestal, 2-prop task."""
# Build a position-controlled CMU humanoid walker.
walker = cmu_humanoid.CMUHumanoidPositionControlled(
observable_options={'egocentric_camera': dict(enabled=True)})
# Build the task.
size_distribution = distributions.Uniform(low=0.75, high=1.25)
mass_distribution = distributions.Uniform(low=2, high=7)
prop_resizer = mocap_loader.PropResizer(size_factor=size_distribution,
mass=mass_distribution)
task = warehouse.PhasedBoxCarry(
walker=walker,
num_props=2,
num_pedestals=4,
proto_modifier=prop_resizer,
negative_reward_on_failure_termination=True)
# return the environment
return composer.Environment(
time_limit=15,
task=task,
random_state=random_state,
strip_singleton_obs_buffer_dim=True,
max_reset_attempts=float('inf'))
def build_vision_toss(random_state=None):
"""Build canonical ball tossing task."""
# Build a position-controlled CMU humanoid walker.
walker = cmu_humanoid.CMUHumanoidPositionControlled(
observable_options={'egocentric_camera': dict(enabled=True)})
# Build the task.
size_distribution = distributions.Uniform(low=0.95, high=1.5)
mass_distribution = distributions.Uniform(low=2, high=4)
prop_resizer = mocap_loader.PropResizer(size_factor=size_distribution,
mass=mass_distribution)
task = ball_toss.BallToss(
walker=walker,
proto_modifier=prop_resizer,
negative_reward_on_failure_termination=True,
priority_friction=True,
bucket_offset=3.,
y_range=0.5,
toss_delay=1.5,
randomize_init=True)
# return the environment
return composer.Environment(
time_limit=6,
task=task,
random_state=random_state,
strip_singleton_obs_buffer_dim=True,
max_reset_attempts=float('inf'))
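# Example rollout (a sketch; the zero action is purely illustrative, and the
# action spec depends on the walker):
#   import numpy as np
#   env = build_vision_toss()
#   spec = env.action_spec()
#   timestep = env.reset()
#   while not timestep.last():
#     timestep = env.step(np.zeros(spec.shape, dtype=spec.dtype))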
|
deepmind-research-master
|
catch_carry/task_examples.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Hierarchical Probabilistic U-Net open-source version."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from model import HierarchicalProbUNet
import tensorflow.compat.v1 as tf
_NUM_CLASSES = 2
_BATCH_SIZE = 2
_SPATIAL_SHAPE = [32, 32]
_CHANNELS_PER_BLOCK = [5, 7, 9, 11, 13]
_IMAGE_SHAPE = [_BATCH_SIZE] + _SPATIAL_SHAPE + [1]
_BOTTLENECK_SIZE = _SPATIAL_SHAPE[0] // 2 ** (len(_CHANNELS_PER_BLOCK) - 1)
_SEGMENTATION_SHAPE = [_BATCH_SIZE] + _SPATIAL_SHAPE + [_NUM_CLASSES]
_LATENT_DIMS = [3, 2, 1]
_INITIALIZERS = {'w': tf.orthogonal_initializer(gain=1.0, seed=None),
'b': tf.truncated_normal_initializer(stddev=0.001)}
def _get_placeholders():
"""Returns placeholders for the image and segmentation."""
img = tf.placeholder(dtype=tf.float32, shape=_IMAGE_SHAPE)
seg = tf.placeholder(dtype=tf.float32, shape=_SEGMENTATION_SHAPE)
return img, seg
class HierarchicalProbUNetTest(tf.test.TestCase):
def test_shape_of_sample(self):
hpu_net = HierarchicalProbUNet(latent_dims=_LATENT_DIMS,
channels_per_block=_CHANNELS_PER_BLOCK,
num_classes=_NUM_CLASSES,
initializers=_INITIALIZERS)
img, _ = _get_placeholders()
sample = hpu_net.sample(img)
self.assertEqual(sample.shape.as_list(), _SEGMENTATION_SHAPE)
def test_shape_of_reconstruction(self):
hpu_net = HierarchicalProbUNet(latent_dims=_LATENT_DIMS,
channels_per_block=_CHANNELS_PER_BLOCK,
num_classes=_NUM_CLASSES,
initializers=_INITIALIZERS)
img, seg = _get_placeholders()
reconstruction = hpu_net.reconstruct(img, seg)
self.assertEqual(reconstruction.shape.as_list(), _SEGMENTATION_SHAPE)
def test_shapes_in_prior(self):
hpu_net = HierarchicalProbUNet(latent_dims=_LATENT_DIMS,
channels_per_block=_CHANNELS_PER_BLOCK,
num_classes=_NUM_CLASSES,
initializers=_INITIALIZERS)
img, _ = _get_placeholders()
prior_out = hpu_net._prior(img)
distributions = prior_out['distributions']
latents = prior_out['used_latents']
encoder_features = prior_out['encoder_features']
decoder_features = prior_out['decoder_features']
    # Test number of latent distributions.
self.assertEqual(len(distributions), len(_LATENT_DIMS))
# Test shapes of latent scales.
for level in range(len(_LATENT_DIMS)):
latent_spatial_shape = _BOTTLENECK_SIZE * 2 ** level
latent_shape = [_BATCH_SIZE, latent_spatial_shape, latent_spatial_shape,
_LATENT_DIMS[level]]
self.assertEqual(latents[level].shape.as_list(), latent_shape)
# Test encoder shapes.
for level in range(len(_CHANNELS_PER_BLOCK)):
spatial_shape = _SPATIAL_SHAPE[0] // 2 ** level
feature_shape = [_BATCH_SIZE, spatial_shape, spatial_shape,
_CHANNELS_PER_BLOCK[level]]
self.assertEqual(encoder_features[level].shape.as_list(), feature_shape)
# Test decoder shape.
start_level = len(_LATENT_DIMS)
latent_spatial_shape = _BOTTLENECK_SIZE * 2 ** start_level
latent_shape = [_BATCH_SIZE, latent_spatial_shape, latent_spatial_shape,
_CHANNELS_PER_BLOCK[::-1][start_level]]
self.assertEqual(decoder_features.shape.as_list(), latent_shape)
def test_shape_of_kl(self):
hpu_net = HierarchicalProbUNet(latent_dims=_LATENT_DIMS,
channels_per_block=_CHANNELS_PER_BLOCK,
num_classes=_NUM_CLASSES,
initializers=_INITIALIZERS)
img, seg = _get_placeholders()
kl_dict = hpu_net.kl(img, seg)
self.assertEqual(len(kl_dict), len(_LATENT_DIMS))
if __name__ == '__main__':
tf.test.main()
|
deepmind-research-master
|
hierarchical_probabilistic_unet/model_test.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility Functions for the GECO-objective.
(GECO is described in `Taming VAEs`, see https://arxiv.org/abs/1810.00597).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
class MovingAverage(snt.AbstractModule):
"""A thin wrapper around snt.MovingAverage.
The module adds the option not to differentiate through the last element that
is added to the moving average, specified by means of the kwarg
`differentiable`.
"""
def __init__(self, decay, local=True, differentiable=False,
name='snt_moving_average'):
super(MovingAverage, self).__init__(name=name)
self._differentiable = differentiable
self._moving_average = snt.MovingAverage(
decay=decay, local=local, name=name)
def _build(self, inputs):
if not self._differentiable:
inputs = tf.stop_gradient(inputs)
return self._moving_average(inputs)
class LagrangeMultiplier(snt.AbstractModule):
"""A lagrange multiplier sonnet module."""
def __init__(self,
rate=1e-2,
name='snt_lagrange_multiplier'):
"""Initializer for the sonnet module.
Args:
rate: Scalar used to scale the magnitude of gradients of the Lagrange
multipliers, defaulting to 1e-2.
name: Name of the Lagrange multiplier sonnet module.
"""
super(LagrangeMultiplier, self).__init__(name=name)
self._rate = rate
def _build(self, ma_constraint):
"""Connects the module to the graph.
Args:
ma_constraint: A loss minus a target value, denoting a constraint that
        shall be less than or equal to zero.
Returns:
      An op which, when added to a loss that is then minimized, results in the
      optimizer minimizing w.r.t. the model's parameters and maximizing w.r.t.
      the Lagrange multipliers, hence enforcing the constraints.
"""
lagmul = snt.get_lagrange_multiplier(
shape=ma_constraint.shape, rate=self._rate,
initializer=np.ones(ma_constraint.shape))
return lagmul
def _sample_gumbel(shape, eps=1e-20):
"""Transforms a uniform random variable to be standard Gumbel distributed."""
return -tf.log(
-tf.log(tf.random_uniform(shape, minval=0, maxval=1) + eps) + eps)
def _topk_mask(score, k):
"""Returns a mask for the top-k elements in score."""
_, indices = tf.nn.top_k(score, k=k)
return tf.scatter_nd(tf.expand_dims(indices, -1), tf.ones(k),
tf.squeeze(score).shape.as_list())
def ce_loss(logits, labels, mask=None, top_k_percentage=None,
deterministic=False):
"""Computes the cross-entropy loss.
Optionally a mask and a top-k percentage for the used pixels can be specified.
The top-k mask can be produced deterministically or sampled.
Args:
logits: A tensor of shape (b,h,w,num_classes)
labels: A tensor of shape (b,h,w,num_classes)
mask: None or a tensor of shape (b,h,w).
top_k_percentage: None or a float in (0.,1.]. If None, a standard
cross-entropy loss is calculated.
deterministic: A Boolean indicating whether or not to produce the
prospective top-k mask deterministically.
Returns:
A dictionary holding the mean and the pixelwise sum of the loss for the
batch as well as the employed loss mask.
"""
num_classes = logits.shape.as_list()[-1]
y_flat = tf.reshape(logits, (-1, num_classes), name='reshape_y')
t_flat = tf.reshape(labels, (-1, num_classes), name='reshape_t')
if mask is None:
mask = tf.ones(shape=(t_flat.shape.as_list()[0],))
else:
assert mask.shape.as_list()[:3] == labels.shape.as_list()[:3],\
'The loss mask shape differs from the target shape: {} vs. {}.'.format(
mask.shape.as_list(), labels.shape.as_list()[:3])
mask = tf.reshape(mask, (-1,), name='reshape_mask')
n_pixels_in_batch = y_flat.shape.as_list()[0]
xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=t_flat, logits=y_flat)
if top_k_percentage is not None:
assert 0.0 < top_k_percentage <= 1.0
k_pixels = tf.cast(tf.floor(n_pixels_in_batch * top_k_percentage), tf.int32)
stopgrad_xe = tf.stop_gradient(xe)
norm_xe = stopgrad_xe / tf.reduce_sum(stopgrad_xe)
if deterministic:
score = tf.log(norm_xe)
else:
# Use the Gumbel trick to sample the top-k pixels, equivalent to sampling
# from a categorical distribution over pixels whose probabilities are
# given by the normalized cross-entropy loss values. This is done by
# adding Gumbel noise to the logarithmic normalized cross-entropy loss
# (followed by choosing the top-k pixels).
score = tf.log(norm_xe) + _sample_gumbel(norm_xe.shape.as_list())
score = score + tf.log(mask)
top_k_mask = _topk_mask(score, k_pixels)
mask = mask * top_k_mask
# Calculate batch-averages for the sum and mean of the loss
batch_size = labels.shape.as_list()[0]
xe = tf.reshape(xe, shape=(batch_size, -1))
mask = tf.reshape(mask, shape=(batch_size, -1))
ce_sum_per_instance = tf.reduce_sum(mask * xe, axis=1)
ce_sum = tf.reduce_mean(ce_sum_per_instance, axis=0)
ce_mean = tf.reduce_sum(mask * xe) / tf.reduce_sum(mask)
return {'mean': ce_mean, 'sum': ce_sum, 'mask': mask}
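# Example usage (a sketch, TF1 graph mode; all shapes are illustrative):
#   logits = tf.zeros((2, 32, 32, 2))
#   labels = tf.one_hot(tf.zeros((2, 32, 32), tf.int32), depth=2)
#   loss_dict = ce_loss(logits, labels, top_k_percentage=0.02,
#                       deterministic=True)
#   # loss_dict holds the keys 'mean', 'sum' and 'mask'.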
|
deepmind-research-master
|
hierarchical_probabilistic_unet/geco_utils.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Open Source Version of the Hierarchical Probabilistic U-Net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import geco_utils
import sonnet as snt
import tensorflow as tf
from tensorflow_probability import distributions as tfd
import unet_utils
class _HierarchicalCore(snt.AbstractModule):
"""A U-Net encoder-decoder with a full encoder and a truncated decoder.
The truncated decoder is interleaved with the hierarchical latent space and
has as many levels as there are levels in the hierarchy plus one additional
level.
"""
def __init__(self, latent_dims, channels_per_block,
down_channels_per_block=None, activation_fn=tf.nn.relu,
initializers=None, regularizers=None, convs_per_block=3,
blocks_per_level=3, name='HierarchicalDecoderDist'):
"""Initializes a HierarchicalCore.
Args:
latent_dims: List of integers specifying the dimensions of the latents at
each scale. The length of the list indicates the number of U-Net decoder
scales that have latents.
channels_per_block: A list of integers specifying the number of output
channels for each encoder block.
down_channels_per_block: A list of integers specifying the number of
intermediate channels for each encoder block or None. If None, the
intermediate channels are chosen equal to channels_per_block.
activation_fn: A callable activation function.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b'). The default initializer for the
weights is a truncated normal initializer, which is commonly used when
the inputs are zero centered (see
https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for the
bias is a zero initializer.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes a
single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
convs_per_block: An integer specifying the number of convolutional layers.
blocks_per_level: An integer specifying the number of residual blocks per
level.
name: A string specifying the name of the module.
"""
super(_HierarchicalCore, self).__init__(name=name)
self._latent_dims = latent_dims
self._channels_per_block = channels_per_block
self._activation_fn = activation_fn
self._initializers = initializers
self._regularizers = regularizers
self._convs_per_block = convs_per_block
self._blocks_per_level = blocks_per_level
if down_channels_per_block is None:
self._down_channels_per_block = channels_per_block
else:
self._down_channels_per_block = down_channels_per_block
self._name = name
def _build(self, inputs, mean=False, z_q=None):
"""A build-method allowing to sample from the module as specified.
Args:
inputs: A tensor of shape (b,h,w,c). When using the module as a prior the
`inputs` tensor should be a batch of images. When using it as a posterior
        the tensor should be a (batched) concatenation of images and
segmentations.
mean: A boolean or a list of booleans. If a boolean, it specifies whether
or not to use the distributions' means in ALL latent scales. If a list,
each bool therein specifies whether or not to use the scale's mean. If
False, the latents of the scale are sampled.
z_q: None or a list of tensors. If not None, z_q provides external latents
to be used instead of sampling them. This is used to employ posterior
latents in the prior during training. Therefore, if z_q is not None, the
        value of `mean` is ignored. If z_q is None, either the distribution's
mean is used (in case `mean` for the respective scale is True) or else
a sample from the distribution is drawn.
Returns:
A Dictionary holding the output feature map of the truncated U-Net
decoder under key 'decoder_features', a list of the U-Net encoder features
produced at the end of each encoder scale under key 'encoder_outputs', a
list of the predicted distributions at each scale under key
'distributions', a list of the used latents at each scale under the key
'used_latents'.
"""
encoder_features = inputs
encoder_outputs = []
num_levels = len(self._channels_per_block)
num_latent_levels = len(self._latent_dims)
if isinstance(mean, bool):
mean = [mean] * num_latent_levels
distributions = []
used_latents = []
# Iterate the descending levels in the U-Net encoder.
for level in range(num_levels):
# Iterate the residual blocks in each level.
for _ in range(self._blocks_per_level):
encoder_features = unet_utils.res_block(
input_features=encoder_features,
n_channels=self._channels_per_block[level],
n_down_channels=self._down_channels_per_block[level],
activation_fn=self._activation_fn,
initializers=self._initializers,
regularizers=self._regularizers,
convs_per_block=self._convs_per_block)
encoder_outputs.append(encoder_features)
if level != num_levels - 1:
encoder_features = unet_utils.resize_down(encoder_features, scale=2)
# Iterate the ascending levels in the (truncated) U-Net decoder.
decoder_features = encoder_outputs[-1]
for level in range(num_latent_levels):
# Predict a Gaussian distribution for each pixel in the feature map.
latent_dim = self._latent_dims[level]
mu_logsigma = snt.Conv2D(
2 * latent_dim,
(1, 1),
padding='SAME',
initializers=self._initializers,
regularizers=self._regularizers,
)(decoder_features)
mu = mu_logsigma[..., :latent_dim]
logsigma = mu_logsigma[..., latent_dim:]
dist = tfd.MultivariateNormalDiag(loc=mu, scale_diag=tf.exp(logsigma))
distributions.append(dist)
# Get the latents to condition on.
if z_q is not None:
z = z_q[level]
elif mean[level]:
z = dist.loc
else:
z = dist.sample()
used_latents.append(z)
# Concat and upsample the latents with the previous features.
decoder_output_lo = tf.concat([z, decoder_features], axis=-1)
decoder_output_hi = unet_utils.resize_up(decoder_output_lo, scale=2)
decoder_features = tf.concat(
[decoder_output_hi, encoder_outputs[::-1][level + 1]], axis=-1)
# Iterate the residual blocks in each level.
for _ in range(self._blocks_per_level):
decoder_features = unet_utils.res_block(
input_features=decoder_features,
n_channels=self._channels_per_block[::-1][level + 1],
n_down_channels=self._down_channels_per_block[::-1][level + 1],
activation_fn=self._activation_fn,
initializers=self._initializers,
regularizers=self._regularizers,
convs_per_block=self._convs_per_block)
return {'decoder_features': decoder_features,
'encoder_features': encoder_outputs,
'distributions': distributions,
'used_latents': used_latents}
class _StitchingDecoder(snt.AbstractModule):
"""A module that completes the truncated U-Net decoder.
Using the output of the HierarchicalCore this module fills in the missing
decoder levels such that together the two form a symmetric U-Net.
"""
def __init__(self, latent_dims, channels_per_block, num_classes,
down_channels_per_block=None, activation_fn=tf.nn.relu,
initializers=None, regularizers=None, convs_per_block=3,
blocks_per_level=3, name='StitchingDecoder'):
"""Initializes a StichtingDecoder.
Args:
latent_dims: List of integers specifying the dimensions of the latents at
each scale. The length of the list indicates the number of U-Net
decoder scales that have latents.
channels_per_block: A list of integers specifying the number of output
channels for each encoder block.
num_classes: An integer specifying the number of segmentation classes.
down_channels_per_block: A list of integers specifying the number of
intermediate channels for each encoder block. If None, the
intermediate channels are chosen equal to channels_per_block.
activation_fn: A callable activation function.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b'). The default initializer for the
weights is a truncated normal initializer, which is commonly used when
the inputs are zero centered (see
https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for the
bias is a zero initializer.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes a
single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
convs_per_block: An integer specifying the number of convolutional layers.
blocks_per_level: An integer specifying the number of residual blocks per
level.
name: A string specifying the name of the module.
"""
super(_StitchingDecoder, self).__init__(name=name)
self._latent_dims = latent_dims
self._channels_per_block = channels_per_block
self._num_classes = num_classes
self._activation_fn = activation_fn
self._initializers = initializers
self._regularizers = regularizers
self._convs_per_block = convs_per_block
self._blocks_per_level = blocks_per_level
if down_channels_per_block is None:
down_channels_per_block = channels_per_block
self._down_channels_per_block = down_channels_per_block
def _build(self, encoder_features, decoder_features):
"""Build-method that returns the segmentation logits.
Args:
encoder_features: A list of tensors of shape (b,h_i,w_i,c_i).
decoder_features: A tensor of shape (b,h,w,c).
Returns:
Logits, i.e. a tensor of shape (b,h,w,num_classes).
"""
num_latents = len(self._latent_dims)
start_level = num_latents + 1
num_levels = len(self._channels_per_block)
for level in range(start_level, num_levels, 1):
decoder_features = unet_utils.resize_up(decoder_features, scale=2)
decoder_features = tf.concat([decoder_features,
encoder_features[::-1][level]], axis=-1)
for _ in range(self._blocks_per_level):
decoder_features = unet_utils.res_block(
input_features=decoder_features,
n_channels=self._channels_per_block[::-1][level],
n_down_channels=self._down_channels_per_block[::-1][level],
activation_fn=self._activation_fn,
initializers=self._initializers,
regularizers=self._regularizers,
convs_per_block=self._convs_per_block)
return snt.Conv2D(output_channels=self._num_classes,
kernel_shape=(1, 1),
padding='SAME',
initializers=self._initializers,
regularizers=self._regularizers,
name='logits')(decoder_features)
class HierarchicalProbUNet(snt.AbstractModule):
"""A Hierarchical Probabilistic U-Net."""
def __init__(self,
latent_dims=(1, 1, 1, 1),
channels_per_block=None,
num_classes=2,
down_channels_per_block=None,
activation_fn=tf.nn.relu,
initializers=None,
regularizers=None,
convs_per_block=3,
blocks_per_level=3,
loss_kwargs=None,
name='HPUNet'):
"""Initializes a HierarchicalProbUNet.
The default values are set as for the LIDC-IDRI experiments in
`A Hierarchical Probabilistic U-Net for Modeling Multi-Scale Ambiguities',
see https://arxiv.org/abs/1905.13077.
Args:
latent_dims: List of integers specifying the dimensions of the latents at
each scales. The length of the list indicates the number of U-Net
decoder scales that have latents.
channels_per_block: A list of integers specifying the number of output
channels for each encoder block.
num_classes: An integer specifying the number of segmentation classes.
down_channels_per_block: A list of integers specifying the number of
intermediate channels for each encoder block. If None, the
intermediate channels are chosen equal to channels_per_block.
activation_fn: A callable activation function.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b').
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b').
convs_per_block: An integer specifying the number of convolutional layers.
blocks_per_level: An integer specifying the number of residual blocks per
level.
loss_kwargs: None or dictionary specifying the loss setup.
name: A string specifying the name of the module.
"""
super(HierarchicalProbUNet, self).__init__(name=name)
base_channels = 24
default_channels_per_block = (
base_channels, 2 * base_channels, 4 * base_channels, 8 * base_channels,
8 * base_channels, 8 * base_channels, 8 * base_channels,
8 * base_channels
)
if channels_per_block is None:
channels_per_block = default_channels_per_block
if down_channels_per_block is None:
down_channels_per_block =\
          tuple([i // 2 for i in default_channels_per_block])
if initializers is None:
initializers = {
'w': tf.orthogonal_initializer(gain=1.0, seed=None),
'b': tf.truncated_normal_initializer(stddev=0.001)
}
if regularizers is None:
regularizers = {
'w': tf.keras.regularizers.l2(1e-5),
'b': tf.keras.regularizers.l2(1e-5)
}
if loss_kwargs is None:
self._loss_kwargs = {
'type': 'geco',
'top_k_percentage': 0.02,
'deterministic_top_k': False,
'kappa': 0.05,
'decay': 0.99,
'rate': 1e-2,
'beta': None
}
else:
self._loss_kwargs = loss_kwargs
if down_channels_per_block is None:
down_channels_per_block = channels_per_block
with self._enter_variable_scope():
self._prior = _HierarchicalCore(
latent_dims=latent_dims,
channels_per_block=channels_per_block,
down_channels_per_block=down_channels_per_block,
activation_fn=activation_fn,
initializers=initializers,
regularizers=regularizers,
convs_per_block=convs_per_block,
blocks_per_level=blocks_per_level,
name='prior')
self._posterior = _HierarchicalCore(
latent_dims=latent_dims,
channels_per_block=channels_per_block,
down_channels_per_block=down_channels_per_block,
activation_fn=activation_fn,
initializers=initializers,
regularizers=regularizers,
convs_per_block=convs_per_block,
blocks_per_level=blocks_per_level,
name='posterior')
self._f_comb = _StitchingDecoder(
latent_dims=latent_dims,
channels_per_block=channels_per_block,
num_classes=num_classes,
down_channels_per_block=down_channels_per_block,
activation_fn=activation_fn,
initializers=initializers,
regularizers=regularizers,
convs_per_block=convs_per_block,
blocks_per_level=blocks_per_level,
name='f_comb')
if self._loss_kwargs['type'] == 'geco':
self._moving_average = geco_utils.MovingAverage(
decay=self._loss_kwargs['decay'], differentiable=True,
name='ma_test')
self._lagmul = geco_utils.LagrangeMultiplier(
rate=self._loss_kwargs['rate'])
self._cache = ()
def _build(self, seg, img):
"""Inserts all ops used during training into the graph exactly once.
The first time this method is called given the input pair (seg, img) all
ops relevant for training are inserted into the graph. Calling this method
more than once does not re-insert the modules into the graph (memoization),
thus preventing multiple forward passes of submodules for the same inputs.
The method is private and called when setting up the loss.
Args:
seg: A tensor of shape (b, h, w, num_classes).
img: A tensor of shape (b, h, w, c)
Returns: None
"""
inputs = (seg, img)
if self._cache == inputs:
return
else:
self._q_sample = self._posterior(
tf.concat([seg, img], axis=-1), mean=False)
self._q_sample_mean = self._posterior(
tf.concat([seg, img], axis=-1), mean=True)
self._p_sample = self._prior(
img, mean=False, z_q=None)
self._p_sample_z_q = self._prior(
img, z_q=self._q_sample['used_latents'])
self._p_sample_z_q_mean = self._prior(
img, z_q=self._q_sample_mean['used_latents'])
self._cache = inputs
return
def sample(self, img, mean=False, z_q=None):
"""Sample a segmentation from the prior, given an input image.
Args:
img: A tensor of shape (b, h, w, c).
mean: A boolean or a list of booleans. If a boolean, it specifies whether
or not to use the distributions' means in ALL latent scales. If a list,
each bool therein specifies whether or not to use the scale's mean. If
False, the latents of the scale are sampled.
z_q: None or a list of tensors. If not None, z_q provides external latents
to be used instead of sampling them. This is used to employ posterior
latents in the prior during training. Therefore, if z_q is not None, the
        value of `mean` is ignored. If z_q is None, either the distribution's
        mean is used (in case `mean` for the respective scale is True) or else
        a sample from the distribution is drawn.
Returns:
A segmentation tensor of shape (b, h, w, num_classes).
"""
prior_out = self._prior(img, mean, z_q)
encoder_features = prior_out['encoder_features']
decoder_features = prior_out['decoder_features']
return self._f_comb(encoder_features=encoder_features,
decoder_features=decoder_features)
def reconstruct(self, seg, img, mean=False):
"""Reconstruct a segmentation using the posterior.
Args:
seg: A tensor of shape (b, h, w, num_classes).
img: A tensor of shape (b, h, w, c).
mean: A boolean, specifying whether to sample from the full hierarchy of
the posterior or use the posterior means at each scale of the hierarchy.
Returns:
A segmentation tensor of shape (b,h,w,num_classes).
"""
self._build(seg, img)
if mean:
prior_out = self._p_sample_z_q_mean
else:
prior_out = self._p_sample_z_q
encoder_features = prior_out['encoder_features']
decoder_features = prior_out['decoder_features']
return self._f_comb(encoder_features=encoder_features,
decoder_features=decoder_features)
def rec_loss(self, seg, img, mask=None, top_k_percentage=None,
deterministic=True):
"""Cross-entropy reconstruction loss employed in the ELBO-/ GECO-objective.
Args:
seg: A tensor of shape (b, h, w, num_classes).
img: A tensor of shape (b, h, w, c).
mask: A mask of shape (b, h, w) or None. If None no pixels are masked in
the loss.
top_k_percentage: None or a float in (0.,1.]. If None, a standard
cross-entropy loss is calculated.
deterministic: A Boolean indicating whether or not to produce the
prospective top-k mask deterministically.
Returns:
A dictionary holding the mean and the pixelwise sum of the loss for the
batch as well as the employed loss mask.
"""
reconstruction = self.reconstruct(seg, img, mean=False)
return geco_utils.ce_loss(
reconstruction, seg, mask, top_k_percentage, deterministic)
def kl(self, seg, img):
"""Kullback-Leibler divergence between the posterior and the prior.
Args:
seg: A tensor of shape (b, h, w, num_classes).
img: A tensor of shape (b, h, w, c).
Returns:
A dictionary with keys indexing the hierarchy's levels and corresponding
values holding the KL-term for each level (per batch).
"""
self._build(seg, img)
posterior_out = self._q_sample
prior_out = self._p_sample_z_q
q_dists = posterior_out['distributions']
p_dists = prior_out['distributions']
kl = {}
for level, (q, p) in enumerate(zip(q_dists, p_dists)):
# Shape (b, h, w).
kl_per_pixel = tfd.kl_divergence(q, p)
# Shape (b,).
kl_per_instance = tf.reduce_sum(kl_per_pixel, axis=[1, 2])
# Shape (1,).
kl[level] = tf.reduce_mean(kl_per_instance)
return kl
def loss(self, seg, img, mask):
"""The full training objective, either ELBO or GECO.
Args:
seg: A tensor of shape (b, h, w, num_classes).
img: A tensor of shape (b, h, w, c).
mask: A mask of shape (b, h, w) or None. If None no pixels are masked in
the loss.
Returns:
A dictionary holding the loss (with key 'loss') and the tensorboard
summaries (with key 'summaries').
"""
summaries = {}
top_k_percentage = self._loss_kwargs['top_k_percentage']
deterministic = self._loss_kwargs['deterministic_top_k']
rec_loss = self.rec_loss(seg, img, mask, top_k_percentage, deterministic)
kl_dict = self.kl(seg, img)
kl_sum = tf.reduce_sum(
        tf.stack([kl for _, kl in kl_dict.items()], axis=-1))
summaries['rec_loss_mean'] = rec_loss['mean']
summaries['rec_loss_sum'] = rec_loss['sum']
summaries['kl_sum'] = kl_sum
    for level, kl in kl_dict.items():
summaries['kl_{}'.format(level)] = kl
# Set up a regular ELBO objective.
if self._loss_kwargs['type'] == 'elbo':
loss = rec_loss['sum'] + self._loss_kwargs['beta'] * kl_sum
summaries['elbo_loss'] = loss
# Set up a GECO objective (ELBO with a reconstruction constraint).
elif self._loss_kwargs['type'] == 'geco':
ma_rec_loss = self._moving_average(rec_loss['sum'])
mask_sum_per_instance = tf.reduce_sum(rec_loss['mask'], axis=-1)
num_valid_pixels = tf.reduce_mean(mask_sum_per_instance)
reconstruction_threshold = self._loss_kwargs['kappa'] * num_valid_pixels
rec_constraint = ma_rec_loss - reconstruction_threshold
lagmul = self._lagmul(rec_constraint)
loss = lagmul * rec_constraint + kl_sum
summaries['geco_loss'] = loss
summaries['ma_rec_loss_mean'] = ma_rec_loss / num_valid_pixels
summaries['num_valid_pixels'] = num_valid_pixels
summaries['lagmul'] = lagmul
else:
      raise NotImplementedError('Loss type {} not implemented!'.format(
          self._loss_kwargs['type']))
return dict(supervised_loss=loss, summaries=summaries)
if __name__ == '__main__':
hpu_net = HierarchicalProbUNet()
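  # Illustrative smoke test (an assumption, not part of the original script):
  # build the training objective on random inputs in TF1 graph mode. The
  # batch size, spatial size and channel counts are hypothetical placeholders.
  example_img = tf.random.normal([1, 128, 128, 1])
  example_seg = tf.one_hot(
      tf.zeros([1, 128, 128], dtype=tf.int32), depth=2)
  example_outputs = hpu_net.loss(example_seg, example_img, mask=None)
  print(sorted(example_outputs['summaries'].keys()))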
|
deepmind-research-master
|
hierarchical_probabilistic_unet/model.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['numpy', 'dm-sonnet==1.35', 'tensorflow==1.14',
'tensorflow-probability==0.7.0']
setup(
name='hpu_net',
version='0.1',
description='A library for the Hierarchical Probabilistic U-Net model.',
url='https://github.com/deepmind/deepmind-research/hierarchical_probabilistic_unet',
author='DeepMind',
author_email='no-reply@google.com',
# Contained modules and scripts.
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
platforms=['any'],
license='Apache 2.0',
)
|
deepmind-research-master
|
hierarchical_probabilistic_unet/setup.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Architectural blocks and utility functions of the U-Net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
import tensorflow.compat.v1 as tf
def res_block(input_features, n_channels, n_down_channels=None,
activation_fn=tf.nn.relu, initializers=None, regularizers=None,
convs_per_block=3):
"""A pre-activated residual block.
Args:
input_features: A tensor of shape (b, h, w, c).
n_channels: An integer specifying the number of output channels.
n_down_channels: An integer specifying the number of intermediate channels.
activation_fn: A callable activation function.
initializers: Initializers for the weights and biases.
regularizers: Regularizers for the weights and biases.
    convs_per_block: An integer specifying the number of convolutional layers.
Returns:
A tensor of shape (b, h, w, c).
"""
# Pre-activate the inputs.
skip = input_features
residual = activation_fn(input_features)
# Set the number of intermediate channels that we compress to.
if n_down_channels is None:
n_down_channels = n_channels
for c in range(convs_per_block):
residual = snt.Conv2D(n_down_channels,
(3, 3),
padding='SAME',
initializers=initializers,
regularizers=regularizers)(residual)
if c < convs_per_block - 1:
residual = activation_fn(residual)
incoming_channels = input_features.shape[-1]
if incoming_channels != n_channels:
skip = snt.Conv2D(n_channels,
(1, 1),
padding='SAME',
initializers=initializers,
regularizers=regularizers)(skip)
if n_down_channels != n_channels:
residual = snt.Conv2D(n_channels,
(1, 1),
padding='SAME',
initializers=initializers,
regularizers=regularizers)(residual)
return skip + residual
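def res_block_example():
  """Illustrative sketch (not part of the original module): applies
  `res_block` to a random feature map; shapes here are hypothetical. Because
  the incoming channel count (8) differs from `n_channels` (16), the skip
  path is projected by a 1x1 convolution inside `res_block`.
  """
  features = tf.random.normal([2, 32, 32, 8])
  return res_block(features, n_channels=16, n_down_channels=8)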
def resize_up(input_features, scale=2):
"""Nearest neighbor rescaling-operation for the input features.
Args:
input_features: A tensor of shape (b, h, w, c).
scale: An integer specifying the scaling factor.
  Returns:
    A tensor of shape (b, scale * h, scale * w, c).
"""
assert scale >= 1
_, size_x, size_y, _ = input_features.shape.as_list()
new_size_x = int(round(size_x * scale))
new_size_y = int(round(size_y * scale))
return tf.image.resize(
input_features,
[new_size_x, new_size_y],
align_corners=True,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
def resize_down(input_features, scale=2):
"""Average pooling rescaling-operation for the input features.
Args:
input_features: A tensor of shape (b, h, w, c).
scale: An integer specifying the scaling factor.
  Returns:
    A tensor of shape (b, h / scale, w / scale, c).
"""
assert scale >= 1
return tf.nn.avg_pool2d(
input_features,
ksize=(1, scale, scale, 1),
strides=(1, scale, scale, 1),
padding='VALID')
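def resize_round_trip_example():
  """Illustrative sketch (not part of the original module): downsamples a
  random feature map by average pooling and upsamples it back with
  nearest-neighbour resizing, restoring the spatial shape (2, 16, 16, 4).
  """
  features = tf.random.normal([2, 16, 16, 4])
  down = resize_down(features, scale=2)  # -> (2, 8, 8, 4)
  return resize_up(down, scale=2)  # -> (2, 16, 16, 4)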
|
deepmind-research-master
|
hierarchical_probabilistic_unet/unet_utils.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing code for computing various metrics for training and evaluation."""
from typing import Callable, Dict, Optional
import distrax
import haiku as hk
import jax
import jax.nn as nn
import jax.numpy as jnp
import numpy as np
import physics_inspired_models.utils as utils
_ReconstructFunc = Callable[[utils.Params, jnp.ndarray, jnp.ndarray, bool],
distrax.Distribution]
def calculate_small_latents(dist, threshold=0.5):
"""Calculates the number of active latents by thresholding the variance of their distribution."""
if not isinstance(dist, distrax.Normal):
raise NotImplementedError()
latent_means = dist.mean()
  latent_variances = dist.variance()
  small_latents = jnp.sum(
      (latent_variances < threshold) & (jnp.abs(latent_means) > 0.1), axis=1)
return jnp.mean(small_latents)
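def _calculate_small_latents_example():
  """Illustrative sketch (an assumption, not part of the original module):
  a batch of 4 diagonal Gaussians over 6 latents, all with small variance
  and means far from zero, so every latent is counted.
  """
  dist = distrax.Normal(
      loc=jnp.ones([4, 6]), scale=0.1 * jnp.ones([4, 6]))
  return calculate_small_latents(dist)  # Expected value: 6.0.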
def compute_scale(
targets: jnp.ndarray,
rescale_by: str
) -> jnp.ndarray:
"""Compute a scaling factor based on targets shape and the rescale_by argument."""
if rescale_by == "pixels_and_time":
return jnp.asarray(np.prod(targets.shape[-4:]))
elif rescale_by is not None:
raise ValueError(f"Unrecognized rescale_by={rescale_by}.")
else:
return jnp.ones([])
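def _compute_scale_example():
  """Illustrative sketch (not part of the original module): for targets of
  shape (B, T, H, W, C), "pixels_and_time" rescales by T * H * W * C,
  here 5 * 4 * 4 * 3 = 240; the shape is a hypothetical placeholder.
  """
  targets = jnp.zeros([2, 5, 4, 4, 3])
  return compute_scale(targets, "pixels_and_time")  # -> 240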
def compute_data_domain_stats(
p_x: distrax.Distribution,
targets: jnp.ndarray
) -> Dict[str, jnp.ndarray]:
"""Compute several statistics in the data domain, such as L2 and negative log likelihood."""
axis = tuple(range(2, targets.ndim))
l2_over_time = jnp.sum((p_x.mean() - targets) ** 2, axis=axis)
l2 = jnp.sum(l2_over_time, axis=1)
# Calculate relative L2 normalised by image "length"
norm_factor = jnp.sum(targets**2, axis=(2, 3, 4))
l2_over_time_norm = l2_over_time / norm_factor
l2_norm = jnp.sum(l2_over_time_norm, axis=1)
# Compute negative log-likelihood under p(x)
  neg_log_p_x_over_time = -jnp.sum(p_x.log_prob(targets), axis=axis)
neg_log_p_x = jnp.sum(neg_log_p_x_over_time, axis=1)
return dict(
neg_log_p_x_over_time=neg_log_p_x_over_time,
neg_log_p_x=neg_log_p_x,
l2_over_time=l2_over_time,
l2=l2,
l2_over_time_norm=l2_over_time_norm,
l2_norm=l2_norm,
)
def compute_vae_stats(
neg_log_p_x: jnp.ndarray,
rng: jnp.ndarray,
q_z: distrax.Distribution,
prior: distrax.Distribution
) -> Dict[str, jnp.ndarray]:
"""Compute the KL(q(z|x)||p(z)) and the negative ELBO, which are used for VAE models."""
# Compute the KL
kl = distrax.estimate_kl_best_effort(q_z, prior, rng_key=rng, num_samples=1)
  kl = jnp.sum(kl, axis=tuple(range(1, kl.ndim)))
# Sanity check
assert kl.shape == neg_log_p_x.shape
return dict(
kl=kl,
neg_elbo=neg_log_p_x + kl,
)
def training_statistics(
p_x: distrax.Distribution,
targets: jnp.ndarray,
rescale_by: Optional[str],
rng: Optional[jnp.ndarray] = None,
q_z: Optional[distrax.Distribution] = None,
prior: Optional[distrax.Distribution] = None,
p_x_learned_sigma: bool = False
) -> Dict[str, jnp.ndarray]:
"""Computes various statistics we track during training."""
stats = compute_data_domain_stats(p_x, targets)
if rng is not None and q_z is not None and prior is not None:
stats.update(compute_vae_stats(stats["neg_log_p_x"], rng, q_z, prior))
else:
assert rng is None and q_z is None and prior is None
# Rescale these stats accordingly
scale = compute_scale(targets, rescale_by)
# Note that "_over_time" stats are getting normalised by time here
stats = jax.tree_map(lambda x: x / scale, stats)
if p_x_learned_sigma:
stats["p_x_sigma"] = p_x.variance().reshape([-1])[0] # pytype: disable=attribute-error # numpy-scalars
if q_z is not None:
stats["small_latents"] = calculate_small_latents(q_z)
return stats
def evaluation_only_statistics(
reconstruct_func: _ReconstructFunc,
params: hk.Params,
inputs: jnp.ndarray,
rng: jnp.ndarray,
rescale_by: str,
can_run_backwards: bool,
train_sequence_length: int,
reconstruction_skip: int,
p_x_learned_sigma: bool = False,
) -> Dict[str, jnp.ndarray]:
"""Computes various statistics we track only during evaluation."""
full_trajectory = utils.extract_image(inputs)
prefixes = ("forward", "backward") if can_run_backwards else ("forward",)
full_forward_targets = jax.tree_map(
lambda x: x[:, reconstruction_skip:], full_trajectory)
full_backward_targets = jax.tree_map(
lambda x: x[:, :x.shape[1]-reconstruction_skip], full_trajectory)
train_targets_length = train_sequence_length - reconstruction_skip
full_targets_length = full_forward_targets.shape[1]
stats = dict()
keys = ()
for prefix in prefixes:
# Fully unroll the model and reconstruct the whole sequence
full_prediction = reconstruct_func(params, full_trajectory, rng,
prefix == "forward")
assert isinstance(full_prediction, distrax.Normal)
full_targets = (full_forward_targets if prefix == "forward" else
full_backward_targets)
# In cases where the model can run backwards it is possible to reconstruct
    # parts which were intended to be skipped, so here we take care of that.
if full_prediction.mean().shape[1] > full_targets_length:
if prefix == "forward":
full_prediction = jax.tree_map(lambda x: x[:, -full_targets_length:],
full_prediction)
else:
full_prediction = jax.tree_map(lambda x: x[:, :full_targets_length],
full_prediction)
# Based on the prefix and suffix fetch correct predictions and targets
for suffix in ("train", "extrapolation", "full"):
if prefix == "forward" and suffix == "train":
predict, targets = jax.tree_map(lambda x: x[:, :train_targets_length],
(full_prediction, full_targets))
elif prefix == "forward" and suffix == "extrapolation":
predict, targets = jax.tree_map(lambda x: x[:, train_targets_length:],
(full_prediction, full_targets))
elif prefix == "backward" and suffix == "train":
predict, targets = jax.tree_map(lambda x: x[:, -train_targets_length:],
(full_prediction, full_targets))
elif prefix == "backward" and suffix == "extrapolation":
predict, targets = jax.tree_map(lambda x: x[:, :-train_targets_length],
(full_prediction, full_targets))
else:
predict, targets = full_prediction, full_targets
# Compute train statistics
train_stats = training_statistics(predict, targets, rescale_by,
p_x_learned_sigma=p_x_learned_sigma)
for key, value in train_stats.items():
stats[prefix + "_" + suffix + "_" + key] = value
# Copy all stats keys
keys = tuple(train_stats.keys())
  # Make a combined metric averaging forward and backward
if can_run_backwards:
    # Average the forward and backward versions of each statistic.
for suffix in ("train", "extrapolation", "full"):
for key in keys:
forward = stats["forward_" + suffix + "_" + key]
backward = stats["backward_" + suffix + "_" + key]
combined = (forward + backward) / 2
stats["combined_" + suffix + "_" + key] = combined
return stats
def geco_objective(
l2_loss,
kl,
alpha,
kappa,
constraint_ema,
lambda_var,
is_training
) -> Dict[str, jnp.ndarray]:
"""Computes the objective for GECO and some of it statistics used ofr updates."""
# C_t
constraint_t = l2_loss - kappa
if is_training:
# We update C_ma only during training
constraint_ema = alpha * constraint_ema + (1 - alpha) * constraint_t
lagrange = nn.softplus(lambda_var)
lagrange = jnp.broadcast_to(lagrange, constraint_ema.shape)
# Add this special op for getting all gradients correct
loss = utils.geco_lagrange_product(lagrange, constraint_ema, constraint_t)
return dict(
loss=loss + kl,
geco_multiplier=lagrange,
geco_constraint=constraint_t,
geco_constraint_ema=constraint_ema
)
def elbo_objective(neg_log_p_x, kl, final_beta, beta_delay, step):
"""Computes objective for optimizing the Evidence Lower Bound (ELBO)."""
if beta_delay == 0:
beta = final_beta
else:
delayed_beta = jnp.minimum(float(step) / float(beta_delay), 1.0)
beta = delayed_beta * final_beta
return dict(
loss=neg_log_p_x + beta * kl,
elbo_beta=beta
)
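def _elbo_objective_example():
  """Illustrative sketch (not part of the original module): with
  final_beta=1.0 and beta_delay=1000, beta ramps up linearly, so at step 500
  the loss is neg_log_p_x + 0.5 * kl = 2.5 and elbo_beta is 0.5.
  """
  return elbo_objective(
      neg_log_p_x=jnp.asarray(2.0), kl=jnp.asarray(1.0),
      final_beta=1.0, beta_delay=1000, step=500)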
|
deepmind-research-master
|
physics_inspired_models/metrics.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing model evaluation metric."""
import _thread as thread
import sys
import threading
import time
import warnings
from absl import logging
import distrax
import numpy as np
from sklearn import linear_model
from sklearn import model_selection
from sklearn import preprocessing
def quit_function(fn_name):
logging.error('%s took too long', fn_name)
sys.stderr.flush()
thread.interrupt_main()
def exit_after(s):
"""Use as decorator to exit function after s seconds."""
def outer(fn):
def inner(*args, **kwargs):
timer = threading.Timer(s, quit_function, args=[fn.__name__])
timer.start()
try:
result = fn(*args, **kwargs)
finally:
timer.cancel()
return result
return inner
return outer
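def _exit_after_example():
  """Illustrative sketch (an assumption, not part of the original module):
  wraps a slow function so it is interrupted after roughly 2 seconds via
  `thread.interrupt_main`, which raises KeyboardInterrupt in the main thread.
  """
  @exit_after(2)
  def slow_fn():
    time.sleep(10)
  try:
    slow_fn()
  except KeyboardInterrupt:
    logging.info('slow_fn was interrupted as expected.')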
@exit_after(400)
def do_grid_search(data_x_exp, data_y, clf, parameters, cv):
scoring_choice = 'explained_variance'
regressor = model_selection.GridSearchCV(
clf, parameters, cv=cv, refit=True, scoring=scoring_choice)
regressor.fit(data_x_exp, data_y)
return regressor
def symplectic_matrix(dim):
"""Return anti-symmetric identity matrix of given dimensionality."""
half_dims = int(dim/2)
eye = np.eye(half_dims)
zeros = np.zeros([half_dims, half_dims])
top_rows = np.concatenate([zeros, - eye], axis=1)
bottom_rows = np.concatenate([eye, zeros], axis=1)
return np.concatenate([top_rows, bottom_rows], axis=0)
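def _symplectic_matrix_example():
  """Illustrative sketch (not part of the original module): the returned
  matrix J = [[0, -I], [I, 0]] satisfies J^T = -J and J @ J = -I, the
  defining properties of the canonical symplectic form.
  """
  j = symplectic_matrix(4)
  assert np.allclose(j.T, -j)
  assert np.allclose(j @ j, -np.eye(4))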
def create_latent_mask(z0, dist_std_threshold=0.5):
"""Create mask based on informativeness of each latent dimension.
For stochastic models those latent dimensions that are too close to the prior
are likely to be uninformative and can be ignored.
Args:
z0: distribution or array of phase space
dist_std_threshold: informative latents have average inferred stds <
dist_std_threshold
Returns:
latent_mask_final: boolean mask of the same dimensionality as z0
"""
if isinstance(z0, distrax.Normal):
std_vals = np.mean(z0.variance(), axis=0)
elif isinstance(z0, distrax.Distribution):
raise NotImplementedError()
else:
# If the latent is deterministic, pass through all dimensions
return np.array([True]*z0.shape[-1])
tensor_shape = std_vals.shape
half_dims = int(tensor_shape[-1] / 2)
std_vals_q = std_vals[:half_dims]
std_vals_p = std_vals[half_dims:]
# Keep both q and corresponding p as either one is informative
informative_latents_inds = np.array([
x for x in range(len(std_vals_q)) if
std_vals_q[x] < dist_std_threshold or std_vals_p[x] < dist_std_threshold
])
if informative_latents_inds.shape[0] > 0:
latent_mask_final = np.zeros_like(std_vals_q)
latent_mask_final[informative_latents_inds] = 1
latent_mask_final = np.concatenate([latent_mask_final, latent_mask_final])
latent_mask_final = latent_mask_final == 1
return latent_mask_final
else:
return np.array([True]*tensor_shape[-1])
def standardize_data(data):
"""Applies the sklearn standardization to the data."""
scaler = preprocessing.StandardScaler()
scaler.fit(data)
return scaler.transform(data)
def find_best_polynomial(data_x, data_y, max_poly_order, rsq_threshold,
max_dim_n=32,
alpha_sweep=None,
max_iter=1000, cv=2):
"""Find minimal polynomial expansion that is sufficient to explain data using Lasso regression."""
rsq = 0
poly_order = 1
if not np.any(alpha_sweep):
alpha_sweep = [1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
# Avoid a large polynomial expansion for large latent sizes
if data_x.shape[-1] > max_dim_n:
print(f'>WARNING! Data is too high dimensional at {data_x.shape[-1]}')
print('>WARNING! Setting max_poly_order = 1')
max_poly_order = 1
while rsq < rsq_threshold and poly_order <= max_poly_order:
time_start = time.perf_counter()
poly = preprocessing.PolynomialFeatures(poly_order, include_bias=False)
data_x_exp = poly.fit_transform(data_x)
time_end = time.perf_counter()
print(
f'Took {time_end-time_start}s to create polynomial features of order '
f'{poly_order} and size {data_x_exp.shape[1]}.')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
time_start = time.perf_counter()
clf = linear_model.Lasso(
random_state=0, max_iter=max_iter, normalize=False, warm_start=False)
parameters = {'alpha': alpha_sweep}
try:
regressor = do_grid_search(data_x_exp, data_y, clf, parameters, cv)
time_end = time.perf_counter()
print(f'Took {time_end-time_start}s to do regression grid search.')
# Get rsq results
time_start = time.perf_counter()
clf = linear_model.Lasso(
random_state=0,
alpha=regressor.best_params_['alpha'],
max_iter=max_iter,
normalize=False,
warm_start=False)
clf.fit(data_x_exp, data_y)
rsq = clf.score(data_x_exp, data_y)
time_end = time.perf_counter()
print(f'Took {time_end-time_start}s to get rsq results.')
old_regressor = regressor
old_poly_order = poly_order
old_poly = poly
old_data_x_exp = data_x_exp
old_rsq = rsq
old_clf = clf
print(f'Polynomial of order {poly_order} with '
f' alpha={regressor.best_params_} RSQ: {rsq}')
poly_order += 1
except KeyboardInterrupt:
time_end = time.perf_counter()
print(f'Timed out after {time_end-time_start}s of doing grid search.')
# pytype: disable=name-error # py39-upgrade
print(f'Continuing with previous poly_order={old_poly_order}...')
regressor = old_regressor
poly_order = old_poly_order
poly = old_poly
data_x_exp = old_data_x_exp
rsq = old_rsq
clf = old_clf
# pytype: enable=name-error # py39-upgrade
print(f'Polynomial of order {poly_order} with '
f' alpha={regressor.best_params_} RSQ: {rsq}')
break
return clf, poly, data_x_exp, rsq
def eval_monomial_grad(feature, x, w, grad_acc):
"""Accumulates gradient from polynomial features and their weights."""
features = feature.split(' ')
variable_indices = []
grads = np.ones(len(features)) * w
for i, feature in enumerate(features):
name_and_power = feature.split('^')
if len(name_and_power) == 1:
name, power = name_and_power[0], 1
else:
name, power = name_and_power
power = int(power)
var_index = int(name[1:])
variable_indices.append(var_index)
new_prod = np.ones_like(grads) * (x[var_index] ** power)
# This needs a special case, for situation where x[index] = 0.0
if power == 1:
new_prod[i] = 1.0
else:
new_prod[i] = power * (x[var_index] ** (power - 1))
grads = grads * new_prod
grad_acc[variable_indices] += grads
return grad_acc
def compute_jacobian_manual(x, polynomial_features, weight_matrix, tolerance):
"""Computes the jacobian manually."""
# Put together the equation for each output var
# polynomial_features = np.array(polynomial_obj.get_feature_names())
weight_mask = np.abs(weight_matrix) > tolerance
weight_matrix = weight_mask * weight_matrix
jacobians = list()
for i in range(weight_matrix.shape[0]):
grad_accumulator = np.zeros_like(x)
for j, feature in enumerate(polynomial_features):
eval_monomial_grad(feature, x, weight_matrix[i, j], grad_accumulator)
jacobians.append(grad_accumulator)
return np.stack(jacobians)
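def _manual_jacobian_example():
  """Illustrative sketch (not part of the original module): for a single
  output y = x0 * x1^2 (feature name in sklearn's PolynomialFeatures
  format), the Jacobian at x = (2, 3) is
  [dy/dx0, dy/dx1] = [x1^2, 2 * x0 * x1] = [9, 12].
  """
  x = np.array([2.0, 3.0])
  features = np.array(['x0 x1^2'])
  weights = np.array([[1.0]])  # One output variable, one monomial feature.
  return compute_jacobian_manual(x, features, weights, tolerance=1e-8)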
def calculate_jacobian_prod(jacobian, noise_eps=1e-6):
"""Calculates AA*, where A=JEJ^T and A*=JE^TJ^T, which should be I."""
# Add noise as 0 in jacobian creates issues in calculations later
jacobian = jacobian + noise_eps
sym_matrix = symplectic_matrix(jacobian.shape[1])
pred = np.matmul(jacobian, sym_matrix)
pred = np.matmul(pred, np.transpose(jacobian))
pred_t = np.matmul(jacobian, np.transpose(sym_matrix))
pred_t = np.matmul(pred_t, np.transpose(jacobian))
pred_id = np.matmul(pred, pred_t)
return pred_id
def normalise_jacobian_prods(jacobian_preds):
"""Normalises Jacobians evaluated at various points by a constant."""
stacked_preds = np.stack(jacobian_preds)
# For each attempt at estimating E, get the max term, and take their average
normalisation_factor = np.mean(np.max(np.abs(stacked_preds), axis=(1, 2)))
if normalisation_factor != 0:
stacked_preds = stacked_preds/normalisation_factor
return stacked_preds
def calculate_symetric_score(
gt_data,
model_data,
max_poly_order,
max_sym_score,
rsq_threshold,
sym_threshold,
evaluation_point_n,
trajectory_n=1,
weight_tolerance=1e-5,
alpha_sweep=None,
max_iter=1000,
cv=2):
"""Finds minimal polynomial expansion to explain data using Lasso regression, gets the Jacobian of the mapping and calculates how symplectic the map is."""
model_data = model_data[..., :gt_data.shape[0], :]
  # Find a polynomial expansion that explains enough variance in the gt data
print('Finding best polynomial expansion...')
time_start = time.perf_counter()
# Clean up model data to ensure it doesn't contain NaN, infinity
# or values too large for dtype('float32')
model_data = np.nan_to_num(model_data)
model_data = np.clip(model_data, -999999, 999999)
clf, poly, model_data_exp, best_rsq = find_best_polynomial(
model_data, gt_data, max_poly_order, rsq_threshold,
32, alpha_sweep, max_iter, cv)
time_end = time.perf_counter()
print(f'Took {time_end - time_start}s to find best polynomial.')
# Calculate Symplecticity score
all_raw_scores = []
features = np.array(poly.get_feature_names())
points_per_trajectory = int(len(gt_data) / trajectory_n)
for trajectory in range(trajectory_n):
random_data_inds = np.random.permutation(
range(points_per_trajectory))[:evaluation_point_n]
jacobian_preds = []
for point_ind in random_data_inds:
input_data_point = model_data[points_per_trajectory * trajectory +
point_ind]
time_start = time.perf_counter()
jacobian = compute_jacobian_manual(input_data_point, features,
clf.coef_, weight_tolerance)
pred = calculate_jacobian_prod(jacobian)
jacobian_preds.append(pred)
time_end = time.perf_counter()
print(f'Took {time_end - time_start}s to evaluate jacobian '
f'around point {point_ind}.')
# Normalise
normalised_jacobian_preds = normalise_jacobian_prods(jacobian_preds)
# The score is measured as the deviation from I
identity = np.eye(normalised_jacobian_preds.shape[-1])
scores = np.mean(np.power(normalised_jacobian_preds - identity, 2),
axis=(1, 2))
all_raw_scores.append(scores)
sym_score = np.min([np.mean(all_raw_scores), max_sym_score])
# Calculate final SyMetric score
if best_rsq > rsq_threshold and sym_score < sym_threshold:
sy_metric = 1.0
else:
sy_metric = 0.0
results = {
'poly_exp_order': poly.get_params()['degree'],
'rsq': best_rsq,
'sym': sym_score,
'SyMetric': sy_metric,
}
with np.printoptions(precision=4, suppress=True):
print(f'----------------FINAL RESULTS FOR {trajectory_n} '
'TRAJECTORIES------------------')
print(f'BEST POLYNOMIAL EXPANSION ORDER: {results["poly_exp_order"]}')
print(f'BEST RSQ (1-best): {results["rsq"]}')
print(f'SYMPLECTICITY SCORE AROUND ALL POINTS AND ALL '
f'TRAJECTORIES (0-best): {sym_score}')
print(f'SyMETRIC SCORE: {sy_metric}')
print(f'----------------FINAL RESULTS FOR {trajectory_n} '
f'TRAJECTORIES------------------')
return results, clf, poly, model_data_exp
|
deepmind-research-master
|
physics_inspired_models/eval_metric.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the implementations of the various numerical integrators.
Higher order methods mostly taken from [1].
References:
[1] Leimkuhler, Benedict and Sebastian Reich. Simulating hamiltonian dynamics.
Vol. 14. Cambridge university press, 2004.
[2] Forest, Etienne and Ronald D. Ruth. Fourth-order symplectic integration.
Physica D: Nonlinear Phenomena 43.1 (1990): 105-117.
[3] Blanes, Sergio and Per Christian Moan. Practical symplectic partitioned
Runge–Kutta and Runge–Kutta–Nyström methods. Journal of Computational and
Applied Mathematics 142.2 (2002): 313-330.
[4] McLachlan, Robert I. On the numerical integration of ordinary differential
equations by symmetric composition methods. SIAM Journal on Scientific
Computing 16.1 (1995): 151-168.
[5] Yoshida, Haruo. Construction of higher order symplectic integrators.
Physics letters A 150.5-7 (1990): 262-268.
[6] Süli, Endre; Mayers, David (2003), An Introduction to Numerical Analysis,
Cambridge University Press, ISBN 0-521-00794-1.
[7] Hairer, Ernst; Nørsett, Syvert Paul; Wanner, Gerhard (1993), Solving
ordinary differential equations I: Nonstiff problems, Berlin, New York:
Springer-Verlag, ISBN 978-3-540-56670-0.
"""
from typing import Callable, Dict, Optional, Sequence, Tuple, TypeVar, Union
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
import jax
from jax import lax
from jax.experimental import ode
import jax.numpy as jnp
import numpy as np
M = TypeVar("M")
TM = TypeVar("TM")
TimeInterval = Union[jnp.ndarray, Tuple[float, float]]
# _____ _
# / ____| | |
# | | __ ___ _ __ ___ _ __ __ _| |
# | | |_ |/ _ \ '_ \ / _ \ '__/ _` | |
# | |__| | __/ | | | __/ | | (_| | |
# \_____|\___|_| |_|\___|_| \__,_|_|
# _____ _ _ _
# |_ _| | | | | (_)
# | | _ __ | |_ ___ __ _ _ __ __ _| |_ _ ___ _ __
# | | | '_ \| __/ _ \/ _` | '__/ _` | __| |/ _ \| '_ \
# _| |_| | | | || __/ (_| | | | (_| | |_| | (_) | | | |
# |_____|_| |_|\__\___|\__, |_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
GeneralTangentFunction = Callable[
[
Optional[Union[float, jnp.ndarray]], # t
M # y
],
TM # dy_dt
]
GeneralIntegrator = Callable[
[
GeneralTangentFunction,
Optional[Union[float, jnp.ndarray]], # t
M, # y
jnp.ndarray, # dt
],
M # y_next
]
def solve_ivp_dt(
fun: GeneralTangentFunction,
y0: M,
t0: Union[float, jnp.ndarray],
dt: Union[float, jnp.ndarray],
method: Union[str, GeneralIntegrator],
num_steps: Optional[int] = None,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> Tuple[jnp.ndarray, M]:
"""Solve an initial value problem for a system of ODEs using explicit method.
This function numerically integrates a system of ordinary differential
equations given an initial value::
dy / dt = f(t, y)
y(t0) = y0
Here t is a one-dimensional independent variable (time), y(t) is an
n-dimensional vector-valued function (state), and an n-dimensional
vector-valued function f(t, y) determines the differential equations.
The goal is to find y(t) approximately satisfying the differential
equations, given an initial value y(t0)=y0.
All of the solvers supported here are explicit and non-adaptive. This makes
them easy to run with a fixed amount of computation and ensures solutions are
easily differentiable.
Args:
fun: callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here `t` is a scalar representing the time instance. `y` can be any
type `M`, including a flat array, that is registered as a
pytree. In addition, there is a type denoted as `TM` that represents
the tangent space to `M`. It is assumed that any element of `TM` can be
multiplied by arrays and scalars, can be added to other `TM` instances
as well as they can be right added to an element of `M`, that is
add(M, TM) exists. The function should return an element of `TM` that
defines the time derivative of `y`.
y0: an instance of `M`
Initial state at `t_span[0]`.
t0: float or array.
The initial time point of integration.
dt: array
Array containing all consecutive increments in time, at which the integral
to be evaluated. The size of this array along axis 0 defines the number of
steps that the integrator would do.
method: string or `GeneralIntegrator`
The integrator method to use. Possible values for string are:
* general_euler - see `GeneralEuler`
* rk2 - see `RungaKutta2`
* rk4 - see `RungaKutta4`
* rk38 - see `RungaKutta38`
num_steps: Optional int.
If provided the `dt` will be treated as the same per step time interval,
applied for this many steps. In other words setting this argument is
equivalent to replicating `dt` num_steps times and stacking over axis=0.
steps_per_dt: int
This determines the overall step size. Between any two values of t_eval
      the step size is `dt = (t_eval[i+1] - t_eval[i]) / steps_per_dt`.
use_scan: bool
Whether for the loop to use `lax.scan` or a python loop
ode_int_kwargs: dict
Extra arguments to be passed to `ode.odeint` when method="adaptive"
Returns:
t: array
Time points at which the solution is evaluated.
y : an instance of M
Values of the solution at `t`.
"""
if method == "adaptive":
ndim = y0.q.ndim if isinstance(y0, phase_space.PhaseSpace) else y0.ndim
signs = jnp.asarray(jnp.sign(dt))
signs = signs.reshape([-1] + [1] * (ndim - 1))
if isinstance(dt, float) or dt.ndim == 0:
true_t_eval = t0 + dt * np.arange(1, num_steps + 1)
else:
true_t_eval = t0 + dt[None] * np.arange(1, num_steps + 1)[:, None]
if isinstance(dt, float):
dt = np.asarray(dt)
if isinstance(dt, np.ndarray) and dt.ndim > 0:
      if not np.all(np.abs(dt) == np.abs(dt[0])):
        raise ValueError("Not all values of `dt` were the same.")
elif isinstance(dt, jnp.ndarray) and dt.ndim > 0:
raise ValueError("The code here works only when `dy_dt` is time "
"independent and `np.abs(dt)` is the same. For this we "
"allow calling this only with numpy (not jax.numpy) "
"arrays.")
dt: jnp.ndarray = jnp.abs(jnp.asarray(dt))
dt = dt.reshape([-1])[0]
t_eval = t0 + dt * np.arange(num_steps + 1)
outputs = ode.odeint(
func=lambda y_, t_: fun(None, y_) * signs,
y0=y0,
t=jnp.abs(t_eval - t0),
**(ode_int_kwargs or dict())
)
# Note that we do not return the initial point
return true_t_eval, jax.tree_map(lambda x: x[1:], outputs)
method = get_integrator(method)
if num_steps is not None:
dt = jnp.repeat(jnp.asarray(dt)[None], repeats=num_steps, axis=0)
t_eval = t0 + jnp.cumsum(dt, axis=0)
t0 = jnp.ones_like(t_eval[..., :1]) * t0
t = jnp.concatenate([t0, t_eval[..., :-1]], axis=-1)
def loop_body(y_: M, t_dt: Tuple[jnp.ndarray, jnp.ndarray]) -> Tuple[M, M]:
t_, dt_ = t_dt
dt_: jnp.ndarray = dt_ / steps_per_dt
for _ in range(steps_per_dt):
y_ = method(fun, t_, y_, dt_)
t_ = t_ + dt_
return y_, y_
if use_scan:
return t_eval, lax.scan(loop_body, init=y0, xs=(t, dt))[1]
else:
y = [y0]
for t_and_dt_i in zip(t, dt):
y.append(loop_body(y[-1], t_and_dt_i)[0])
# Note that we do not return the initial point
return t_eval, jax.tree_map(lambda *args: jnp.stack(args, axis=0),
*y[1:])
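def _solve_ivp_dt_example():
  """Illustrative sketch (not part of the original module): integrates the
  scalar ODE dy/dt = -y with the classic fourth-order Runge-Kutta method
  for 10 steps of size 0.1; the final value should be close to exp(-1).
  """
  t, y = solve_ivp_dt(
      fun=lambda t, y: -y,
      y0=jnp.ones([]),
      t0=0.0,
      dt=0.1,
      method="rk4",
      num_steps=10)
  return t, y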
def solve_ivp_dt_two_directions(
fun: GeneralTangentFunction,
y0: M,
t0: Union[float, jnp.ndarray],
dt: Union[float, jnp.ndarray],
method: Union[str, GeneralIntegrator],
num_steps_forward: int,
num_steps_backward: int,
include_y0: bool = True,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> M:
"""Equivalent to `solve_ivp_dt` but you can specify unrolling the problem for a fixed number of steps in both time directions."""
yt = []
if num_steps_backward > 0:
yt_bck = solve_ivp_dt(
fun=fun,
y0=y0,
t0=t0,
dt=- dt,
method=method,
num_steps=num_steps_backward,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)[1]
yt.append(jax.tree_map(lambda x: jnp.flip(x, axis=0), yt_bck))
if include_y0:
yt.append(jax.tree_map(lambda x: x[None], y0))
if num_steps_forward > 0:
yt_fwd = solve_ivp_dt(
fun=fun,
y0=y0,
t0=t0,
dt=dt,
method=method,
num_steps=num_steps_forward,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)[1]
yt.append(yt_fwd)
if len(yt) > 1:
return jax.tree_map(lambda *a: jnp.concatenate(a, axis=0), *yt)
else:
return yt[0]
def solve_ivp_t_eval(
fun: GeneralTangentFunction,
t_span: TimeInterval,
y0: M,
method: Union[str, GeneralIntegrator],
t_eval: Optional[jnp.ndarray] = None,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> Tuple[jnp.ndarray, M]:
"""Solve an initial value problem for a system of ODEs using an explicit method.
This function numerically integrates a system of ordinary differential
equations given an initial value::
dy / dt = f(t, y)
y(t0) = y0
Here t is a one-dimensional independent variable (time), y(t) is an
n-dimensional vector-valued function (state), and an n-dimensional
vector-valued function f(t, y) determines the differential equations.
The goal is to find y(t) approximately satisfying the differential
equations, given an initial value y(t0)=y0.
  All of the solvers supported here are explicit and non-adaptive. This in
  turn makes them easy to run with a fixed amount of computation and ensures
  that the solutions are easily differentiable.
Args:
fun: callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here `t` is a scalar representing the time instance. `y` can be any
type `M`, including a flat array, that is registered as a
pytree. In addition, there is a type denoted as `TM` that represents
the tangent space to `M`. It is assumed that any element of `TM` can be
multiplied by arrays and scalars, can be added to other `TM` instances
as well as they can be right added to an element of `M`, that is
add(M, TM) exists. The function should return an element of `TM` that
defines the time derivative of `y`.
t_span: 2-tuple of floats
Interval of integration (t0, tf). The solver starts with t=t0 and
integrates until it reaches t=tf.
y0: an instance of `M`
Initial state at `t_span[0]`.
method: string or `GeneralIntegrator`
The integrator method to use. Possible values for string are:
* general_euler - see `GeneralEuler`
* rk2 - see `RungaKutta2`
* rk4 - see `RungaKutta4`
* rk38 - see `RungaKutta38`
t_eval: array or None.
Times at which to store the computed solution. Must be sorted and lie
within `t_span`. If None then t_eval = [t_span[-1]]
steps_per_dt: int
This determines the overall step size. Between any two values of t_eval
      the step size is `dt = (t_eval[i+1] - t_eval[i]) / steps_per_dt`.
use_scan: bool
Whether for the loop to use `lax.scan` or a python loop
ode_int_kwargs: dict
Extra arguments to be passed to `ode.odeint` when method="adaptive"
Returns:
t: array
Time points at which the solution is evaluated.
y : an instance of M
Values of the solution at `t`.
"""
# Check for t_eval
if t_eval is None:
t_eval = np.asarray([t_span[-1]])
if isinstance(t_span[0], float) and isinstance(t_span[1], float):
t_span = np.asarray(t_span)
elif isinstance(t_span[0], float) and isinstance(t_span[1], jnp.ndarray):
t_span = (np.full_like(t_span[1], t_span[0]), t_span[1])
t_span = np.stack(t_span, axis=0)
elif isinstance(t_span[1], float) and isinstance(t_span[0], jnp.ndarray):
t_span = (t_span[0], jnp.full_like(t_span[0], t_span[1]))
t_span = np.stack(t_span, axis=0)
else:
t_span = np.stack(t_span, axis=0)
def check_span(span, ts):
# Verify t_span and t_eval
if span[0] < span[1]:
# Forward in time
if not np.all(np.logical_and(span[0] <= ts, ts <= span[1])):
raise ValueError("Values in `t_eval` are not within `t_span`.")
if not np.all(ts[:-1] < ts[1:]):
raise ValueError("Values in `t_eval` are not properly sorted.")
else:
# Backward in time
if not np.all(np.logical_and(span[0] >= ts, ts >= span[1])):
raise ValueError("Values in `t_eval` are not within `t_span`.")
if not np.all(ts[:-1] > ts[1:]):
raise ValueError("Values in `t_eval` are not properly sorted.")
if t_span.ndim == 1:
check_span(t_span, t_eval)
elif t_span.ndim == 2:
if t_eval.ndim != 2:
raise ValueError("t_eval should have rank 2.")
for i in range(t_span.shape[1]):
check_span(t_span[:, i], t_eval[:, i])
t = np.concatenate([t_span[:1], t_eval[:-1]], axis=0)
return solve_ivp_dt(
fun=fun,
y0=y0,
t0=t_span[0],
dt=t_eval - t,
method=method,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)
class RungaKutta(GeneralIntegrator):
"""A general Runga-Kutta integrator defined using a Butcher tableau."""
def __init__(
self,
a_tableau: Sequence[Sequence[float]],
b_tableau: Sequence[float],
c_tableau: Sequence[float],
order: int):
if len(b_tableau) != len(c_tableau) + 1:
raise ValueError("The length of b_tableau should be exactly one more than"
" the length of c_tableau.")
if len(b_tableau) != len(a_tableau) + 1:
raise ValueError("The length of b_tableau should be exactly one more than"
" the length of a_tableau.")
self.a_tableau = a_tableau
self.b_tableau = b_tableau
self.c_tableau = c_tableau
self.order = order
def __call__(
self,
tangent_func: GeneralTangentFunction,
t: jnp.ndarray,
y: M,
dt: jnp.ndarray
) -> M: # pytype: disable=invalid-annotation
k = [tangent_func(t, y)]
zero = jax.tree_map(jnp.zeros_like, k[0])
# We always broadcast opposite to numpy (e.g. leading dims (batch) count)
if dt.ndim > 0:
dt = dt.reshape(dt.shape + (1,) * (y.ndim - dt.ndim))
if t.ndim > 0:
t = t.reshape(t.shape + (1,) * (y.ndim - t.ndim))
for c_n, a_n_row in zip(self.c_tableau, self.a_tableau):
t_n = t + dt * c_n
products = [a_i * k_i for a_i, k_i in zip(a_n_row, k) if a_i != 0.0]
delta_n = sum(products, zero)
y_n = y + dt * delta_n
k.append(tangent_func(t_n, y_n))
products = [b_i * k_i for b_i, k_i in zip(self.b_tableau, k) if b_i != 0.0]
delta = sum(products, zero)
return y + dt * delta
class GeneralEuler(RungaKutta):
"""The standard Euler method (for general ODE problems)."""
def __init__(self):
super().__init__(
a_tableau=[],
b_tableau=[1.0],
c_tableau=[],
order=1
)
class RungaKutta2(RungaKutta):
"""The second order Runga-Kutta method corresponding to the mid-point rule."""
def __init__(self):
super().__init__(
a_tableau=[[1.0 / 2.0]],
b_tableau=[0.0, 1.0],
c_tableau=[1.0 / 2.0],
order=2
)
class RungaKutta4(RungaKutta):
"""The fourth order Runga-Kutta method from [6]."""
def __init__(self):
super().__init__(
a_tableau=[[1.0 / 2.0],
[0.0, 1.0 / 2.0],
[0.0, 0.0, 1.0]],
b_tableau=[1.0 / 6.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 6.0],
c_tableau=[1.0 / 2.0, 1.0 / 2.0, 1.0],
order=4
)
class RungaKutta38(RungaKutta):
"""The fourth order 3/8 rule Runga-Kutta method from [7]."""
def __init__(self):
super().__init__(
a_tableau=[[1.0 / 3.0],
[-1.0 / 3.0, 1.0],
[1.0, -1.0, 1.0]],
b_tableau=[1.0 / 8.0, 3.0 / 8.0, 3.0 / 8.0, 1.0 / 8.0],
c_tableau=[1.0 / 3.0, 2.0 / 3.0, 1.0],
order=4
)
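class Heun2(RungaKutta):
  """Illustrative sketch (an assumption, not part of the original module):
  Heun's second-order method expressed through its Butcher tableau, i.e.
  k1 = f(t, y), k2 = f(t + dt, y + dt * k1), y_next = y + dt * (k1 + k2) / 2.
  """
  def __init__(self):
    super().__init__(
        a_tableau=[[1.0]],
        b_tableau=[0.5, 0.5],
        c_tableau=[1.0],
        order=2
    )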
# _____ _ _ _
# / ____| | | | | (_)
# | (___ _ _ _ __ ___ _ __ | | ___ ___| |_ _ ___
# \___ \| | | | '_ ` _ \| '_ \| |/ _ \/ __| __| |/ __|
# ____) | |_| | | | | | | |_) | | __/ (__| |_| | (__
# |_____/ \__, |_| |_| |_| .__/|_|\___|\___|\__|_|\___|
# __/ | | |
# |___/ |_|
# _____ _ _ _
# |_ _| | | | | (_)
# | | _ __ | |_ ___ __ _ _ __ __ _| |_ _ ___ _ __
# | | | '_ \| __/ _ \/ _` | '__/ _` | __| |/ _ \| '_ \
# _| |_| | | | || __/ (_| | | | (_| | |_| | (_) | | | |
# |_____|_| |_|\__\___|\__, |_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
SymplecticIntegrator = Callable[
[
phase_space.SymplecticTangentFunction,
jnp.ndarray, # t
phase_space.PhaseSpace, # (q, p)
jnp.ndarray, # dt
],
phase_space.PhaseSpace # (q_next, p_next)
]
def solve_hamiltonian_ivp_dt(
hamiltonian: phase_space.HamiltonianFunction,
y0: phase_space.PhaseSpace,
t0: Union[float, jnp.ndarray],
dt: Union[float, jnp.ndarray],
method: Union[str, SymplecticIntegrator],
num_steps: Optional[int] = None,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> Tuple[jnp.ndarray, phase_space.PhaseSpace]:
"""Solve an initial value problem for a Hamiltonian system.
This function numerically integrates a Hamiltonian system given an
initial value::
dq / dt = dH / dp
dp / dt = - dH / dq
q(t0), p(t0) = y0.q, y0.p
Here t is a one-dimensional independent variable (time), y(t) is an
n-dimensional vector-valued function (state), and an n-dimensional
vector-valued function H(t, q, p) determines the value of the Hamiltonian.
The goal is to find q(t) and p(t) approximately satisfying the differential
equations, given an initial values q(t0), p(t0) = y0.q, y0.p
  All of the solvers supported here are explicit and non-adaptive. This in
  turn makes them easy to run with a fixed amount of computation and ensures
  that the solutions are easily differentiable.
Args:
hamiltonian: callable
The Hamiltonian function. The calling signature is ``h(t, s)``, where
`s` is an instance of `PhaseSpace`.
y0: an instance of `M`
Initial state at t=t0.
t0: float or array.
The initial time point of integration.
dt: array
Array containing all consecutive increments in time, at which the integral
to be evaluated. The size of this array along axis 0 defines the number of
steps that the integrator would do.
method: string or `GeneralIntegrator`
The integrator method to use. Possible values for string are:
* symp_euler - see `SymplecticEuler`
* symp_euler_q - a `SymplecticEuler` with position_first=True
* symp_euler_p - a `SymplecticEuler` with position_first=False
* leap_frog - see `LeapFrog`
* leap_frog_q - a `LeapFrog` with position_first=True
* leap_frog_p - a `LeapFrog` with position_first=False
* stormer_verlet - same as leap_frog
* stormer_verlet_q - same as leap_frog_q
* stormer_verlet_p - same as leap_frog_p
* ruth4 - see `Ruth4`,
* sym4 - see `Symmetric4`
* sym6 - see `Symmetric6`
* so4 - see `SymmetricSo4`
* so4_q - a `SymmetricSo4` with position_first=True
* so4_p - a `SymmetricSo4` with position_first=False
* so6 - see `SymmetricSo6`
* so6_q - a `SymmetricSo6` with position_first=True
* so6_p - a `SymmetricSo6` with position_first=False
* so8 - see `SymmetricSo8`
* so8_q - a `SymmetricSo8` with position_first=True
* so8_p - a `SymmetricSo8` with position_first=False
num_steps: Optional int.
If provided the `dt` will be treated as the same per step time interval,
applied for this many steps. In other words setting this argument is
equivalent to replicating `dt` num_steps times and stacking over axis=0.
steps_per_dt: int
This determines the overall step size. Between any two values of t_eval
      the step size is `dt = (t_eval[i+1] - t_eval[i]) / steps_per_dt`.
use_scan: bool
Whether for the loop to use `lax.scan` or a python loop
ode_int_kwargs: dict
Extra arguments to be passed to `ode.odeint` when method="adaptive"
Returns:
t: array
Time points at which the solution is evaluated.
y : an instance of M
Values of the solution at `t`.
"""
if not isinstance(y0, phase_space.PhaseSpace):
raise ValueError("The initial state must be an instance of `PhaseSpace`.")
dy_dt = phase_space.poisson_bracket_with_q_and_p(hamiltonian)
return solve_ivp_dt(
fun=dy_dt,
y0=y0,
t0=t0,
dt=dt,
method=method,
num_steps=num_steps,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)
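def _solve_hamiltonian_ivp_dt_example():
  """Illustrative sketch (an assumption, not part of the original module):
  integrates a unit-mass harmonic oscillator H(q, p) = (q^2 + p^2) / 2 with
  the leap-frog integrator; the energy along the returned trajectory should
  stay close to its initial value of 0.5.
  """
  hamiltonian = lambda t, s: 0.5 * jnp.sum(s.q ** 2 + s.p ** 2)
  y0 = phase_space.PhaseSpace(position=jnp.array([1.0]),
                              momentum=jnp.array([0.0]))
  return solve_hamiltonian_ivp_dt(
      hamiltonian, y0=y0, t0=0.0, dt=0.1,
      method="leap_frog", num_steps=100)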
def solve_hamiltonian_ivp_t_eval(
hamiltonian: phase_space.HamiltonianFunction,
t_span: TimeInterval,
y0: phase_space.PhaseSpace,
method: Union[str, SymplecticIntegrator],
t_eval: Optional[jnp.ndarray] = None,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> Tuple[jnp.ndarray, phase_space.PhaseSpace]:
"""Solve an initial value problem for a Hamiltonian system.
This function numerically integrates a Hamiltonian system given an
initial value::
dq / dt = dH / dp
dp / dt = - dH / dq
q(t0), p(t0) = y0.q, y0.p
Here t is a one-dimensional independent variable (time), y(t) is an
n-dimensional vector-valued function (state), and an n-dimensional
vector-valued function H(t, q, p) determines the value of the Hamiltonian.
The goal is to find q(t) and p(t) approximately satisfying the differential
equations, given an initial values q(t0), p(t0) = y0.q, y0.p
  All of the solvers supported here are explicit and non-adaptive. This in
  turn makes them easy to run with a fixed amount of computation and ensures
  that the solutions are easily differentiable.
Args:
hamiltonian: callable
The Hamiltonian function. The calling signature is ``h(t, s)``, where
`s` is an instance of `PhaseSpace`.
t_span: 2-tuple of floats
Interval of integration (t0, tf). The solver starts with t=t0 and
integrates until it reaches t=tf.
y0: an instance of `M`
Initial state at `t_span[0]`.
method: string or `GeneralIntegrator`
The integrator method to use. Possible values for string are:
* symp_euler - see `SymplecticEuler`
* symp_euler_q - a `SymplecticEuler` with position_first=True
* symp_euler_p - a `SymplecticEuler` with position_first=False
* leap_frog - see `LeapFrog`
* leap_frog_q - a `LeapFrog` with position_first=True
* leap_frog_p - a `LeapFrog` with position_first=False
* stormer_verlet - same as leap_frog
* stormer_verlet_q - same as leap_frog_q
* stormer_verlet_p - same as leap_frog_p
* ruth4 - see `Ruth4`,
* sym4 - see `Symmetric4`
* sym6 - see `Symmetric6`
* so4 - see `SymmetricSo4`
* so4_q - a `SymmetricSo4` with position_first=True
* so4_p - a `SymmetricSo4` with position_first=False
* so6 - see `SymmetricSo6`
* so6_q - a `SymmetricSo6` with position_first=True
* so6_p - a `SymmetricSo6` with position_first=False
* so8 - see `SymmetricSo8`
* so8_q - a `SymmetricSo8` with position_first=True
* so8_p - a `SymmetricSo8` with position_first=False
t_eval: array or None.
Times at which to store the computed solution. Must be sorted and lie
within `t_span`. If None then t_eval = [t_span[-1]]
steps_per_dt: int
This determines the overall step size. Between any two values of t_eval
      the step size is `dt = (t_eval[i+1] - t_eval[i]) / steps_per_dt`.
use_scan: bool
Whether for the loop to use `lax.scan` or a python loop
ode_int_kwargs: dict
      Extra arguments to be passed to `ode.odeint` when method="adaptive"
Returns:
t: array
Time points at which the solution is evaluated.
y : an instance of M
Values of the solution at `t`.
"""
if not isinstance(y0, phase_space.PhaseSpace):
raise ValueError("The initial state must be an instance of `PhaseSpace`.")
dy_dt = phase_space.poisson_bracket_with_q_and_p(hamiltonian)
if method == "adaptive":
dy_dt = phase_space.transform_symplectic_tangent_function_using_array(dy_dt)
return solve_ivp_t_eval( # pytype: disable=bad-return-type # jax-ndarray
fun=dy_dt,
t_span=t_span,
y0=y0,
method=method,
t_eval=t_eval,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)
class CompositionSymplectic(SymplecticIntegrator):
"""A generalized symplectic integrator based on compositions.
Simulates Hamiltonian dynamics using a composition of symplectic steps:
q_{0} = q_init, p_{0} = p_init
for i in [1, n]:
p_{i+1} = p_{i} - c_{i} * dH/dq(q_{i}) * dt
q_{i+1} = q_{i} + d_{i} * dH/dp(p_{i+1}) * dt
q_next = q_{n}, p_next = p_{n}
This integrator always starts with updating the momentum.
The order argument is used mainly for testing to estimate the error when
integrating various systems.
"""
def __init__(
self,
momentum_coefficients: Sequence[float],
position_coefficients: Sequence[float],
order: int):
if len(position_coefficients) != len(momentum_coefficients):
raise ValueError("The number of momentum_coefficients and "
"position_coefficients must be the same.")
if not np.allclose(sum(position_coefficients), 1.0):
raise ValueError("The sum of the position_coefficients "
"must be equal to 1.")
if not np.allclose(sum(momentum_coefficients), 1.0):
raise ValueError("The sum of the momentum_coefficients "
"must be equal to 1.")
self.momentum_coefficients = momentum_coefficients
self.position_coefficients = position_coefficients
self.order = order
def __call__(
self,
tangent_func: phase_space.SymplecticTangentFunction,
t: jnp.ndarray,
y: phase_space.PhaseSpace,
dt: jnp.ndarray
) -> phase_space.PhaseSpace:
q, p = y.q, y.p
# This is intentional to prevent a bug where one uses y later
del y
# We always broadcast opposite to numpy (e.g. leading dims (batch) count)
if dt.ndim > 0:
dt = dt.reshape(dt.shape + (1,) * (q.ndim - dt.ndim))
if t.ndim > 0:
t = t.reshape(t.shape + (1,) * (q.ndim - t.ndim))
t_q = t
t_p = t
for c, d in zip(self.momentum_coefficients, self.position_coefficients):
# Update momentum
if c != 0.0:
dp_dt = tangent_func(t_p, phase_space.PhaseSpace(q, p)).p
p = p + c * dt * dp_dt
t_p = t_p + c * dt
# Update position
if d != 0.0:
dq_dt = tangent_func(t_q, phase_space.PhaseSpace(q, p)).q
q = q + d * dt * dq_dt
t_q = t_q + d * dt
return phase_space.PhaseSpace(position=q, momentum=p)
class SymplecticEuler(CompositionSymplectic):
"""The symplectic Euler method (for Hamiltonian systems).
If position_first = True:
q_{t+1} = q_{t} + dH/dp(p_{t}) * dt
p_{t+1} = p_{t} - dH/dq(q_{t+1}) * dt
else:
p_{t+1} = p_{t} - dH/dq(q_{t}) * dt
q_{t+1} = q_{t} + dH/dp(p_{t+1}) * dt
"""
def __init__(self, position_first=True):
if position_first:
super().__init__(
momentum_coefficients=[0.0, 1.0],
position_coefficients=[1.0, 0.0],
order=1
)
else:
super().__init__(
momentum_coefficients=[1.0],
position_coefficients=[1.0],
order=1
)
class SymmetricCompositionSymplectic(CompositionSymplectic):
"""A generalized composition integrator that is symmetric.
The integrators produced are always of the form:
[update_q, update_p, ..., update_p, update_q]
or
[update_p, update_q, ..., update_q, update_p]
  based on the position_first argument. The method expects whichever variable
  is updated first to have one more coefficient.
"""
def __init__(
self,
momentum_coefficients: Sequence[float],
position_coefficients: Sequence[float],
position_first: bool,
order: int):
position_coefficients = list(position_coefficients)
momentum_coefficients = list(momentum_coefficients)
if position_first:
if len(position_coefficients) != len(momentum_coefficients) + 1:
raise ValueError("The number of position_coefficients must be one more "
"than momentum_coefficients when position_first=True.")
momentum_coefficients = [0.0] + momentum_coefficients
else:
if len(position_coefficients) + 1 != len(momentum_coefficients):
raise ValueError("The number of momentum_coefficients must be one more "
"than position_coefficients when position_first=True.")
position_coefficients = position_coefficients + [0.0]
super().__init__(
position_coefficients=position_coefficients,
momentum_coefficients=momentum_coefficients,
order=order
)
def symmetrize_coefficients(
coefficients: Sequence[float],
odd_number: bool
) -> Sequence[float]:
"""Symmetrizes the coefficients for an integrator."""
coefficients = list(coefficients)
if odd_number:
final = 1.0 - 2.0 * sum(coefficients)
return coefficients + [final] + coefficients[::-1]
else:
final = 0.5 - sum(coefficients)
return coefficients + [final, final] + coefficients[::-1]
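def _symmetrize_coefficients_example():
  """Illustrative sketch (not part of the original module): with an odd
  total count, [c1] becomes [c1, 1 - 2*c1, c1]; with an even count it
  becomes [c1, 0.5 - c1, 0.5 - c1, c1]. Both results sum to one.
  """
  assert np.allclose(
      symmetrize_coefficients([0.3], odd_number=True), [0.3, 0.4, 0.3])
  assert np.allclose(
      symmetrize_coefficients([0.3], odd_number=False), [0.3, 0.2, 0.2, 0.3])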
class LeapFrog(SymmetricCompositionSymplectic):
"""The standard Leap-Frog method (also known as Stormer-Verlet).
If position_first = True:
q_half = q_{t} + dH/dp(p_{t}) * dt / 2
p_{t+1} = p_{t} - dH/dq(q_half) * dt
q_{t+1} = q_half + dH/dp(p_{t+1}) * dt / 2
else:
p_half = p_{t} - dH/dq(q_{t}) * dt / 2
q_{t+1} = q_{t} + dH/dp(p_half) * dt
p_{t+1} = p_half - dH/dq(q_{t+1}) * dt / 2
"""
def __init__(self, position_first=False):
if position_first:
super().__init__(
position_coefficients=[0.5, 0.5],
momentum_coefficients=[1.0],
position_first=True,
order=2
)
else:
super().__init__(
position_coefficients=[1.0],
momentum_coefficients=[0.5, 0.5],
position_first=False,
order=2
)
class Ruth4(SymmetricCompositionSymplectic):
"""The Fourth order method from [2]."""
def __init__(self):
cbrt_2 = float(np.cbrt(2.0))
c = [1.0 / (2.0 - cbrt_2)]
# 3: [c1, 1.0 - 2*c1, c1]
c = symmetrize_coefficients(c, odd_number=True)
d = [1.0 / (4.0 - 2.0 * cbrt_2)]
# 4: [d1, 0.5 - d1, 0.5 - d1, d1]
d = symmetrize_coefficients(d, odd_number=False)
super().__init__(
position_coefficients=d,
momentum_coefficients=c,
position_first=True,
order=4
)
class Symmetric4(SymmetricCompositionSymplectic):
"""The fourth order method from Table 6.1 in [1] (originally from [3])."""
def __init__(self):
c = [0.0792036964311957, 0.353172906049774, -0.0420650803577195]
# 7 : [c1, c2, c3, 1.0 - c1 - c2 - c3, c3, c2, c1]
c = symmetrize_coefficients(c, odd_number=True)
d = [0.209515106613362, -0.143851773179818]
# 6: [d1, d2, 0.5 - d1, 0.5 - d1, d2, d1]
d = symmetrize_coefficients(d, odd_number=False)
super().__init__(
position_coefficients=d,
momentum_coefficients=c,
position_first=False,
order=4
)
class Symmetric6(SymmetricCompositionSymplectic):
"""The sixth order method from Table 6.1 in [1] (originally from [3])."""
def __init__(self):
c = [0.0502627644003922, 0.413514300428344, 0.0450798897943977,
-0.188054853819569, 0.541960678450780]
# 11 : [c1, c2, c3, c4, c5, 1.0 - sum(ci), c5, c4, c3, c2, c1]
c = symmetrize_coefficients(c, odd_number=True)
d = [0.148816447901042, -0.132385865767784, 0.067307604692185,
0.432666402578175]
# 10: [d1, d2, d3, d4, 0.5 - sum(di), 0.5 - sum(di), d4, d3, d2, d1]
d = symmetrize_coefficients(d, odd_number=False)
super().__init__(
position_coefficients=d,
momentum_coefficients=c,
position_first=False,
        order=6
)
def coefficients_based_on_composing_second_order(
weights: Sequence[float]
) -> Tuple[Sequence[float], Sequence[float]]:
"""Constructs the coefficients for methods based on second-order schemes."""
coefficients_0 = []
coefficients_1 = []
coefficients_0.append(weights[0] / 2.0)
for i in range(len(weights) - 1):
coefficients_1.append(weights[i])
coefficients_0.append((weights[i] + weights[i + 1]) / 2.0)
coefficients_1.append(weights[-1])
coefficients_0.append(weights[-1] / 2.0)
return coefficients_0, coefficients_1
class SymmetricSo4(SymmetricCompositionSymplectic):
"""The fourth order method from Table 6.2 in [1] (originally from [4])."""
def __init__(self, position_first: bool = False):
w = [0.28, 0.62546642846767004501]
# 5
w = symmetrize_coefficients(w, odd_number=True)
c0, c1 = coefficients_based_on_composing_second_order(w)
c_q, c_p = (c0, c1) if position_first else (c1, c0)
super().__init__(
position_coefficients=c_q,
momentum_coefficients=c_p,
position_first=position_first,
order=4
)
class SymmetricSo6(SymmetricCompositionSymplectic):
"""The sixth order method from Table 6.2 in [1] (originally from [5])."""
def __init__(self, position_first: bool = False):
w = [0.78451361047755726382, 0.23557321335935813368,
-1.17767998417887100695]
# 7
w = symmetrize_coefficients(w, odd_number=True)
c0, c1 = coefficients_based_on_composing_second_order(w)
c_q, c_p = (c0, c1) if position_first else (c1, c0)
super().__init__(
position_coefficients=c_q,
momentum_coefficients=c_p,
position_first=position_first,
order=6
)
class SymmetricSo8(SymmetricCompositionSymplectic):
"""The eighth order method from Table 6.2 in [1] (originally from [4])."""
def __init__(self, position_first: bool = False):
w = [0.74167036435061295345, -0.40910082580003159400,
0.19075471029623837995, -0.57386247111608226666,
0.29906418130365592384, 0.33462491824529818378,
0.31529309239676659663]
# 15
w = symmetrize_coefficients(w, odd_number=True)
c0, c1 = coefficients_based_on_composing_second_order(w)
c_q, c_p = (c0, c1) if position_first else (c1, c0)
super().__init__(
position_coefficients=c_q,
momentum_coefficients=c_p,
position_first=position_first,
order=8
)
general_integrators = dict(
general_euler=GeneralEuler(),
rk2=RungaKutta2(),
rk4=RungaKutta4(),
rk38=RungaKutta38()
)
symplectic_integrators = dict(
symp_euler=SymplecticEuler(position_first=True),
symp_euler_q=SymplecticEuler(position_first=True),
symp_euler_p=SymplecticEuler(position_first=False),
leap_frog=LeapFrog(position_first=False),
leap_frog_q=LeapFrog(position_first=True),
leap_frog_p=LeapFrog(position_first=False),
stormer_verlet=LeapFrog(position_first=False),
stormer_verlet_q=LeapFrog(position_first=True),
stormer_verlet_p=LeapFrog(position_first=False),
ruth4=Ruth4(),
sym4=Symmetric4(),
sym6=Symmetric6(),
so4=SymmetricSo4(position_first=False),
so4_q=SymmetricSo4(position_first=True),
so4_p=SymmetricSo4(position_first=False),
so6=SymmetricSo6(position_first=False),
so6_q=SymmetricSo6(position_first=True),
so6_p=SymmetricSo6(position_first=False),
so8=SymmetricSo8(position_first=False),
so8_q=SymmetricSo8(position_first=True),
so8_p=SymmetricSo8(position_first=False),
)
def get_integrator(
name_or_callable: Union[str, GeneralIntegrator]
) -> GeneralIntegrator:
"""Returns any integrator with the provided name or the argument."""
if isinstance(name_or_callable, str):
if name_or_callable in general_integrators:
return general_integrators[name_or_callable]
elif name_or_callable in symplectic_integrators:
return symplectic_integrators[name_or_callable]
else:
raise ValueError(f"Unrecognized integrator with name {name_or_callable}.")
if not callable(name_or_callable):
raise ValueError(f"Expected a callable, but got {type(name_or_callable)}.")
return name_or_callable
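# Minimal usage sketch (illustrative, not part of the original module):
#   get_integrator("leap_frog")    # -> the LeapFrog instance defined above
#   get_integrator("rk4")          # -> the RungaKutta4 instance defined above
#   get_integrator(RungaKutta4())  # callables are passed through unchanged
# Unrecognized string names raise a ValueError.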
|
deepmind-research-master
|
physics_inspired_models/integrators.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
deepmind-research-master
|
physics_inspired_models/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from setuptools import setup
REQUIRED_PACKAGES = (
"dm_hamiltonian_dynamics_suite@git+https://github.com/deepmind/dm_hamiltonian_dynamics_suite", # pylint: disable=line-too-long.
"absl-py>=0.12.0",
"numpy>=1.16.4",
"scikit-learn>=1.0",
"typing>=3.7.4.3",
"jax==0.2.20",
"jaxline==0.0.3",
"distrax==0.0.2",
"optax==0.0.6",
"dm-haiku==0.0.3",
)
LONG_DESCRIPTION = "\n".join([
"A codebase containing the implementation of the following models:",
"Hamiltonian Generative Network (HGN)",
"Lagrangian Generative Network (LGN)",
"Neural ODE",
"Recurrent Generative Network (RGN)",
"and RNN, LSTM and GRU.",
"This is code accompanying the publication of:"
])
setup(
name="physics_inspired_models",
version="0.0.1",
description="Implementation of multiple physically inspired models.",
long_description=LONG_DESCRIPTION,
url="https://github.com/deepmind/deepmind-research/physics_inspired_models",
author="DeepMind",
package_dir={"physics_inspired_models": "."},
packages=["physics_inspired_models", "physics_inspired_models.models"],
install_requires=REQUIRED_PACKAGES,
platforms=["any"],
license="Apache License, Version 2.0",
)
|
deepmind-research-master
|
physics_inspired_models/setup.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities functions for Jax."""
from collections import abc
import functools
from typing import Any, Callable, Dict, Mapping, Union
import distrax
import jax
from jax import core
from jax import lax
from jax import nn
import jax.numpy as jnp
from jax.tree_util import register_pytree_node
from jaxline import utils
import numpy as np
HaikuParams = Mapping[str, Mapping[str, jnp.ndarray]]
Params = Union[Mapping[str, jnp.ndarray], HaikuParams, jnp.ndarray]
_Activation = Callable[[jnp.ndarray], jnp.ndarray]
tf_leaky_relu = functools.partial(nn.leaky_relu, negative_slope=0.2)
def filter_only_scalar_stats(stats):
return {k: v for k, v in stats.items() if v.size == 1}
def to_numpy(obj):
return jax.tree_map(np.array, obj)
@jax.custom_gradient
def geco_lagrange_product(lagrange_multiplier, constraint_ema, constraint_t):
"""Modifies the gradients so that they work as described in GECO.
The evaluation gives:
lagrange * C_ema
The gradient w.r.t lagrange:
- g * C_t
The gradient w.r.t constraint_ema:
0.0
The gradient w.r.t constraint_t:
g * lagrange
Note that if you pass the same value for `constraint_ema` and `constraint_t`
this would only flip the gradient for the lagrange multiplier.
Args:
lagrange_multiplier: The lagrange multiplier
constraint_ema: The moving average of the constraint
constraint_t: The current constraint
  Returns:
    The value `lagrange_multiplier * constraint_ema`, with the custom
    gradients described above.
  """
def grad(gradient):
return (- gradient * constraint_t,
jnp.zeros_like(constraint_ema),
gradient * lagrange_multiplier)
return lagrange_multiplier * constraint_ema, grad
def bcast_if(x, t, n):
return [x] * n if isinstance(x, t) else x
def stack_time_into_channels(
images: jnp.ndarray,
data_format: str
) -> jnp.ndarray:
axis = data_format.index("C")
list_of_time = [jnp.squeeze(v, axis=1) for v in
jnp.split(images, images.shape[1], axis=1)]
return jnp.concatenate(list_of_time, axis)
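# Example (illustrative): for images of shape [B, T, H, W, C] = [2, 3, 8, 8, 4]
# and data_format="NHWC", the T frames are concatenated along the channel axis,
# yielding an array of shape [2, 8, 8, 12].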
def stack_device_dim_into_batch(obj):
return jax.tree_map(lambda x: x.reshape((-1,) + x.shape[2:]), obj)
def nearest_neighbour_upsampling(x, scale, data_format="NHWC"):
"""Performs nearest-neighbour upsampling."""
if data_format == "NCHW":
b, c, h, w = x.shape
x = jnp.reshape(x, [b, c, h, 1, w, 1])
ones = jnp.ones([1, 1, 1, scale, 1, scale], dtype=x.dtype)
return jnp.reshape(x * ones, [b, c, scale * h, scale * w])
elif data_format == "NHWC":
b, h, w, c = x.shape
x = jnp.reshape(x, [b, h, 1, w, 1, c])
ones = jnp.ones([1, 1, scale, 1, scale, 1], dtype=x.dtype)
return jnp.reshape(x * ones, [b, scale * h, scale * w, c])
else:
raise ValueError(f"Unrecognized data_format={data_format}.")
def get_activation(arg: Union[_Activation, str]) -> _Activation:
"""Returns an activation from provided string."""
if isinstance(arg, str):
# Try fetch in order - [this module, jax.nn, jax.numpy]
if arg in globals():
return globals()[arg]
if hasattr(nn, arg):
return getattr(nn, arg)
elif hasattr(jnp, arg):
return getattr(jnp, arg)
else:
raise ValueError(f"Unrecognized activation with name {arg}.")
if not callable(arg):
raise ValueError(f"Expected a callable, but got {type(arg)}")
return arg
def merge_first_dims(x: jnp.ndarray, num_dims_to_merge: int = 2) -> jnp.ndarray:
return x.reshape((-1,) + x.shape[num_dims_to_merge:])
def extract_image(
inputs: Union[jnp.ndarray, Mapping[str, jnp.ndarray]]
) -> jnp.ndarray:
"""Extracts a tensor with key `image` or `x_image` if it is a dict, otherwise returns the inputs."""
if isinstance(inputs, dict):
if "image" in inputs:
return inputs["image"]
else:
return inputs["x_image"]
elif isinstance(inputs, jnp.ndarray):
return inputs
raise NotImplementedError(f"Not implemented of inputs of type"
f" {type(inputs)}.")
def extract_gt_state(inputs: Any) -> jnp.ndarray:
if isinstance(inputs, dict):
return inputs["x"]
elif not isinstance(inputs, jnp.ndarray):
raise NotImplementedError(f"Not implemented of inputs of type"
f" {type(inputs)}.")
return inputs
def reshape_latents_conv_to_flat(conv_latents, axis_n_to_keep=1):
q, p = jnp.split(conv_latents, 2, axis=-1)
q = jax.tree_map(lambda x: x.reshape(x.shape[:axis_n_to_keep] + (-1,)), q)
p = jax.tree_map(lambda x: x.reshape(x.shape[:axis_n_to_keep] + (-1,)), p)
flat_latents = jnp.concatenate([q, p], axis=-1)
return flat_latents
def triu_matrix_from_v(x, ndim):
assert x.shape[-1] == (ndim * (ndim + 1)) // 2
matrix = jnp.zeros(x.shape[:-1] + (ndim, ndim))
idx = jnp.triu_indices(ndim)
index_update = lambda x, idx, y: x.at[idx].set(y)
for _ in range(x.ndim - 1):
index_update = jax.vmap(index_update, in_axes=(0, None, 0))
return index_update(matrix, idx, x)
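# Example (illustrative): for ndim=2 the last axis must have size 3, and
#   triu_matrix_from_v(jnp.array([a, b, c]), ndim=2)
# yields the upper-triangular matrix [[a, b], [0, c]].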
def flatten_dict(d, parent_key: str = "", sep: str = "_") -> Dict[str, Any]:
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, abc.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
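# Example (illustrative):
#   flatten_dict({"a": {"b": 1}, "c": 2})  # -> {"a_b": 1, "c": 2}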
def convert_to_pytype(target, reference):
"""Makes target the same pytype as reference, by jax.tree_flatten."""
_, pytree = jax.tree_flatten(reference)
leaves, _ = jax.tree_flatten(target)
return jax.tree_unflatten(pytree, leaves)
def func_if_not_scalar(func):
"""Makes a function that uses func only on non-scalar values."""
@functools.wraps(func)
def wrapped(array, axis=0):
if array.ndim == 0:
return array
return func(array, axis=axis)
return wrapped
mean_if_not_scalar = func_if_not_scalar(jnp.mean)
class MultiBatchAccumulator(object):
"""Class for abstracting statistics accumulation over multiple batches."""
def __init__(self):
self._obj = None
self._obj_max = None
self._obj_min = None
self._num_samples = None
def add(self, averaged_values, num_samples):
"""Adds an element to the moving average and the max."""
if self._obj is None:
self._obj_max = jax.tree_map(lambda y: y * 1.0, averaged_values)
self._obj_min = jax.tree_map(lambda y: y * 1.0, averaged_values)
self._obj = jax.tree_map(lambda y: y * num_samples, averaged_values)
self._num_samples = num_samples
else:
self._obj_max = jax.tree_map(jnp.maximum, self._obj_max,
averaged_values)
self._obj_min = jax.tree_map(jnp.minimum, self._obj_min,
averaged_values)
self._obj = jax.tree_map(lambda x, y: x + y * num_samples, self._obj,
averaged_values)
self._num_samples += num_samples
def value(self):
return jax.tree_map(lambda x: x / self._num_samples, self._obj)
def max(self):
return jax.tree_map(float, self._obj_max)
def min(self):
return jax.tree_map(float, self._obj_min)
def sum(self):
return self._obj
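# Usage sketch (illustrative): accumulating per-batch averages into
# dataset-wide statistics.
#   acc = MultiBatchAccumulator()
#   acc.add({"loss": 0.5}, num_samples=32)
#   acc.add({"loss": 0.7}, num_samples=32)
#   acc.value()  # -> {"loss": 0.6}, the sample-weighted mean
#   acc.max()    # -> {"loss": 0.7}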
register_pytree_node(
distrax.Normal,
lambda instance: ([instance.loc, instance.scale], None),
lambda _, args: distrax.Normal(*args)
)
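# The registration above makes distrax.Normal a pytree with (loc, scale) as
# leaves, so distributions can pass transparently through jax.tree_map, pmap
# and the other utilities in this module.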
def inner_product(x: Any, y: Any) -> jnp.ndarray:
products = jax.tree_map(lambda x_, y_: jnp.sum(x_ * y_), x, y)
return sum(jax.tree_leaves(products))
get_first = utils.get_first
bcast_local_devices = utils.bcast_local_devices
py_prefetch = utils.py_prefetch
p_split = jax.pmap(lambda x, num: list(jax.random.split(x, num)),
static_broadcasted_argnums=1)
def wrap_if_pmap(p_func):
def p_func_if_pmap(obj, axis_name):
try:
core.axis_frame(axis_name)
return p_func(obj, axis_name)
except NameError:
return obj
return p_func_if_pmap
pmean_if_pmap = wrap_if_pmap(lax.pmean)
psum_if_pmap = wrap_if_pmap(lax.psum)
|
deepmind-research-master
|
physics_inspired_models/utils.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing all of the configurations for various models."""
import copy
import os
from jaxline import base_config
import ml_collections as collections
_DATASETS_PATH_VAR_NAME = "DM_HAMILTONIAN_DYNAMICS_SUITE_DATASETS"
def get_config(arg_string):
"""Return config object for training."""
args = arg_string.split(",")
if len(args) != 3:
raise ValueError("You must provide exactly three arguments separated by a "
"comma - model_config_name,sweep_index,dataset_name.")
model_config_name, sweep_index, dataset_name = args
sweep_index = int(sweep_index)
config = base_config.get_base_config()
config.random_seed = 123109801
config.eval_modes = ("eval", "eval_metric")
# Get the model config and the sweeps
if model_config_name not in globals():
raise ValueError(f"The config name {model_config_name} does not exist in "
f"jaxline_configs.py")
config_and_sweep_fn = globals()[model_config_name]
model_config, sweeps = config_and_sweep_fn()
if not os.environ.get(_DATASETS_PATH_VAR_NAME, None):
raise ValueError(f"You need to set the {_DATASETS_PATH_VAR_NAME}")
dm_hamiltonian_suite_path = os.environ[_DATASETS_PATH_VAR_NAME]
dataset_folder = os.path.join(dm_hamiltonian_suite_path, dataset_name)
# Experiment config. Note that batch_size is per device.
# In the experiments we run on 4 GPUs, so the effective batch size was 128.
config.experiment_kwargs = collections.ConfigDict(
dict(
config=dict(
dataset_folder=dataset_folder,
model_kwargs=model_config,
num_extrapolation_steps=60,
drop_stats_containing=("neg_log_p_x", "l2_over_time", "neg_elbo"),
optimizer=dict(
name="adam",
kwargs=dict(
learning_rate=1.5e-4,
b1=0.9,
b2=0.999,
)
),
training=dict(
batch_size=32,
burnin_steps=5,
num_epochs=None,
lagging_vae=False
),
evaluation=dict(
batch_size=64,
),
evaluation_metric=dict(
batch_size=5,
batch_n=20,
num_eval_metric_steps=60,
max_poly_order=5,
max_jacobian_score=1000,
rsq_threshold=0.9,
sym_threshold=0.05,
evaluation_point_n=10,
weight_tolerance=1e-03,
max_iter=1000,
cv=2,
alpha_min_logspace=-4,
alpha_max_logspace=-0.5,
alpha_step_n=10,
calculate_fully_after_steps=40000,
),
evaluation_metric_mlp=dict(
batch_size=64,
batch_n=10000,
datapoint_param_multiplier=1000,
num_eval_metric_steps=60,
evaluation_point_n=10,
evaluation_trajectory_n=50,
rsq_threshold=0.9,
sym_threshold=0.05,
ridge_lambda=0.01,
model=dict(
num_units=4,
num_layers=4,
activation="tanh",
),
optimizer=dict(
name="adam",
kwargs=dict(
learning_rate=1.5e-3,
)
),
),
evaluation_vpt=dict(
batch_size=5,
batch_n=2,
vpt_threshold=0.025,
)
)
)
)
# Training loop config.
config.training_steps = int(500000)
config.interval_type = "steps"
config.log_tensors_interval = 50
config.log_train_data_interval = 50
config.log_all_train_data = False
config.save_checkpoint_interval = 100
config.checkpoint_dir = "/tmp/physics_inspired_models/"
config.train_checkpoint_all_hosts = False
config.eval_specific_checkpoint_dir = ""
config.update_from_flattened_dict(sweeps[sweep_index])
return config
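# Illustrative invocation (the flag syntax follows standard jaxline usage and
# the dataset name is only an example):
#   python jaxline_train.py \
#     --config=jaxline_configs.py:benchmark_hgn_overlap_sweep,0,mass_spring_colors_v1_dt_0_05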
config_prefix = "experiment_kwargs.config."
model_prefix = config_prefix + "model_kwargs."
default_encoder_kwargs = collections.ConfigDict(dict(
conv_channels=64,
num_blocks=3,
blocks_depth=2,
activation="leaky_relu",
))
default_decoder_kwargs = collections.ConfigDict(dict(
conv_channels=64,
num_blocks=3,
blocks_depth=2,
activation="leaky_relu",
))
default_latent_system_net_kwargs = collections.ConfigDict(dict(
conv_channels=64,
num_units=250,
num_layers=5,
activation="swish",
))
default_latent_system_kwargs = collections.ConfigDict(dict(
# Physics model arguments
input_space=collections.config_dict.placeholder(str),
simulation_space=collections.config_dict.placeholder(str),
potential_func_form="separable_net",
kinetic_func_form=collections.config_dict.placeholder(str),
hgn_kinetic_func_form="separable_net",
lgn_kinetic_func_form="matrix_dep_quad",
parametrize_mass_matrix=collections.config_dict.placeholder(bool),
hgn_parametrize_mass_matrix=False,
lgn_parametrize_mass_matrix=True,
mass_eps=1.0,
# ODE model arguments
integrator_method=collections.config_dict.placeholder(str),
# RGN model arguments
residual=collections.config_dict.placeholder(bool),
# General arguments
net_kwargs=default_latent_system_net_kwargs
))
default_config_dict = collections.ConfigDict(dict(
name=collections.config_dict.placeholder(str),
latent_system_dim=32,
latent_system_net_type="mlp",
latent_system_kwargs=default_latent_system_kwargs,
encoder_aggregation_type="linear_projection",
decoder_de_aggregation_type=collections.config_dict.placeholder(str),
encoder_kwargs=default_encoder_kwargs,
decoder_kwargs=default_decoder_kwargs,
has_latent_transform=False,
num_inference_steps=5,
num_target_steps=60,
latent_training_type="forward",
# Choices: overlap_by_one, no_overlap, include_inference
training_data_split="overlap_by_one",
objective_type="ELBO",
elbo_beta_delay=0,
elbo_beta_final=1.0,
geco_kappa=0.001,
geco_alpha=0.0,
dt=0.125,
))
hgn_paper_encoder_kwargs = collections.ConfigDict(dict(
conv_channels=[[32, 64], [64, 64], [64]],
num_blocks=3,
blocks_depth=2,
activation="relu",
kernel_shapes=[2, 4],
padding=["VALID", "SAME"],
))
hgn_paper_decoder_kwargs = collections.ConfigDict(dict(
conv_channels=64,
num_blocks=3,
blocks_depth=2,
activation="tf_leaky_relu",
))
hgn_paper_latent_net_kwargs = collections.ConfigDict(dict(
conv_channels=[32, 64, 64, 64],
num_units=250,
num_layers=5,
activation="softplus",
kernel_shapes=[3, 2, 2, 2, 2],
strides=[1, 2, 1, 2, 1],
padding=["SAME", "VALID", "SAME", "VALID", "SAME"]
))
hgn_paper_latent_system_kwargs = collections.ConfigDict(dict(
potential_func_form="separable_net",
kinetic_func_form="separable_net",
parametrize_mass_matrix=False,
net_kwargs=hgn_paper_latent_net_kwargs
))
hgn_paper_latent_transform_kwargs = collections.ConfigDict(dict(
num_layers=5,
conv_channels=64,
num_units=64,
activation="relu",
))
hgn_paper_config = copy.deepcopy(default_config_dict)
hgn_paper_config.training_data_split = "include_inference"
hgn_paper_config.latent_system_net_type = "conv"
hgn_paper_config.encoder_aggregation_type = (
    collections.config_dict.placeholder(str))
hgn_paper_config.decoder_de_aggregation_type = (
    collections.config_dict.placeholder(str))
hgn_paper_config.latent_system_kwargs = hgn_paper_latent_system_kwargs
hgn_paper_config.encoder_kwargs = hgn_paper_encoder_kwargs
hgn_paper_config.decoder_kwargs = hgn_paper_decoder_kwargs
hgn_paper_config.has_latent_transform = True
hgn_paper_config.latent_transform_kwargs = hgn_paper_latent_transform_kwargs
hgn_paper_config.num_inference_steps = 31
hgn_paper_config.num_target_steps = 0
hgn_paper_config.objective_type = "GECO"
forward_overlap_by_one = {
model_prefix + "latent_training_type": "forward",
model_prefix + "training_data_split": "overlap_by_one",
}
forward_backward_include_inference = {
model_prefix + "latent_training_type": "forward_backward",
model_prefix + "training_data_split": "include_inference",
}
latent_training_sweep = [
forward_overlap_by_one,
forward_backward_include_inference,
]
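# Each sweep entry is a flat dict of config overrides keyed by dotted paths,
# applied via `config.update_from_flattened_dict` in `get_config`. For example
# `forward_overlap_by_one` above expands to (illustrative):
#   {"experiment_kwargs.config.model_kwargs.latent_training_type": "forward",
#    "experiment_kwargs.config.model_kwargs.training_data_split":
#        "overlap_by_one"}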
def sym_metric_hgn_plus_plus_sweep():
"""HGN++ experimental sweep for the SyMetric paper."""
model_config = copy.deepcopy(default_config_dict)
model_config.name = "HGN"
sweeps = list()
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "latent_training_type": "forward",
model_prefix + "training_data_split": "overlap_by_one",
model_prefix + "elbo_beta_final": elbo_beta_final,
})
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "latent_training_type": "forward_backward",
model_prefix + "training_data_split": "include_inference",
model_prefix + "elbo_beta_final": elbo_beta_final,
})
return model_config, sweeps
def sym_metric_hgn_sweep():
"""HGN experimental sweep for the SyMetric paper."""
model_config = copy.deepcopy(hgn_paper_config)
model_config.name = "HGN"
  return model_config, [dict()]
def benchmark_hgn_overlap_sweep():
"""HGN++ sweep for the benchmark paper."""
model_config = copy.deepcopy(default_config_dict)
model_config.name = "HGN"
sweeps = list()
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
for train_dict in latent_training_sweep:
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "elbo_beta_final": elbo_beta_final,
})
sweeps[-1].update(train_dict)
return model_config, sweeps
def benchmark_lgn_sweep():
"""LGN sweep for the benchmark paper."""
model_config = copy.deepcopy(default_config_dict)
model_config.name = "LGN"
sweeps = list()
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
for train_dict in latent_training_sweep:
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "latent_system_kwargs.kinetic_func_form":
"matrix_dep_pure_quad",
model_prefix + "elbo_beta_final": elbo_beta_final,
})
sweeps[-1].update(train_dict)
return model_config, sweeps
def benchmark_ode_sweep():
"""Neural ODE sweep for the benchmark paper."""
model_config = copy.deepcopy(default_config_dict)
model_config.name = "ODE"
sweeps = list()
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
for integrator in ("adaptive", "rk2"):
for train_dict in latent_training_sweep:
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "integrator_method": integrator,
model_prefix + "elbo_beta_final": elbo_beta_final,
})
sweeps[-1].update(train_dict)
return model_config, sweeps
def benchmark_rgn_sweep():
"""RGN sweep for the benchmark paper."""
model_config = copy.deepcopy(default_config_dict)
model_config.name = "RGN"
sweeps = list()
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
for residual in (True, False):
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "latent_system_kwargs.residual": residual,
model_prefix + "elbo_beta_final": elbo_beta_final,
})
return model_config, sweeps
def benchmark_ar_sweep():
"""AR sweep for the benchmark paper."""
model_config = copy.deepcopy(default_config_dict)
model_config.name = "AR"
model_config.latent_dynamics_type = "vanilla"
sweeps = list()
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
for ar_type in ("vanilla", "lstm", "gru"):
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "latent_dynamics_type": ar_type,
model_prefix + "elbo_beta_final": elbo_beta_final,
})
return model_config, sweeps
|
deepmind-research-master
|
physics_inspired_models/jaxline_configs.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The training script for the HGN models."""
import functools
from absl import app
from absl import flags
from absl import logging
from dm_hamiltonian_dynamics_suite import load_datasets
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import experiment
from jaxline import platform
import numpy as np
import optax
from physics_inspired_models import eval_metric
from physics_inspired_models import utils
from physics_inspired_models.models import common
AutoregressiveModel = common.autoregressive.TeacherForcingAutoregressiveModel
class HGNExperiment(experiment.AbstractExperiment):
"""HGN experiment."""
CHECKPOINT_ATTRS = {
"_params": "params",
"_state": "state",
"_opt_state": "opt_state",
}
NON_BROADCAST_CHECKPOINT_ATTRS = {
"_python_step": "python_step"
}
def __init__(self, mode, init_rng, config):
super().__init__(mode=mode)
self.mode = mode
self.init_rng = init_rng
self.config = config
# Checkpointed experiment state.
self._python_step = None
self._params = None
self._state = None
self._opt_state = None
# Input pipelines.
self._train_input = None
self._step_fn = None
self._burnin_fn = None
self._eval_input = None
self._eval_batch = None
self._eval_input_metric = None
self._eval_input_vpt = None
self._compute_gt_state_and_latents = None
self._get_reconstructions = None
self._get_samples = None
# Construct the model
model_kwargs = dict(**self.config.model_kwargs)
self.model = common.construct_model(**model_kwargs)
# Construct the optimizer
optimizer_ctor = getattr(optax, self.config.optimizer.name)
self.optimizer = optimizer_ctor(**self.config.optimizer.kwargs)
self.model_init = jax.pmap(self.model.init)
self.opt_init = jax.pmap(self.optimizer.init)
logging.info("Number of hosts: %d/%d",
jax.process_index(), jax.process_count())
logging.info("Number of local devices: %d/%d", jax.local_device_count(),
jax.device_count())
def _process_stats(self, stats, axis_name=None):
keys_to_remove = list()
for key in stats.keys():
for dropped_keys in self.config.drop_stats_containing:
if dropped_keys in key:
keys_to_remove.append(key)
break
for key in keys_to_remove:
stats.pop(key)
# Take average statistics
stats = jax.tree_map(utils.mean_if_not_scalar, stats)
stats = utils.filter_only_scalar_stats(stats)
    if axis_name is not None:
      stats = utils.pmean_if_pmap(stats, axis_name=axis_name)
return stats
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, global_step, rng, **unused_args):
"""See base class."""
if self._train_input is None:
self._initialize_train()
# Do a small burnin to accumulate any persistent network state
if self._python_step == 0 and self._state:
for _ in range(self.config.training.burnin_steps):
rng, key = utils.p_split(rng, 2)
batch = next(self._train_input)
self._state = self._burnin_fn(self._params, self._state, key, batch)
self._state = jax.tree_map(
lambda x: x / self.config.training.burnin_steps, self._state)
batch = next(self._train_input)
self._params, self._state, self._opt_state, stats = self._step_fn(
self._params, self._state, self._opt_state, rng, batch, global_step)
self._python_step += 1
stats = utils.get_first(stats)
logging.info("global_step: %d, %s", self._python_step,
jax.tree_map(float, stats))
return stats
def _initialize_train(self):
self._train_input = utils.py_prefetch(
load_datasets.dataset_as_iter(self._build_train_input))
self._burnin_fn = jax.pmap(
self._jax_burnin_fn, axis_name="i", donate_argnums=list(range(1, 4)))
self._step_fn = jax.pmap(
self._jax_train_step_fn, axis_name="i", donate_argnums=list(range(5)))
if self._params is not None:
logging.info("Not running initialization - loaded from checkpoint.")
assert self._opt_state is not None
return
logging.info("Initializing parameters - NOT loading from checkpoint.")
# Use the same rng on all devices, so that the initialization is identical
init_rng = utils.bcast_local_devices(self.init_rng)
# Initialize the parameters and the optimizer
batch = next(self._train_input)
self._params, self._state = self.model_init(init_rng, batch)
self._python_step = 0
self._opt_state = self.opt_init(self._params)
def _build_train_input(self):
batch_size = self.config.training.batch_size
return load_datasets.load_dataset(
path=self.config.dataset_folder,
tfrecord_prefix="train",
sub_sample_length=self.model.train_sequence_length,
per_device_batch_size=batch_size,
num_epochs=self.config.training.num_epochs,
drop_remainder=True,
multi_device=True,
shuffle=True,
shuffle_buffer=100 * batch_size,
cache=False,
keys_to_preserve=["image"],
)
def _jax_train_step_fn(self, params, state, opt_state, rng_key, batch, step):
# The loss and the stats are averaged over the batch
def loss_func(*args):
outs = self.model.training_objectives(*args, is_training=True)
# Average everything over the batch
return jax.tree_map(utils.mean_if_not_scalar, outs)
# Compute gradients
grad_fn = jax.grad(loss_func, has_aux=True)
grads, (state, stats, _) = grad_fn(params, state, rng_key, batch, step)
# Average everything over the devices (e.g. average and sync)
grads, state = utils.pmean_if_pmap((grads, state), axis_name="i")
# Apply updates
updates, opt_state = self.optimizer.update(grads, opt_state)
params = optax.apply_updates(params, updates)
return params, state, opt_state, self._process_stats(stats, axis_name="i")
def _jax_burnin_fn(self, params, state, rng_key, batch):
_, (new_state, _, _) = self.model.training_objectives(
params, state, rng_key, batch, jnp.zeros([]), is_training=True)
new_state = jax.tree_map(utils.mean_if_not_scalar, new_state)
new_state = utils.pmean_if_pmap(new_state, axis_name="i")
new_state = hk.data_structures.to_mutable_dict(new_state)
new_state = hk.data_structures.to_immutable_dict(new_state)
return jax.tree_map(jnp.add, new_state, state)
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, rng, writer):
"""See base class."""
logging.info("Starting evaluation.")
if self.mode == "eval":
if self._eval_input is None:
self._initialize_eval()
self._initialize_eval_vpt()
key1, _ = utils.p_split(rng, 2)
stats = utils.to_numpy(self._eval_epoch(global_step, key1))
stats.update(utils.to_numpy(self._eval_epoch_vpt(global_step, rng)))
elif self.mode == "eval_metric":
if self._eval_input_metric is None:
self._initialize_eval_metric()
stats = utils.to_numpy(self._eval_epoch_metric(global_step, rng))
else:
raise NotImplementedError()
logging.info("Finished evaluation.")
return stats
def _eval_epoch(self, step, rng):
"""Evaluates an epoch."""
accumulator = utils.MultiBatchAccumulator()
for batch in self._eval_input():
rng, key = utils.p_split(rng, 2)
stats, num_samples = utils.get_first(
self._eval_batch(self._params, self._state, key, batch, step)
)
accumulator.add(stats, num_samples)
return accumulator.value()
def _eval_epoch_metric(self, step, rng):
"""Evaluates an epoch."""
    # To avoid calculating the SyMetric score early on in training, when a
    # large polynomial expansion is likely to be required and the score is
    # likely to be poor anyway, we compute it on a single batch only, to
    # save compute.
if step[0] > self.config.evaluation_metric.calculate_fully_after_steps:
batch_n = self.config.evaluation_metric.batch_n
else:
batch_n = 1
logging.info("Step: %d, batch_n: %d", step[0], batch_n)
accumulator = utils.MultiBatchAccumulator()
    for _ in range(batch_n):
batch = next(self._eval_input_metric)
rng, key = utils.p_split(rng, 2)
stats = self._eval_batch_metric(
self._params, key, batch,
eval_seq_len=self.config.evaluation_metric.num_eval_metric_steps,
)
accumulator.add(stats, 1)
stats = utils.flatten_dict(accumulator.value())
max_keys = ("sym", "SyMetric")
for k, v in utils.flatten_dict(accumulator.max()).items():
if any(m in k for m in max_keys):
stats[k + "_max"] = v
min_keys = ("sym", "SyMetric")
for k, v in utils.flatten_dict(accumulator.min()).items():
if any(m in k for m in min_keys):
stats[k + "_min"] = v
sum_keys = ("sym", "SyMetric")
for k, v in utils.flatten_dict(accumulator.sum()).items():
if any(m in k for m in sum_keys):
stats[k + "_sum"] = v
return stats
def _eval_epoch_vpt(self, step, rng):
"""Evaluates an epoch."""
accumulator = utils.MultiBatchAccumulator()
for _ in range(self.config.evaluation_vpt.batch_n):
batch = next(self._eval_input_vpt)
rng, key = utils.p_split(rng, 2)
stats = self._eval_batch_vpt(self._params, self._state, key, batch)
accumulator.add(stats, 1)
stats = utils.flatten_dict(accumulator.value())
return stats
def _reconstruct_and_align(self, rng_key, full_trajectory, prefix, suffix):
if hasattr(self.model, "training_data_split"):
if self.model.training_data_split == "overlap_by_one":
reconstruction_skip = self.model.num_inference_steps - 1
elif self.model.training_data_split == "no_overlap":
reconstruction_skip = self.model.num_inference_steps
elif self.model.training_data_split == "include_inference":
reconstruction_skip = 0
else:
raise NotImplementedError()
else:
reconstruction_skip = 1
full_forward_targets = jax.tree_map(
lambda x: x[:, :, reconstruction_skip:], full_trajectory)
full_backward_targets = jax.tree_map(
lambda x: x[:, :, :x.shape[2] - reconstruction_skip], full_trajectory)
train_targets_length = (self.model.train_sequence_length -
reconstruction_skip)
full_targets_length = full_forward_targets.shape[2]
# Fully unroll the model and reconstruct the whole sequence, take the mean
full_prediction = self._get_reconstructions(self._params, full_trajectory,
rng_key, prefix == "forward",
True).mean()
full_targets = (full_forward_targets if prefix == "forward" else
full_backward_targets)
    # In cases where the model can run backwards it is possible to reconstruct
    # parts which were intended to be skipped, so here we take care of that.
    if full_prediction.shape[2] > full_targets_length:
if prefix == "forward":
full_prediction = jax.tree_map(
lambda x: x[:, :, -full_targets_length:], full_prediction)
else:
full_prediction = jax.tree_map(
lambda x: x[:, :, :full_targets_length], full_prediction)
# Based on the prefix and suffix fetch correct predictions and targets
if prefix == "forward" and suffix == "train":
predict, targets = jax.tree_map(
lambda x: x[:, :, :train_targets_length],
(full_prediction, full_targets))
elif prefix == "forward" and suffix == "extrapolation":
predict, targets = jax.tree_map(
lambda x: x[:, :, train_targets_length:],
(full_prediction, full_targets))
elif prefix == "backward" and suffix == "train":
predict, targets = jax.tree_map(
lambda x: x[:, :, -train_targets_length:],
(full_prediction, full_targets))
elif prefix == "backward" and suffix == "extrapolation":
predict, targets = jax.tree_map(
lambda x: x[:, :, :-train_targets_length],
(full_prediction, full_targets))
else:
predict, targets = full_prediction, full_targets
return predict, targets
def _initialize_eval(self):
length = (self.model.train_sequence_length +
self.config.num_extrapolation_steps)
batch_size = self.config.evaluation.batch_size
self._eval_input = load_datasets.dataset_as_iter(
load_datasets.load_dataset,
path=self.config.dataset_folder,
tfrecord_prefix="test",
sub_sample_length=length,
per_device_batch_size=batch_size,
num_epochs=1,
drop_remainder=False,
shuffle=False,
cache=False,
keys_to_preserve=["image"]
)
self._eval_batch = jax.pmap(
self._jax_eval_step_fn, axis_name="i")
self._get_reconstructions = jax.pmap(
self.model.reconstruct, axis_name="i",
static_broadcasted_argnums=(3, 4))
if isinstance(self.model,
common.deterministic_vae.DeterministicLatentsGenerativeModel):
self._get_samples = jax.pmap(
self.model.sample_trajectories_from_prior,
static_broadcasted_argnums=(1, 3, 4))
def _initialize_eval_metric(self):
self._eval_input_metric = utils.py_prefetch(
load_datasets.dataset_as_iter(
load_datasets.load_dataset,
path=self.config.dataset_folder,
tfrecord_prefix="test",
sub_sample_length=None,
per_device_batch_size=self.config.evaluation_metric.batch_size,
num_epochs=None,
drop_remainder=False,
cache=False,
shuffle=False,
keys_to_preserve=["image", "x"]
)
)
def compute_gt_state_and_latents(*args):
      # Note that `dt` has to be passed as a keyword argument.
if len(args) == 4:
return self.model.gt_state_and_latents(*args[:4])
elif len(args) == 5:
return self.model.gt_state_and_latents(*args[:4], dt=args[4])
else:
raise NotImplementedError()
self._compute_gt_state_and_latents = jax.pmap(
compute_gt_state_and_latents, static_broadcasted_argnums=3)
def _initialize_eval_vpt(self):
dataset_name = self.config.dataset_folder.split("/")[-1]
dataset_folder = self.config.dataset_folder
if dataset_name in ("hnn_mass_spring_dt_0_05",
"mass_spring_colors_v1_dt_0_05",
"hnn_pendulum_dt_0_05",
"pendulum_colors_v1_dt_0_05",
"matrix_rps_dt_0_1",
"matrix_mp_dt_0_1"):
dataset_folder += "_long_trajectory"
self._eval_input_vpt = utils.py_prefetch(
load_datasets.dataset_as_iter(
load_datasets.load_dataset,
path=dataset_folder,
tfrecord_prefix="test",
sub_sample_length=None,
per_device_batch_size=self.config.evaluation_vpt.batch_size,
num_epochs=None,
drop_remainder=False,
cache=False,
shuffle=False,
keys_to_preserve=["image", "x"]
)
)
self._get_reconstructions = jax.pmap(
self.model.reconstruct, axis_name="i",
static_broadcasted_argnums=(3, 4))
def _jax_eval_step_fn(self, params, state, rng_key, batch, step):
# We care only about the statistics
_, (_, stats, _) = self.model.training_objectives(params, state, rng_key,
batch, step,
is_training=False)
# Compute the full batch size
batch_size = jax.tree_flatten(batch)[0][0].shape[0]
batch_size = utils.psum_if_pmap(batch_size, axis_name="i")
return self._process_stats(stats, axis_name="i"), batch_size
def _eval_batch_vpt(self, params, state, rng_key, batch):
full_trajectory = utils.extract_image(batch)
prefixes = ("forward",
"backward") if self.model.can_run_backwards else ("forward",)
stats = dict()
vpt_abs_scores = []
vpt_rel_scores = []
seq_length = None
for prefix in prefixes:
reconstruction, gt_images = self._reconstruct_and_align(
rng_key, full_trajectory, prefix, "extrapolation")
seq_length = gt_images.shape[2]
mse_norm = np.mean(
(gt_images - reconstruction)**2, axis=(3, 4, 5)) / np.mean(
gt_images**2, axis=(3, 4, 5))
vpt_scores = []
for i in range(mse_norm.shape[1]):
vpt_ind = np.argwhere(
mse_norm[:, i:i + 1, :] > self.config.evaluation_vpt.vpt_threshold)
if vpt_ind.shape[0] > 0:
vpt_ind = vpt_ind[0][2]
else:
vpt_ind = mse_norm.shape[-1]
vpt_scores.append(vpt_ind)
vpt_abs_scores.append(np.median(vpt_scores))
vpt_rel_scores.append(np.median(vpt_scores) / seq_length)
scores = {"vpt_abs": vpt_abs_scores[-1], "vpt_rel": vpt_rel_scores[-1]}
scores = utils.to_numpy(scores)
scores = utils.filter_only_scalar_stats(scores)
stats[prefix] = scores
stats["vpt_abs"] = utils.to_numpy(np.mean(vpt_abs_scores))
stats["vpt_rel"] = utils.to_numpy(np.mean(vpt_rel_scores))
logging.info("vpt_abs: %s, seq_length: %d}",
str(vpt_abs_scores), seq_length)
return stats
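  # VPT sketch (illustrative): with vpt_threshold=0.025 and per-step
  # normalised MSE [0.01, 0.02, 0.03, 0.05, ...] for one trajectory, the VPT
  # index is 2, the first step whose error exceeds the threshold. The median
  # over trajectories gives `vpt_abs`; dividing by the sequence length gives
  # `vpt_rel`.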
def _eval_batch_metric(self, params, rng, batch, eval_seq_len=200):
# Initialise alpha values for Lasso regression
alpha_sweep = np.logspace(self.config.evaluation_metric.alpha_min_logspace,
self.config.evaluation_metric.alpha_max_logspace,
self.config.evaluation_metric.alpha_step_n)
trajectory_n = self.config.evaluation_metric.batch_size
subsection = f"{trajectory_n}tr"
stats = dict()
# Get data
(gt_trajectory,
model_trajectory,
informative_dim_n) = self._get_gt_and_model_phase_space_for_eval(
params, rng, batch, eval_seq_len)
# Calculate SyMetric scores
if informative_dim_n > 1:
scores, *_ = eval_metric.calculate_symetric_score(
gt_trajectory,
model_trajectory,
self.config.evaluation_metric.max_poly_order,
self.config.evaluation_metric.max_jacobian_score,
self.config.evaluation_metric.rsq_threshold,
self.config.evaluation_metric.sym_threshold,
self.config.evaluation_metric.evaluation_point_n,
trajectory_n=trajectory_n,
weight_tolerance=self.config.evaluation_metric.weight_tolerance,
alpha_sweep=alpha_sweep,
max_iter=self.config.evaluation_metric.max_iter,
cv=self.config.evaluation_metric.cv)
scores["unmasked_latents"] = informative_dim_n
scores = utils.to_numpy(scores)
scores = utils.filter_only_scalar_stats(scores)
stats[subsection] = scores
else:
      scores = {
          "poly_exp_order": self.config.evaluation_metric.max_poly_order,
          "rsq": 0,
          "sym": self.config.evaluation_metric.max_jacobian_score,
          "SyMetric": 0.0,
          "unmasked_latents": informative_dim_n,
      }
scores = utils.to_numpy(scores)
scores = utils.filter_only_scalar_stats(scores)
stats[subsection] = scores
return stats
def _get_gt_and_model_phase_space_for_eval(self, params, rng, batch,
eval_seq_len):
# Get data
gt_data, model_data, z0 = utils.stack_device_dim_into_batch(
self._compute_gt_state_and_latents(params, rng, batch, eval_seq_len)
)
if isinstance(self.model, AutoregressiveModel):
# These models return the `z` for the whole sequence
z0 = z0[:, 0]
# If latent space is image like, reshape it down to vector
if self.model.latent_system_net_type == "conv":
z0 = jax.tree_map(utils.reshape_latents_conv_to_flat, z0)
model_data = jax.tree_map(
lambda x: utils.reshape_latents_conv_to_flat(x, axis_n_to_keep=2),
model_data)
# Create mask to get rid of uninformative latents
latent_mask = eval_metric.create_latent_mask(z0)
informative_dim_n = np.sum(latent_mask)
model_data = model_data[:, :, latent_mask]
logging.info("Masking out model data, leaving dim_n=%d dimensions.",
model_data.shape[-1])
gt_trajectory = np.reshape(
gt_data,
[np.product(gt_data.shape[:-1]), gt_data.shape[-1]]
)
model_trajectory = np.reshape(model_data, [
np.product(model_data.shape[:-1]), model_data.shape[-1]
])
# Standardize data
gt_trajectory = eval_metric.standardize_data(gt_trajectory)
model_trajectory = eval_metric.standardize_data(model_trajectory)
return gt_trajectory, model_trajectory, informative_dim_n
if __name__ == "__main__":
flags.mark_flag_as_required("config")
logging.set_stderrthreshold(logging.INFO)
app.run(functools.partial(platform.main, HGNExperiment))
|
deepmind-research-master
|
physics_inspired_models/jaxline_train.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all autoregressive models."""
import functools
from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Union
import distrax
import haiku as hk
from jax import lax
import jax.numpy as jnp
import jax.random as jnr
import physics_inspired_models.metrics as metrics
import physics_inspired_models.models.base as base
import physics_inspired_models.models.networks as nets
import physics_inspired_models.utils as utils
class TeacherForcingAutoregressiveModel(base.SequenceModel):
"""A standard autoregressive model trained via teacher forcing."""
def __init__(
self,
latent_system_dim: int,
latent_system_net_type: str,
latent_system_kwargs: Dict[str, Any],
latent_dynamics_type: str,
encoder_aggregation_type: Optional[str],
decoder_de_aggregation_type: Optional[str],
encoder_kwargs: Dict[str, Any],
decoder_kwargs: Dict[str, Any],
num_inference_steps: int,
num_target_steps: int,
name: Optional[str] = None,
**kwargs
):
# Remove any parameters from vae models
encoder_kwargs = dict(**encoder_kwargs)
encoder_kwargs["distribution_name"] = None
if kwargs.get("has_latent_transform", False):
raise ValueError("We do not support AR models with latent transform.")
super().__init__(
can_run_backwards=False,
latent_system_dim=latent_system_dim,
latent_system_net_type=latent_system_net_type,
latent_system_kwargs=latent_system_kwargs,
encoder_aggregation_type=encoder_aggregation_type,
decoder_de_aggregation_type=decoder_de_aggregation_type,
encoder_kwargs=encoder_kwargs,
decoder_kwargs=decoder_kwargs,
num_inference_steps=num_inference_steps,
num_target_steps=num_target_steps,
name=name,
**kwargs
)
self.latent_dynamics_type = latent_dynamics_type
# Arguments checks
if self.latent_system_net_type != "mlp":
raise ValueError("Currently we do not support non-mlp AR models.")
def recurrence_function(sequence, initial_state=None):
core = nets.make_flexible_recurrent_net(
core_type=latent_dynamics_type,
net_type=latent_system_net_type,
output_dims=self.latent_system_dim,
**self.latent_system_kwargs["net_kwargs"])
initial_state = initial_state or core.initial_state(sequence.shape[1])
core(sequence[0], initial_state)
return hk.dynamic_unroll(core, sequence, initial_state)
self.recurrence = hk.transform(recurrence_function)
def process_inputs_for_encoder(self, x: jnp.ndarray) -> jnp.ndarray:
return x
def process_latents_for_dynamics(self, z: jnp.ndarray) -> jnp.ndarray:
return z
def process_latents_for_decoder(self, z: jnp.ndarray) -> jnp.ndarray:
return z
@property
def inferred_index(self) -> int:
return self.num_inference_steps - 1
@property
def train_sequence_length(self) -> int:
return self.num_target_steps
def train_data_split(
self,
images: jnp.ndarray
) -> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, Any]]:
images = images[:, :self.train_sequence_length]
inference_data = images[:, :-1]
target_data = images[:, 1:]
return inference_data, target_data, dict(
num_steps_forward=1,
num_steps_backward=0,
include_z0=False)
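  # Teacher forcing (illustrative): for a training sequence [x_0, ..., x_T]
  # the model is conditioned on [x_0, ..., x_{T-1}] and trained to predict
  # [x_1, ..., x_T], i.e. the targets are the inputs shifted by one step.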
def unroll_without_inputs(
self,
params: utils.Params,
rng: jnp.ndarray,
x_init: jnp.ndarray,
h_init: jnp.ndarray,
num_steps: int,
is_training: bool
) -> Tuple[Tuple[distrax.Distribution, jnp.ndarray], Any]:
if num_steps < 1:
raise ValueError("`num_steps` must be at least 1.")
def step_fn(carry, key):
x_last, h_last = carry
enc_key, dec_key = jnr.split(key)
z_in_next = self.encoder.apply(params, enc_key, x_last,
is_training=is_training)
z_next, h_next = self.recurrence.apply(params, None, z_in_next[None],
h_last)
p_x_next = self.decode_latents(params, dec_key, z_next[0],
is_training=is_training)
return (p_x_next.mean(), h_next), (p_x_next, z_next[0])
return lax.scan(
step_fn,
init=(x_init, h_init),
xs=jnr.split(rng, num_steps)
)
def unroll_latent_dynamics(
self,
z: jnp.ndarray,
params: utils.Params,
key: jnp.ndarray,
num_steps_forward: int,
num_steps_backward: int,
include_z0: bool,
is_training: bool,
**kwargs: Any
) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:
init_key, unroll_key, dec_key = jnr.split(key, 3)
if num_steps_backward != 0:
raise ValueError("This model can not run backwards.")
# Change 'z' time dimension to be first
z = jnp.swapaxes(z, 0, 1)
# Run recurrent model on inputs
z_0, h_0 = self.recurrence.apply(params, init_key, z)
if num_steps_forward == 1:
z_t = z_0
elif num_steps_forward > 1:
p_x_0 = self.decode_latents(params, dec_key, z_0[-1], is_training=False)
_, (_, z_t) = self.unroll_without_inputs(
params=params,
rng=unroll_key,
x_init=p_x_0.mean(),
h_init=h_0,
num_steps=num_steps_forward-1,
is_training=is_training
)
z_t = jnp.concatenate([z_0, z_t], axis=0)
else:
raise ValueError("num_steps_forward should be at least 1.")
# Make time dimension second
return jnp.swapaxes(z_t, 0, 1), dict()
def _models_core(
self,
params: utils.Params,
keys: jnp.ndarray,
image_data: jnp.ndarray,
is_training: bool,
**unroll_kwargs: Any
) -> Tuple[distrax.Distribution, jnp.ndarray, jnp.ndarray]:
enc_key, _, transform_key, unroll_key, dec_key, _ = keys
# Calculate latent input representation
inference_data = self.process_inputs_for_encoder(image_data)
z_raw = self.encoder.apply(params, enc_key, inference_data,
is_training=is_training)
# Apply latent transformation (should be identity)
z0 = self.apply_latent_transform(params, transform_key, z_raw,
is_training=is_training)
z0 = self.process_latents_for_dynamics(z0)
# Calculate latent output representation
decoder_z, _ = self.unroll_latent_dynamics(
z=z0,
params=params,
key=unroll_key,
is_training=is_training,
**unroll_kwargs
)
decoder_z = self.process_latents_for_decoder(decoder_z)
# Compute p(x|z)
p_x = self.decode_latents(params, dec_key, decoder_z,
is_training=is_training)
return p_x, z0, decoder_z
def training_objectives( # pytype: disable=signature-mismatch # jax-ndarray
self,
params: hk.Params,
state: hk.State,
rng: jnp.ndarray,
inputs: jnp.ndarray,
step: jnp.ndarray,
is_training: bool = True,
use_mean_for_eval_stats: bool = True
) -> Tuple[jnp.ndarray, Sequence[Dict[str, jnp.ndarray]]]:
"""Computes the training objective and any supporting stats."""
# Split all rng keys
keys = jnr.split(rng, 6)
# Process training data
images = utils.extract_image(inputs)
image_data, target_data, unroll_kwargs = self.train_data_split(images)
p_x, _, _ = self._models_core(
params=params,
keys=keys,
image_data=image_data,
is_training=is_training,
**unroll_kwargs
)
# Compute training statistics
stats = metrics.training_statistics(
p_x=p_x,
targets=target_data,
rescale_by=self.rescale_by,
p_x_learned_sigma=self.decoder_kwargs.get("learned_sigma", False)
)
# The loss is just the negative log-likelihood (e.g. the L2 loss)
stats["loss"] = stats["neg_log_p_x"]
if not is_training:
      # When not training, additionally compute the evaluation-only statistics.
      # We need to be able to set `use_mean = False` for some of the tests.
stats.update(metrics.evaluation_only_statistics(
reconstruct_func=functools.partial(
self.reconstruct, use_mean=use_mean_for_eval_stats),
params=params,
inputs=inputs,
rng=rng,
rescale_by=self.rescale_by,
can_run_backwards=self.can_run_backwards,
train_sequence_length=self.train_sequence_length,
reconstruction_skip=1,
p_x_learned_sigma=self.decoder_kwargs.get("learned_sigma", False)
))
return stats["loss"], (dict(), stats, dict())
def reconstruct(
self,
params: utils.Params,
inputs: jnp.ndarray,
rng: jnp.ndarray,
forward: bool,
use_mean: bool = True,
) -> distrax.Distribution:
"""Reconstructs the input sequence."""
if not forward:
raise ValueError("This model can not run backwards.")
images = utils.extract_image(inputs)
image_data = images[:, :self.num_inference_steps]
return self._models_core(
params=params,
keys=jnr.split(rng, 6),
image_data=image_data,
is_training=False,
num_steps_forward=images.shape[1] - self.num_inference_steps,
num_steps_backward=0,
include_z0=False,
)[0]
def gt_state_and_latents( # pytype: disable=signature-mismatch # jax-ndarray
self,
params: hk.Params,
rng: jnp.ndarray,
inputs: Dict[str, jnp.ndarray],
seq_length: int,
is_training: bool = False,
unroll_direction: str = "forward",
**kwargs: Dict[str, Any]
) -> Tuple[jnp.ndarray, jnp.ndarray,
Union[distrax.Distribution, jnp.ndarray]]:
"""Computes the ground state and matching latents."""
assert unroll_direction == "forward"
images = utils.extract_image(inputs)
gt_state = utils.extract_gt_state(inputs)
image_data = images[:, :self.num_inference_steps]
gt_state = gt_state[:, 1:seq_length + 1]
_, z_in, z_out = self._models_core(
params=params,
keys=jnr.split(rng, 6),
image_data=image_data,
is_training=False,
num_steps_forward=images.shape[1] - self.num_inference_steps,
num_steps_backward=0,
include_z0=False,
)
return gt_state, z_out, z_in
def _init_non_model_params_and_state(
self,
rng: jnp.ndarray
) -> Tuple[Dict[str, jnp.ndarray], Dict[str, jnp.ndarray]]:
return dict(), dict()
def _init_latent_system( # pytype: disable=signature-mismatch # jax-ndarray
self,
rng: jnp.ndarray,
z: jnp.ndarray,
**kwargs: Any
) -> utils.Params:
return self.recurrence.init(rng, z)
|
deepmind-research-master
|
physics_inspired_models/models/autoregressive.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
deepmind-research-master
|
physics_inspired_models/models/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing all of the networks as Haiku modules."""
from typing import Any, Callable, Mapping, Optional, Sequence, Union
from absl import logging
import distrax
import haiku as hk
import jax.numpy as jnp
from physics_inspired_models import utils
Activation = Union[str, Callable[[jnp.ndarray], jnp.ndarray]]
class DenseNet(hk.Module):
"""A feed forward network (MLP)."""
def __init__(
self,
num_units: Sequence[int],
activate_final: bool = False,
activation: Activation = "leaky_relu",
name: Optional[str] = None):
super().__init__(name=name)
self.num_units = num_units
self.num_layers = len(self.num_units)
self.activate_final = activate_final
self.activation = utils.get_activation(activation)
self.linear_modules = []
for i in range(self.num_layers):
self.linear_modules.append(
hk.Linear(
output_size=self.num_units[i],
name=f"ff_{i}"
)
)
def __call__(self, inputs: jnp.ndarray, is_training: bool):
net = inputs
for i, linear in enumerate(self.linear_modules):
net = linear(net)
if i < self.num_layers - 1 or self.activate_final:
net = self.activation(net)
return net
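# Minimal usage sketch (illustrative shapes; to be run inside an
# hk.transform-ed function):
#   net = DenseNet(num_units=[64, 64, 10], activation="tanh")
#   out = net(jnp.ones([8, 32]), is_training=True)  # out.shape == (8, 10)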
class Conv2DNet(hk.Module):
"""Convolutional Network."""
def __init__(
self,
output_channels: Sequence[int],
kernel_shapes: Union[int, Sequence[int]] = 3,
strides: Union[int, Sequence[int]] = 1,
padding: Union[str, Sequence[str]] = "SAME",
data_format: str = "NHWC",
with_batch_norm: bool = False,
activate_final: bool = False,
activation: Activation = "leaky_relu",
name: Optional[str] = None):
super().__init__(name=name)
self.output_channels = tuple(output_channels)
self.num_layers = len(self.output_channels)
self.kernel_shapes = utils.bcast_if(kernel_shapes, int, self.num_layers)
self.strides = utils.bcast_if(strides, int, self.num_layers)
self.padding = utils.bcast_if(padding, str, self.num_layers)
self.data_format = data_format
self.with_batch_norm = with_batch_norm
self.activate_final = activate_final
self.activation = utils.get_activation(activation)
    if len(self.kernel_shapes) != self.num_layers:
      raise ValueError(f"Kernel shapes is of size {len(self.kernel_shapes)}, "
                       f"while output_channels is of size {self.num_layers}.")
    if len(self.strides) != self.num_layers:
      raise ValueError(f"Strides is of size {len(self.strides)}, while "
                       f"output_channels is of size {self.num_layers}.")
    if len(self.padding) != self.num_layers:
      raise ValueError(f"Padding is of size {len(self.padding)}, while "
                       f"output_channels is of size {self.num_layers}.")
self.conv_modules = []
self.bn_modules = []
for i in range(self.num_layers):
self.conv_modules.append(
hk.Conv2D(
output_channels=self.output_channels[i],
kernel_shape=self.kernel_shapes[i],
stride=self.strides[i],
padding=self.padding[i],
data_format=data_format,
name=f"conv_2d_{i}")
)
if with_batch_norm:
self.bn_modules.append(
hk.BatchNorm(
create_offset=True,
create_scale=False,
decay_rate=0.999,
name=f"batch_norm_{i}")
)
else:
self.bn_modules.append(None)
def __call__(self, inputs: jnp.ndarray, is_training: bool):
assert inputs.ndim == 4
net = inputs
for i, (conv, bn) in enumerate(zip(self.conv_modules, self.bn_modules)):
net = conv(net)
# Batch norm
if bn is not None:
net = bn(net, is_training=is_training)
if i < self.num_layers - 1 or self.activate_final:
net = self.activation(net)
return net
class SpatialConvEncoder(hk.Module):
"""Spatial Convolutional Encoder for learning the Hamiltonian."""
def __init__(
self,
latent_dim: int,
conv_channels: Union[Sequence[int], int],
num_blocks: int,
blocks_depth: int = 2,
distribution_name: str = "diagonal_normal",
aggregation_type: Optional[str] = None,
data_format: str = "NHWC",
activation: Activation = "leaky_relu",
scale_factor: int = 2,
kernel_shapes: Union[Sequence[int], int] = 3,
padding: Union[Sequence[str], str] = "SAME",
name: Optional[str] = None):
super().__init__(name=name)
if aggregation_type not in (None, "max", "mean", "linear_projection"):
raise ValueError(f"Unrecognized aggregation_type={aggregation_type}.")
self.latent_dim = latent_dim
self.conv_channels = conv_channels
self.num_blocks = num_blocks
self.scale_factor = scale_factor
self.data_format = data_format
self.distribution_name = distribution_name
self.aggregation_type = aggregation_type
# Compute the required size of the output
if distribution_name is None:
self.output_dim = latent_dim
elif distribution_name == "diagonal_normal":
self.output_dim = 2 * latent_dim
else:
raise ValueError(f"Unrecognized distribution_name={distribution_name}.")
if isinstance(conv_channels, int):
conv_channels = [[conv_channels] * blocks_depth
for _ in range(num_blocks)]
conv_channels[-1] += [self.output_dim]
else:
assert isinstance(conv_channels, (list, tuple))
assert len(conv_channels) == num_blocks
conv_channels = list(list(c) for c in conv_channels)
conv_channels[-1].append(self.output_dim)
if isinstance(kernel_shapes, tuple):
kernel_shapes = list(kernel_shapes)
# Convolutional blocks
self.blocks = []
for i, channels in enumerate(conv_channels):
if isinstance(kernel_shapes, int):
extra_kernel_shapes = 0
else:
extra_kernel_shapes = [3] * (len(channels) - len(kernel_shapes))
self.blocks.append(Conv2DNet(
output_channels=channels,
kernel_shapes=kernel_shapes + extra_kernel_shapes,
strides=[self.scale_factor] + [1] * (len(channels) - 1),
padding=padding,
data_format=data_format,
with_batch_norm=False,
activate_final=i < num_blocks - 1,
activation=activation,
name=f"block_{i}"
))
def spatial_aggregation(self, x: jnp.ndarray) -> jnp.ndarray:
if self.aggregation_type is None:
return x
axis = (1, 2) if self.data_format == "NHWC" else (2, 3)
if self.aggregation_type == "max":
return jnp.max(x, axis=axis)
if self.aggregation_type == "mean":
return jnp.mean(x, axis=axis)
if self.aggregation_type == "linear_projection":
x = x.reshape(x.shape[:-3] + (-1,))
return hk.Linear(self.output_dim, name="LinearProjection")(x)
raise NotImplementedError()
def make_distribution(self, net_output: jnp.ndarray) -> distrax.Distribution:
if self.distribution_name is None:
return net_output
elif self.distribution_name == "diagonal_normal":
if self.aggregation_type is None:
split_axis, num_axes = self.data_format.index("C"), 3
else:
split_axis, num_axes = 1, 1
# Add an extra axis if the input has more than 1 batch dimension
split_axis += net_output.ndim - num_axes - 1
loc, log_scale = jnp.split(net_output, 2, axis=split_axis)
return distrax.Normal(loc, jnp.exp(log_scale))
else:
raise NotImplementedError()
def __call__(
self,
inputs: jnp.ndarray,
is_training: bool
) -> Union[jnp.ndarray, distrax.Distribution]:
# Treat any extra dimensions (like time) as the batch
batched_shape = inputs.shape[:-3]
net = jnp.reshape(inputs, (-1,) + inputs.shape[-3:])
# Apply all blocks in sequence
for block in self.blocks:
net = block(net, is_training=is_training)
# Final projection
net = self.spatial_aggregation(net)
# Reshape back to correct dimensions (like batch + time)
net = jnp.reshape(net, batched_shape + net.shape[1:])
# Return a distribution over the observations
return self.make_distribution(net)
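# Illustrative shape sketch, not part of the original file. Each block of
# `SpatialConvEncoder` downsamples by `scale_factor` via its first strided
# convolution, so with num_blocks=3 and scale_factor=2 a 64x64 input shrinks
# to 8x8. With the default distribution_name="diagonal_normal" the last layer
# emits 2 * latent_dim channels that `make_distribution` splits into a mean
# and a log-scale. The concrete numbers below are assumptions for the demo.
def _spatial_conv_encoder_shape_sketch():
  import jax.random as jnr
  def forward(x):
    encoder = SpatialConvEncoder(
        latent_dim=16,
        conv_channels=32,
        num_blocks=3,
        aggregation_type="mean")
    return encoder(x, is_training=True)
  model = hk.transform(forward)
  x = jnp.zeros((4, 64, 64, 3))
  params = model.init(jnr.PRNGKey(0), x)
  q_z = model.apply(params, None, x)  # A distrax.Normal distribution.
  return q_z.mean().shape             # (4, 16) after the mean aggregation.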
class SpatialConvDecoder(hk.Module):
"""Spatial Convolutional Decoder for learning the Hamiltonian."""
def __init__(
self,
initial_spatial_shape: Sequence[int],
conv_channels: Union[Sequence[int], int],
num_blocks: int,
max_de_aggregation_dims: int,
blocks_depth: int = 2,
scale_factor: int = 2,
output_channels: int = 3,
h_const_channels: int = 2,
data_format: str = "NHWC",
activation: Activation = "leaky_relu",
learned_sigma: bool = False,
de_aggregation_type: Optional[str] = None,
final_activation: Activation = "sigmoid",
discard_half_de_aggregated: bool = False,
kernel_shapes: Union[Sequence[int], int] = 3,
padding: Union[Sequence[str], str] = "SAME",
name: Optional[str] = None):
super().__init__(name=name)
if de_aggregation_type not in (None, "tile", "linear_projection"):
raise ValueError(f"Unrecognized de_aggregation_type="
f"{de_aggregation_type}.")
self.num_blocks = num_blocks
self.scale_factor = scale_factor
self.h_const_channels = h_const_channels
self.data_format = data_format
self.learned_sigma = learned_sigma
self.initial_spatial_shape = tuple(initial_spatial_shape)
self.final_activation = utils.get_activation(final_activation)
self.de_aggregation_type = de_aggregation_type
self.max_de_aggregation_dims = max_de_aggregation_dims
self.discard_half_de_aggregated = discard_half_de_aggregated
if isinstance(conv_channels, int):
conv_channels = [[conv_channels] * blocks_depth
for _ in range(num_blocks)]
conv_channels[-1] += [output_channels]
else:
assert isinstance(conv_channels, (list, tuple))
assert len(conv_channels) == num_blocks
conv_channels = list(list(c) for c in conv_channels)
conv_channels[-1].append(output_channels)
# Convolutional blocks
self.blocks = []
for i, channels in enumerate(conv_channels):
is_final_block = i == num_blocks - 1
self.blocks.append(
Conv2DNet( # pylint: disable=g-complex-comprehension
output_channels=channels,
kernel_shapes=kernel_shapes,
strides=1,
padding=padding,
data_format=data_format,
with_batch_norm=False,
activate_final=not is_final_block,
activation=activation,
name=f"block_{i}"
))
def spatial_de_aggregation(self, x: jnp.ndarray) -> jnp.ndarray:
if self.de_aggregation_type is None:
assert x.ndim >= 4
if self.data_format == "NHWC":
assert x.shape[1:3] == self.initial_spatial_shape
elif self.data_format == "NCHW":
assert x.shape[2:4] == self.initial_spatial_shape
return x
elif self.de_aggregation_type == "linear_projection":
assert x.ndim == 2
n, d = x.shape
d = min(d, self.max_de_aggregation_dims or d)
out_d = d * self.initial_spatial_shape[0] * self.initial_spatial_shape[1]
x = hk.Linear(out_d, name="LinearProjection")(x)
if self.data_format == "NHWC":
shape = (n,) + self.initial_spatial_shape + (d,)
else:
shape = (n, d) + self.initial_spatial_shape
return x.reshape(shape)
elif self.de_aggregation_type == "tile":
assert x.ndim == 2
if self.data_format == "NHWC":
repeats = (1,) + self.initial_spatial_shape + (1,)
x = x[:, None, None, :]
else:
repeats = (1, 1) + self.initial_spatial_shape
x = x[:, :, None, None]
return jnp.tile(x, repeats)
else:
raise NotImplementedError()
def add_constant_channels(self, inputs: jnp.ndarray) -> jnp.ndarray:
# --------------------------------------------
# This is purely for TF compatibility purposes
if self.discard_half_de_aggregated:
axis = self.data_format.index("C")
inputs, _ = jnp.split(inputs, 2, axis=axis)
# --------------------------------------------
    # Extra constant channels, stored as a learned parameter
if self.data_format == "NHWC":
h_shape = self.initial_spatial_shape + (self.h_const_channels,)
else:
h_shape = (self.h_const_channels,) + self.initial_spatial_shape
h_const = hk.get_parameter("h", h_shape, dtype=inputs.dtype,
init=hk.initializers.Constant(1))
h_const = jnp.tile(h_const, reps=[inputs.shape[0], 1, 1, 1])
return jnp.concatenate([h_const, inputs], axis=self.data_format.index("C"))
def make_distribution(self, net_output: jnp.ndarray) -> distrax.Distribution:
if self.learned_sigma:
init = hk.initializers.Constant(- jnp.log(2.0) / 2.0)
log_scale = hk.get_parameter("log_scale", shape=(),
dtype=net_output.dtype, init=init)
scale = jnp.full_like(net_output, jnp.exp(log_scale))
else:
scale = jnp.full_like(net_output, 1 / jnp.sqrt(2.0))
return distrax.Normal(net_output, scale)
def __call__(
self,
inputs: jnp.ndarray,
is_training: bool
) -> distrax.Distribution:
# Apply the spatial de-aggregation
inputs = self.spatial_de_aggregation(inputs)
# Add the parameterized constant channels
net = self.add_constant_channels(inputs)
# Apply all the blocks
for block in self.blocks:
# Up-sample the image
net = utils.nearest_neighbour_upsampling(net, self.scale_factor)
# Apply the convolutional block
net = block(net, is_training=is_training)
# Apply any specific output nonlinearity
net = self.final_activation(net)
# Construct the distribution over the observations
return self.make_distribution(net)
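# Illustrative shape sketch, not part of the original file. The decoder is the
# mirror image of the encoder: latents are first spatially de-aggregated to
# `initial_spatial_shape`, then every block performs nearest-neighbour
# upsampling by `scale_factor` followed by convolutions, so (4, 4) latents and
# num_blocks=4 yield a 64x64 image. The numbers below are demo assumptions.
def _spatial_conv_decoder_shape_sketch():
  import jax.random as jnr
  def forward(z):
    decoder = SpatialConvDecoder(
        initial_spatial_shape=(4, 4),
        conv_channels=32,
        num_blocks=4,               # 4x4 -> 8 -> 16 -> 32 -> 64.
        max_de_aggregation_dims=8,  # Only used by "linear_projection".
        de_aggregation_type="tile")
    return decoder(z, is_training=True)
  model = hk.transform(forward)
  z = jnp.zeros((4, 16))            # Flat latents, de-aggregated by tiling.
  params = model.init(jnr.PRNGKey(0), z)
  p_x = model.apply(params, None, z)  # A distrax.Normal over pixels.
  return p_x.mean().shape             # (4, 64, 64, 3).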
def make_flexible_net(
net_type: str,
output_dims: int,
conv_channels: Union[Sequence[int], int],
num_units: Union[Sequence[int], int],
num_layers: Optional[int],
activation: Activation,
activate_final: bool = False,
kernel_shapes: Union[Sequence[int], int] = 3,
strides: Union[Sequence[int], int] = 1,
padding: Union[Sequence[str], str] = "SAME",
name: Optional[str] = None,
**unused_kwargs: Mapping[str, Any]
):
"""Commonly used for creating a flexible network."""
if unused_kwargs:
logging.warning("Unused kwargs of `make_flexible_net`: %s",
str(unused_kwargs))
if net_type == "mlp":
if isinstance(num_units, int):
assert num_layers is not None
num_units = [num_units] * (num_layers - 1) + [output_dims]
else:
num_units = list(num_units) + [output_dims]
return DenseNet(
num_units=num_units,
activation=activation,
activate_final=activate_final,
name=name
)
elif net_type == "conv":
if isinstance(conv_channels, int):
assert num_layers is not None
conv_channels = [conv_channels] * (num_layers - 1) + [output_dims]
else:
conv_channels = list(conv_channels) + [output_dims]
return Conv2DNet(
output_channels=conv_channels,
kernel_shapes=kernel_shapes,
strides=strides,
padding=padding,
activation=activation,
activate_final=activate_final,
name=name
)
elif net_type == "transformer":
raise NotImplementedError()
else:
raise ValueError(f"Unrecognized net_type={net_type}.")
def make_flexible_recurrent_net(
core_type: str,
net_type: str,
output_dims: int,
num_units: Union[Sequence[int], int],
num_layers: Optional[int],
activation: Activation,
activate_final: bool = False,
name: Optional[str] = None,
**unused_kwargs
):
"""Commonly used for creating a flexible recurrences."""
if net_type != "mlp":
raise ValueError("We do not support convolutional recurrent nets atm.")
if unused_kwargs:
logging.warning("Unused kwargs of `make_flexible_recurrent_net`: %s",
str(unused_kwargs))
if isinstance(num_units, (list, tuple)):
num_units = list(num_units) + [output_dims]
num_layers = len(num_units)
else:
assert num_layers is not None
num_units = [num_units] * (num_layers - 1) + [output_dims]
name = name or f"{core_type.upper()}"
activation = utils.get_activation(activation)
core_list = []
for i, n in enumerate(num_units):
if core_type.lower() == "vanilla":
core_list.append(hk.VanillaRNN(hidden_size=n, name=f"{name}_{i}"))
elif core_type.lower() == "lstm":
core_list.append(hk.LSTM(hidden_size=n, name=f"{name}_{i}"))
elif core_type.lower() == "gru":
core_list.append(hk.GRU(hidden_size=n, name=f"{name}_{i}"))
else:
raise ValueError(f"Unrecognized core_type={core_type}.")
if i != num_layers - 1:
core_list.append(activation)
if activate_final:
core_list.append(activation)
return hk.DeepRNN(core_list, name="RNN")
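# Illustrative usage sketch, not part of the original file: the returned
# `hk.DeepRNN` can be unrolled over a time-major sequence with
# `hk.dynamic_unroll`. All sizes below are assumptions made for the demo.
def _make_flexible_recurrent_net_sketch():
  import jax.random as jnr
  def forward(xs):
    core = make_flexible_recurrent_net(
        core_type="gru",
        net_type="mlp",
        output_dims=8,
        num_units=32,      # One hidden GRU of size 32 plus the output GRU.
        num_layers=2,
        activation="leaky_relu")
    state = core.initial_state(batch_size=xs.shape[1])
    outputs, _ = hk.dynamic_unroll(core, xs, state)  # Time-major inputs.
    return outputs
  model = hk.transform(forward)
  xs = jnp.zeros((5, 2, 10))  # [T=5, B=2, features=10].
  params = model.init(jnr.PRNGKey(0), xs)
  return model.apply(params, None, xs)  # Shape (5, 2, 8).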
|
deepmind-research-master
|
physics_inspired_models/models/networks.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all models."""
from typing import Any, Dict, Optional
import physics_inspired_models.models.autoregressive as autoregressive
import physics_inspired_models.models.deterministic_vae as deterministic_vae
_physics_arguments = (
"input_space", "simulation_space", "potential_func_form",
"kinetic_func_form", "hgn_kinetic_func_form", "lgn_kinetic_func_form",
"parametrize_mass_matrix", "hgn_parametrize_mass_matrix",
"lgn_parametrize_mass_matrix", "mass_eps"
)
def construct_model(
name: str,
*args,
**kwargs: Dict[str, Any]
):
"""Constructs the correct instance of a model given the short name."""
latent_dynamics_type: Optional[str] = kwargs.pop("latent_dynamics_type", None) # pytype: disable=annotation-type-mismatch
latent_system_kwargs = dict(**kwargs.pop("latent_system_kwargs", dict()))
if name == "AR":
assert latent_dynamics_type in ("vanilla", "lstm", "gru")
    # These arguments are not part of the AR models
for k in _physics_arguments + ("integrator_method", "residual"):
latent_system_kwargs.pop(k, None)
return autoregressive.TeacherForcingAutoregressiveModel(
*args,
latent_dynamics_type=latent_dynamics_type,
latent_system_kwargs=latent_system_kwargs,
**kwargs
)
elif name == "RGN":
assert latent_dynamics_type in ("Discrete", None)
latent_dynamics_type = "Discrete"
    # These arguments are not part of the RGN models
for k in _physics_arguments + ("integrator_method",):
latent_system_kwargs.pop(k, None)
elif name == "ODE":
assert latent_dynamics_type in ("ODE", None)
latent_dynamics_type = "ODE"
    # These arguments are not part of the ODE models
for k in _physics_arguments + ("residual",):
latent_system_kwargs.pop(k, None)
elif name == "HGN":
assert latent_dynamics_type in ("Physics", None)
latent_dynamics_type = "Physics"
assert latent_system_kwargs.get("input_space", None) in ("momentum", None)
latent_system_kwargs["input_space"] = "momentum"
assert (latent_system_kwargs.get("simulation_space", None)
in ("momentum", None))
latent_system_kwargs["simulation_space"] = "momentum"
# Kinetic func form
hgn_specific = latent_system_kwargs.pop("hgn_kinetic_func_form", None)
if hgn_specific is not None:
latent_system_kwargs["kinetic_func_form"] = hgn_specific
# Mass matrix
hgn_specific = latent_system_kwargs.pop("hgn_parametrize_mass_matrix",
None)
if hgn_specific is not None:
latent_system_kwargs["parametrize_mass_matrix"] = hgn_specific
    # These arguments are not part of the HGN models
latent_system_kwargs.pop("residual", None)
latent_system_kwargs.pop("lgn_kinetic_func_form", None)
latent_system_kwargs.pop("lgn_parametrize_mass_matrix", None)
elif name == "LGN":
assert latent_dynamics_type in ("Physics", None)
latent_dynamics_type = "Physics"
assert latent_system_kwargs.get("input_space", None) in ("velocity", None)
latent_system_kwargs["input_space"] = "velocity"
assert (latent_system_kwargs.get("simulation_space", None) in
("velocity", None))
latent_system_kwargs["simulation_space"] = "velocity"
# Kinetic func form
lgn_specific = latent_system_kwargs.pop("lgn_kinetic_func_form", None)
if lgn_specific is not None:
latent_system_kwargs["kinetic_func_form"] = lgn_specific
# Mass matrix
lgn_specific = latent_system_kwargs.pop("lgn_parametrize_mass_matrix",
None)
if lgn_specific is not None:
latent_system_kwargs["parametrize_mass_matrix"] = lgn_specific
    # These arguments are not part of the LGN models
latent_system_kwargs.pop("residual", None)
latent_system_kwargs.pop("hgn_kinetic_func_form", None)
latent_system_kwargs.pop("hgn_parametrize_mass_matrix", None)
elif name == "PGN":
assert latent_dynamics_type in ("Physics", None)
latent_dynamics_type = "Physics"
    # These arguments are not part of the PGN models
    latent_system_kwargs.pop("residual", None)
latent_system_kwargs.pop("hgn_kinetic_func_form", None)
latent_system_kwargs.pop("hgn_parametrize_mass_matrix", None)
latent_system_kwargs.pop("lgn_kinetic_func_form", None)
latent_system_kwargs.pop("lgn_parametrize_mass_matrix", None)
else:
raise NotImplementedError()
return deterministic_vae.DeterministicLatentsGenerativeModel(
*args,
latent_dynamics_type=latent_dynamics_type,
latent_system_kwargs=latent_system_kwargs,
**kwargs)
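# Illustrative note, not part of the original file: model-specific keys are
# remapped onto the generic ones before the latent system is built. For
# example, for name="HGN" a config such as
#   latent_system_kwargs=dict(hgn_kinetic_func_form="pure_quad",
#                             hgn_parametrize_mass_matrix=True)
# is rewritten to kinetic_func_form="pure_quad" and
# parametrize_mass_matrix=True, with input_space and simulation_space both
# forced to "momentum". The LGN branch is symmetric, using the lgn_* keys and
# the "velocity" spaces.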
|
deepmind-research-master
|
physics_inspired_models/models/common.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the main models code."""
import functools
from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Union
import distrax
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
import haiku as hk
import jax.numpy as jnp
import jax.random as jnr
import numpy as np
from physics_inspired_models import metrics
from physics_inspired_models import utils
from physics_inspired_models.models import base
from physics_inspired_models.models import dynamics
_ArrayOrPhase = Union[jnp.ndarray, phase_space.PhaseSpace]
class DeterministicLatentsGenerativeModel(base.SequenceModel[_ArrayOrPhase]):
"""Common class for generative models with deterministic latent dynamics."""
def __init__(
self,
latent_system_dim: int,
latent_system_net_type: str,
latent_system_kwargs: Dict[str, Any],
latent_dynamics_type: str,
encoder_aggregation_type: Optional[str],
decoder_de_aggregation_type: Optional[str],
encoder_kwargs: Dict[str, Any],
decoder_kwargs: Dict[str, Any],
num_inference_steps: int,
num_target_steps: int,
latent_training_type: str,
training_data_split: str,
objective_type: str,
dt: float = 0.125,
render_from_q_only: bool = True,
prior_type: str = "standard_normal",
use_analytical_kl: bool = True,
geco_kappa: float = 0.001,
geco_alpha: Optional[float] = 0.0,
elbo_beta_delay: int = 0,
elbo_beta_final: float = 1.0,
name: Optional[str] = None,
**kwargs
):
can_run_backwards = latent_dynamics_type in ("ODE", "Physics")
# Verify arguments
if objective_type not in ("GECO", "ELBO", "NON-PROB"):
raise ValueError(f"Unrecognized training type - {objective_type}")
if geco_alpha is None:
geco_alpha = 0
if geco_alpha < 0 or geco_alpha >= 1:
raise ValueError("GECO alpha parameter must be in [0, 1).")
if prior_type not in ("standard_normal", "made", "made_gated"):
raise ValueError(f"Unrecognized prior_type='{prior_type}.")
if (latent_training_type == "forward_backward" and
training_data_split != "include_inference"):
raise ValueError("Training forward_backward works only when "
"training_data_split=include_inference.")
if (latent_training_type == "forward_backward" and
num_inference_steps % 2 == 0):
raise ValueError("Training forward_backward works only when "
"num_inference_steps are odd.")
if latent_training_type == "forward_backward" and not can_run_backwards:
raise ValueError("Training forward_backward works only when the model can"
" be run backwards.")
if prior_type != "standard_normal":
raise ValueError("For now we support only `standard_normal`.")
super().__init__(
can_run_backwards=can_run_backwards,
latent_system_dim=latent_system_dim,
latent_system_net_type=latent_system_net_type,
latent_system_kwargs=latent_system_kwargs,
encoder_aggregation_type=encoder_aggregation_type,
decoder_de_aggregation_type=decoder_de_aggregation_type,
encoder_kwargs=encoder_kwargs,
decoder_kwargs=decoder_kwargs,
num_inference_steps=num_inference_steps,
num_target_steps=num_target_steps,
name=name,
**kwargs
)
# VAE specific arguments
self.prior_type = prior_type
self.objective_type = objective_type
self.use_analytical_kl = use_analytical_kl
self.geco_kappa = geco_kappa
self.geco_alpha = geco_alpha
self.elbo_beta_delay = elbo_beta_delay
self.elbo_beta_final = jnp.asarray(elbo_beta_final)
# The dynamics module and arguments
self.latent_dynamics_type = latent_dynamics_type
self.latent_training_type = latent_training_type
self.training_data_split = training_data_split
self.dt = dt
self.render_from_q_only = render_from_q_only
latent_system_kwargs["net_kwargs"] = dict(
latent_system_kwargs["net_kwargs"])
latent_system_kwargs["net_kwargs"]["net_type"] = self.latent_system_net_type
if self.latent_dynamics_type == "Physics":
# Note that here system_dim means the dimensionality of `q` and `p`.
model_constructor = functools.partial(
dynamics.PhysicsSimulationNetwork,
system_dim=self.latent_system_dim // 2,
name="Physics",
**latent_system_kwargs
)
elif self.latent_dynamics_type == "ODE":
model_constructor = functools.partial(
dynamics.OdeNetwork,
system_dim=self.latent_system_dim,
name="ODE",
**latent_system_kwargs
)
elif self.latent_dynamics_type == "Discrete":
model_constructor = functools.partial(
dynamics.DiscreteDynamicsNetwork,
system_dim=self.latent_system_dim,
name="Discrete",
**latent_system_kwargs
)
else:
raise NotImplementedError()
self.dynamics = hk.transform(
lambda *args, **kwargs_: model_constructor()(*args, **kwargs_)) # pylint: disable=unnecessary-lambda
def process_inputs_for_encoder(self, x: jnp.ndarray) -> jnp.ndarray:
return utils.stack_time_into_channels(x, self.data_format)
def process_latents_for_dynamics(self, z: jnp.ndarray) -> _ArrayOrPhase:
if self.latent_dynamics_type == "Physics":
return phase_space.PhaseSpace.from_state(z)
return z
def process_latents_for_decoder(self, z: _ArrayOrPhase) -> jnp.ndarray:
if self.latent_dynamics_type == "Physics":
return z.q if self.render_from_q_only else z.single_state # pytype: disable=attribute-error # jax-ndarray
return z # pytype: disable=bad-return-type # jax-ndarray
@property
def inferred_index(self) -> int:
if self.latent_training_type == "forward":
return self.num_inference_steps - 1
elif self.latent_training_type == "forward_backward":
assert self.num_inference_steps % 2 == 1
return self.num_inference_steps // 2
else:
raise NotImplementedError()
@property
def targets_index_offset(self) -> int:
if self.training_data_split == "overlap_by_one":
return -1
elif self.training_data_split == "no_overlap":
return 0
elif self.training_data_split == "include_inference":
return - self.num_inference_steps
else:
raise NotImplementedError()
@property
def targets_length(self) -> int:
if self.training_data_split == "include_inference":
return self.num_inference_steps + self.num_target_steps
return self.num_target_steps
@property
def train_sequence_length(self) -> int:
"""Computes the total length of a sequence needed for training."""
if self.training_data_split == "overlap_by_one":
# Input - [-------------------------------------------------]
# Inference - [---------------]
# Targets - [---------------------------------]
return self.num_inference_steps + self.num_target_steps - 1
elif self.training_data_split == "no_overlap":
# Input - [-------------------------------------------------]
# Inference - [---------------]
# Targets - [--------------------------------]
return self.num_inference_steps + self.num_target_steps
elif self.training_data_split == "include_inference":
# Input - [-------------------------------------------------]
# Inference - [---------------]
# Targets - [-------------------------------------------------]
return self.num_inference_steps + self.num_target_steps
else:
raise NotImplementedError()
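  # Worked example, not part of the original file: with num_inference_steps=5
  # and num_target_steps=10 the required sequence length is 14 for
  # "overlap_by_one" (5 + 10 - 1, since the targets start at the last
  # inference frame), and 15 for both "no_overlap" (5 + 10) and
  # "include_inference" (the targets cover all 15 frames).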
def train_data_split(
self,
images: jnp.ndarray
) -> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, Any]]:
images = images[:, :self.train_sequence_length]
inf_idx = self.num_inference_steps
t_idx = self.num_inference_steps + self.targets_index_offset
if self.latent_training_type == "forward":
inference_data = images[:, :inf_idx]
target_data = images[:, t_idx:]
if self.training_data_split == "include_inference":
num_steps_backward = self.inferred_index
else:
num_steps_backward = 0
num_steps_forward = self.num_target_steps
if self.training_data_split == "overlap_by_one":
num_steps_forward -= 1
unroll_kwargs = dict(
num_steps_backward=num_steps_backward,
include_z0=self.training_data_split != "no_overlap",
num_steps_forward=num_steps_forward,
dt=self.dt
)
elif self.latent_training_type == "forward_backward":
assert self.training_data_split == "include_inference"
n_fwd = images.shape[0] // 2
inference_fwd = images[:n_fwd, :inf_idx]
targets_fwd = images[:n_fwd, t_idx:]
inference_bckwd = images[n_fwd:, -inf_idx:]
targets_bckwd = jnp.flip(images[n_fwd:, :images.shape[1] - t_idx], axis=1)
inference_data = jnp.concatenate([inference_fwd, inference_bckwd], axis=0)
target_data = jnp.concatenate([targets_fwd, targets_bckwd], axis=0)
      # This needs to be numpy rather than jax.numpy, because we make some
      # verification checks in `integrators.py:149-161`.
dt_fwd = np.full([n_fwd], self.dt)
dt_bckwd = np.full([images.shape[0] - n_fwd], self.dt)
dt = np.concatenate([dt_fwd, -dt_bckwd], axis=0)
unroll_kwargs = dict(
num_steps_backward=self.inferred_index,
include_z0=True,
num_steps_forward=self.targets_length - self.inferred_index - 1,
dt=dt
)
else:
raise NotImplementedError()
return inference_data, target_data, unroll_kwargs
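  # Note, not part of the original file: in the "forward_backward" branch
  # above, the first half of the batch infers from the initial frames and is
  # unrolled forward, while the second half infers from the *last* frames and
  # is unrolled backwards, with its targets time-flipped and its `dt` negated.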
def prior(self) -> distrax.Distribution:
"""Given the parameters returns the prior distribution of the model."""
# Allow to run with both the full parameters and only the priors
if self.prior_type == "standard_normal":
# assert self.prior_nets is None and self.gated_made is None
if self.latent_system_net_type == "mlp":
event_shape = (self.latent_system_dim,)
elif self.latent_system_net_type == "conv":
if self.data_format == "NHWC":
event_shape = self.latent_spatial_shape + (self.latent_system_dim,)
else:
event_shape = (self.latent_system_dim,) + self.latent_spatial_shape
else:
raise NotImplementedError()
return distrax.Normal(jnp.zeros(event_shape), jnp.ones(event_shape))
else:
raise ValueError(f"Unrecognized prior_type='{self.prior_type}'.")
def sample_latent_from_prior(
self,
params: utils.Params,
rng: jnp.ndarray,
num_samples: int = 1,
**kwargs: Any) -> jnp.ndarray:
"""Takes sample from the prior (and optionally puts them through the latent transform function."""
_, sample_key, transf_key = jnr.split(rng, 3)
prior = self.prior()
z_raw = prior.sample(seed=sample_key, sample_shape=[num_samples])
return self.apply_latent_transform(params, transf_key, z_raw, **kwargs)
def sample_trajectories_from_prior(
self,
params: utils.Params,
num_steps: int,
rng: jnp.ndarray,
num_samples: int = 1,
is_training: bool = False,
**kwargs
) -> distrax.Distribution:
"""Generates samples from the prior (unconditional generation)."""
sample_key, unroll_key, dec_key = jnr.split(rng, 3)
z0 = self.sample_latent_from_prior(params, sample_key, num_samples,
is_training=is_training)
z, _ = self.unroll_latent_dynamics(
z=self.process_latents_for_dynamics(z0),
params=params,
key=unroll_key,
num_steps_forward=num_steps,
num_steps_backward=0,
include_z0=True,
is_training=is_training,
**kwargs
)
z = self.process_latents_for_decoder(z)
return self.decode_latents(params, dec_key, z, is_training=is_training)
def verify_unroll_args(
self,
num_steps_forward: int,
num_steps_backward: int,
include_z0: bool
) -> None:
if num_steps_forward < 0 or num_steps_backward < 0:
raise ValueError("num_steps_forward and num_steps_backward can not be "
"negative.")
if num_steps_forward == 0 and num_steps_backward == 0:
raise ValueError("You need one of num_steps_forward or "
"num_of_steps_backward to be positive.")
if num_steps_forward > 0 and num_steps_backward > 0 and not include_z0:
raise ValueError("When both num_steps_forward and num_steps_backward are "
"positive include_t0 should be True.")
if num_steps_backward > 0 and not self.can_run_backwards:
raise ValueError("This model can not be unrolled backward in time.")
def unroll_latent_dynamics( # pytype: disable=signature-mismatch # jax-ndarray
self,
z: phase_space.PhaseSpace,
params: hk.Params,
key: jnp.ndarray,
num_steps_forward: int,
num_steps_backward: int,
include_z0: bool,
is_training: bool,
**kwargs: Any
) -> Tuple[_ArrayOrPhase, Mapping[str, jnp.ndarray]]:
self.verify_unroll_args(num_steps_forward, num_steps_backward, include_z0)
return self.dynamics.apply(
params,
key,
y0=z,
dt=kwargs.pop("dt", self.dt),
num_steps_forward=num_steps_forward,
num_steps_backward=num_steps_backward,
include_y0=include_z0,
return_stats=True,
is_training=is_training
)
def _models_core(
self,
params: utils.Params,
keys: jnp.ndarray,
image_data: jnp.ndarray,
use_mean: bool,
is_training: bool,
**unroll_kwargs: Any
) -> Tuple[distrax.Distribution, distrax.Distribution, distrax.Distribution,
jnp.ndarray, jnp.ndarray, Mapping[str, jnp.ndarray]]:
enc_key, sample_key, transform_key, unroll_key, dec_key, _ = keys
# Calculate the approximate posterior q(z|x)
inference_data = self.process_inputs_for_encoder(image_data)
q_z: distrax.Distribution = self.encoder.apply(params, enc_key,
inference_data,
is_training=is_training)
# Sample latent variables or take the mean
z_raw = q_z.mean() if use_mean else q_z.sample(seed=sample_key)
# Apply latent transformation
z0 = self.apply_latent_transform(params, transform_key, z_raw,
is_training=is_training)
# Unroll the latent variable
z, dyn_stats = self.unroll_latent_dynamics(
z=self.process_latents_for_dynamics(z0),
params=params,
key=unroll_key,
is_training=is_training,
**unroll_kwargs
)
decoder_z = self.process_latents_for_decoder(z)
# Compute p(x|z)
p_x = self.decode_latents(params, dec_key, decoder_z,
is_training=is_training)
z = z.single_state if isinstance(z, phase_space.PhaseSpace) else z
return p_x, q_z, self.prior(), z0, z, dyn_stats
def training_objectives( # pytype: disable=signature-mismatch # jax-ndarray
self,
params: utils.Params,
state: hk.State,
rng: jnp.ndarray,
inputs: jnp.ndarray,
step: jnp.ndarray,
is_training: bool = True,
use_mean_for_eval_stats: bool = True
) -> Tuple[jnp.ndarray, Sequence[Dict[str, jnp.ndarray]]]:
# Split all rng keys
keys = jnr.split(rng, 6)
# Process training data
images = utils.extract_image(inputs)
image_data, target_data, unroll_kwargs = self.train_data_split(images)
p_x, q_z, prior, _, _, dyn_stats = self._models_core(
params=params,
keys=keys,
image_data=image_data,
use_mean=False,
is_training=is_training,
**unroll_kwargs
)
# Note: we reuse the rng key used to sample the latent variable here
# so that it can be reused to evaluate a (non-analytical) KL at that sample.
stats = metrics.training_statistics(
p_x=p_x,
targets=target_data,
rescale_by=self.rescale_by,
rng=keys[1],
q_z=q_z,
prior=prior,
p_x_learned_sigma=self.decoder_kwargs.get("learned_sigma", False)
)
stats.update(dyn_stats)
# Compute other (non-reported statistics)
z_stats = dict()
other_stats = dict(x_reconstruct=p_x.mean(), z_stats=z_stats)
# The loss computation and GECO state update
new_state = dict()
if self.objective_type == "GECO":
geco_stats = metrics.geco_objective(
l2_loss=stats["l2"],
kl=stats["kl"],
alpha=self.geco_alpha,
kappa=self.geco_kappa,
constraint_ema=state["GECO"]["geco_constraint_ema"],
lambda_var=params["GECO"]["geco_lambda_var"],
is_training=is_training
)
new_state["GECO"] = dict(
geco_constraint_ema=geco_stats["geco_constraint_ema"])
stats.update(geco_stats)
elif self.objective_type == "ELBO":
elbo_stats = metrics.elbo_objective(
neg_log_p_x=stats["neg_log_p_x"],
kl=stats["kl"],
final_beta=self.elbo_beta_final,
beta_delay=self.elbo_beta_delay,
step=step
)
stats.update(elbo_stats)
elif self.objective_type == "NON-PROB":
stats["loss"] = stats["neg_log_p_x"]
else:
raise ValueError()
if not is_training:
if self.training_data_split == "overlap_by_one":
reconstruction_skip = self.num_inference_steps - 1
elif self.training_data_split == "no_overlap":
reconstruction_skip = self.num_inference_steps
elif self.training_data_split == "include_inference":
reconstruction_skip = 0
else:
raise NotImplementedError()
# We intentionally reuse the same rng as the training, in order to be able
# to run tests and verify that the evaluation and reconstruction work
# correctly.
# We need to be able to set `use_mean = False` for some of the tests
stats.update(metrics.evaluation_only_statistics(
reconstruct_func=functools.partial(
self.reconstruct, use_mean=use_mean_for_eval_stats),
params=params,
inputs=inputs,
rng=rng,
rescale_by=self.rescale_by,
can_run_backwards=self.can_run_backwards,
train_sequence_length=self.train_sequence_length,
reconstruction_skip=reconstruction_skip,
p_x_learned_sigma=self.decoder_kwargs.get("learned_sigma", False)
))
# Make new state the same type as state
new_state = utils.convert_to_pytype(new_state, state)
return stats["loss"], (new_state, stats, other_stats)
def reconstruct(
self,
params: utils.Params,
inputs: jnp.ndarray,
rng: Optional[jnp.ndarray],
forward: bool,
use_mean: bool = True,
) -> distrax.Distribution:
if not self.can_run_backwards and not forward:
raise ValueError("This model can not be run backwards.")
images = utils.extract_image(inputs)
# This is intentionally matching the split for the training stats
if forward:
num_steps_backward = self.inferred_index
num_steps_forward = images.shape[1] - num_steps_backward - 1
else:
num_steps_forward = self.num_inference_steps - self.inferred_index - 1
num_steps_backward = images.shape[1] - num_steps_forward - 1
if not self.can_run_backwards:
num_steps_backward = 0
if forward:
image_data = images[:, :self.num_inference_steps]
else:
image_data = images[:, -self.num_inference_steps:]
return self._models_core(
params=params,
keys=jnr.split(rng, 6),
image_data=image_data,
use_mean=use_mean,
is_training=False,
num_steps_forward=num_steps_forward,
num_steps_backward=num_steps_backward,
include_z0=True,
)[0]
def gt_state_and_latents( # pytype: disable=signature-mismatch # jax-ndarray
self,
params: hk.Params,
rng: jnp.ndarray,
inputs: Dict[str, jnp.ndarray],
seq_length: int,
is_training: bool = False,
unroll_direction: str = "forward",
**kwargs: Dict[str, Any]
) -> Tuple[jnp.ndarray, jnp.ndarray,
Union[distrax.Distribution, jnp.ndarray]]:
"""Computes the ground state and matching latents."""
assert unroll_direction in ("forward", "backward")
if unroll_direction == "backward" and not self.can_run_backwards:
raise ValueError("This model can not be unrolled backwards.")
images = utils.extract_image(inputs)
gt_state = utils.extract_gt_state(inputs)
if unroll_direction == "forward":
image_data = images[:, :self.num_inference_steps]
if self.can_run_backwards:
num_steps_backward = self.inferred_index
gt_start_idx = 0
else:
num_steps_backward = 0
gt_start_idx = self.inferred_index
num_steps_forward = seq_length - num_steps_backward - 1
gt_state = gt_state[:, gt_start_idx: seq_length + gt_start_idx]
elif unroll_direction == "backward":
inference_start_idx = seq_length - self.num_inference_steps
image_data = images[:, inference_start_idx: seq_length]
num_steps_forward = self.num_inference_steps - self.inferred_index - 1
num_steps_backward = seq_length - num_steps_forward - 1
gt_state = gt_state[:, :seq_length]
else:
raise NotImplementedError()
_, q_z, _, z0, z, _ = self._models_core(
params=params,
keys=jnr.split(rng, 6),
image_data=image_data,
use_mean=True,
is_training=False,
num_steps_forward=num_steps_forward,
num_steps_backward=num_steps_backward,
include_z0=True,
)
if self.has_latent_transform:
return gt_state, z, z0
else:
return gt_state, z, q_z
def _init_non_model_params_and_state(
self,
rng: jnp.ndarray
) -> Tuple[utils.Params, utils.Params]:
if self.objective_type == "GECO":
# Initialize such that softplus(lambda_var) = 1
geco_lambda_var = jnp.asarray(jnp.log(jnp.e - 1.0))
geco_constraint_ema = jnp.asarray(0.0)
return (dict(GECO=dict(geco_lambda_var=geco_lambda_var)),
dict(GECO=dict(geco_constraint_ema=geco_constraint_ema)))
else:
return dict(), dict()
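  # Worked check, not part of the original file, for the GECO initialization
  # above: softplus(log(e - 1)) = log(1 + exp(log(e - 1))) = log(1 + e - 1)
  # = log(e) = 1, so the Lagrange multiplier starts out at exactly 1.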
def _init_latent_system(
self,
rng: jnp.ndarray,
z: jnp.ndarray,
**kwargs: Mapping[str, Any]
) -> hk.Params:
"""Initializes the parameters of the latent system."""
return self.dynamics.init(
rng,
y0=z,
dt=self.dt,
num_steps_forward=1,
num_steps_backward=0,
include_y0=True,
**kwargs
)
|
deepmind-research-master
|
physics_inspired_models/models/deterministic_vae.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the base abstract classes for sequence models."""
import abc
from typing import Any, Dict, Generic, Mapping, Optional, Sequence, Tuple, TypeVar, Union
from absl import logging
import distrax
import haiku as hk
import jax
import jax.numpy as jnp
import jax.random as jnr
from physics_inspired_models import utils
from physics_inspired_models.models import networks
T = TypeVar("T")
class SequenceModel(abc.ABC, Generic[T]):
"""An abstract class for sequence models."""
def __init__(
self,
can_run_backwards: bool,
latent_system_dim: int,
latent_system_net_type: str,
latent_system_kwargs: Dict[str, Any],
encoder_aggregation_type: Optional[str],
decoder_de_aggregation_type: Optional[str],
encoder_kwargs: Dict[str, Any],
decoder_kwargs: Dict[str, Any],
num_inference_steps: int,
num_target_steps: int,
name: str,
latent_spatial_shape: Optional[Tuple[int, int]] = (4, 4),
has_latent_transform: bool = False,
latent_transform_kwargs: Optional[Dict[str, Any]] = None,
rescale_by: Optional[str] = "pixels_and_time",
data_format: str = "NHWC",
**unused_kwargs
):
# Arguments checks
encoder_kwargs = encoder_kwargs or dict()
decoder_kwargs = decoder_kwargs or dict()
    # Set the decoder de-aggregation type to the "same" type as the encoder if
    # not provided
if (decoder_de_aggregation_type is None and
encoder_aggregation_type is not None):
if encoder_aggregation_type == "linear_projection":
decoder_de_aggregation_type = "linear_projection"
elif encoder_aggregation_type in ("mean", "max"):
decoder_de_aggregation_type = "tile"
else:
raise ValueError(f"Unrecognized encoder_aggregation_type="
f"{encoder_aggregation_type}")
if latent_system_net_type == "conv":
if encoder_aggregation_type is not None:
raise ValueError("When the latent system is convolutional, the encoder "
"aggregation type should be None.")
if decoder_de_aggregation_type is not None:
raise ValueError("When the latent system is convolutional, the decoder "
"aggregation type should be None.")
else:
      if encoder_aggregation_type is None:
        raise ValueError("When the latent system is not convolutional, "
                         "you must provide an encoder aggregation type.")
      if decoder_de_aggregation_type is None:
        raise ValueError("When the latent system is not convolutional, "
                         "you must provide a decoder de-aggregation type.")
if has_latent_transform and latent_transform_kwargs is None:
raise ValueError("When using latent transformation you have to provide "
"the latent_transform_kwargs argument.")
if unused_kwargs:
logging.warning("Unused kwargs: %s", str(unused_kwargs))
super().__init__(**unused_kwargs)
self.can_run_backwards = can_run_backwards
self.latent_system_dim = latent_system_dim
self.latent_system_kwargs = latent_system_kwargs
self.latent_system_net_type = latent_system_net_type
self.latent_spatial_shape = latent_spatial_shape
self.num_inference_steps = num_inference_steps
self.num_target_steps = num_target_steps
self.rescale_by = rescale_by
self.data_format = data_format
self.name = name
# Encoder
self.encoder_kwargs = encoder_kwargs
self.encoder = hk.transform(
lambda *args, **kwargs: networks.SpatialConvEncoder( # pylint: disable=unnecessary-lambda,g-long-lambda
latent_dim=latent_system_dim,
aggregation_type=encoder_aggregation_type,
data_format=data_format,
name="Encoder",
**encoder_kwargs
)(*args, **kwargs))
# Decoder
self.decoder_kwargs = decoder_kwargs
self.decoder = hk.transform(
lambda *args, **kwargs: networks.SpatialConvDecoder( # pylint: disable=unnecessary-lambda,g-long-lambda
initial_spatial_shape=self.latent_spatial_shape,
de_aggregation_type=decoder_de_aggregation_type,
data_format=data_format,
max_de_aggregation_dims=self.latent_system_dim // 2,
name="Decoder",
**decoder_kwargs,
)(*args, **kwargs))
self.has_latent_transform = has_latent_transform
if has_latent_transform:
self.latent_transform = hk.transform(
lambda *args, **kwargs: networks.make_flexible_net( # pylint: disable=unnecessary-lambda,g-long-lambda
net_type=latent_system_net_type,
output_dims=latent_system_dim,
name="LatentTransform",
**latent_transform_kwargs
)(*args, **kwargs))
else:
self.latent_transform = None
self._jit_init = None
@property
@abc.abstractmethod
def train_sequence_length(self) -> int:
"""Computes the total length of a sequence needed for training or evaluation."""
pass
@abc.abstractmethod
def train_data_split(
self,
images: jnp.ndarray,
) -> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, Any]]:
"""Extracts from the inputs the data splits for training."""
pass
def decode_latents(
self,
params: hk.Params,
rng: jnp.ndarray,
z: jnp.ndarray,
**kwargs: Any
) -> distrax.Distribution:
"""Decodes the latent variable given the parameters of the model."""
# Allow to run with both the full parameters and only the decoders
if self.latent_system_net_type == "mlp":
fixed_dims = 1
elif self.latent_system_net_type == "conv":
fixed_dims = 1 + len(self.latent_spatial_shape)
else:
raise NotImplementedError()
n_shape = z.shape[:-fixed_dims]
z = z.reshape((-1,) + z.shape[-fixed_dims:])
x = self.decoder.apply(params, rng, z, **kwargs)
return jax.tree_map(lambda a: a.reshape(n_shape + a.shape[1:]), x)
def apply_latent_transform(
self,
params: hk.Params,
key: jnp.ndarray,
z: jnp.ndarray,
**kwargs: Any
) -> jnp.ndarray:
if self.latent_transform is not None:
return self.latent_transform.apply(params, key, z, **kwargs)
else:
return z
@abc.abstractmethod
def process_inputs_for_encoder(self, x: jnp.ndarray) -> jnp.ndarray:
pass
@abc.abstractmethod
def process_latents_for_dynamics(self, z: jnp.ndarray) -> T:
pass
@abc.abstractmethod
def process_latents_for_decoder(self, z: T) -> jnp.ndarray:
pass
@abc.abstractmethod
def unroll_latent_dynamics(
self,
z: T,
params: utils.Params,
key: jnp.ndarray,
num_steps_forward: int,
num_steps_backward: int,
include_z0: bool,
is_training: bool,
**kwargs: Any
) -> Tuple[T, Mapping[str, jnp.ndarray]]:
"""Unrolls the latent dynamics starting from z and pre-processing for the decoder."""
pass
@abc.abstractmethod
def reconstruct(
self,
params: utils.Params,
inputs: jnp.ndarray,
rng_key: Optional[jnp.ndarray],
forward: bool,
) -> distrax.Distribution:
"""Using the first `num_inference_steps` parts of inputs reconstructs the rest."""
pass
@abc.abstractmethod
def training_objectives(
self,
params: utils.Params,
state: hk.State,
rng: jnp.ndarray,
inputs: Union[Dict[str, jnp.ndarray], jnp.ndarray],
step: jnp.ndarray,
is_training: bool = True,
use_mean_for_eval_stats: bool = True
) -> Tuple[jnp.ndarray, Sequence[Dict[str, jnp.ndarray]]]:
"""Returns all training objectives statistics and update states."""
pass
@property
@abc.abstractmethod
def inferred_index(self):
"""Returns the time index in the input sequence, for which the encoder infers.
If the encoder takes as input the sequence x[0:n-1], where
`n = self.num_inference_steps`, then this outputs the index `k` relative to
the begging of the input sequence `x_0`, which the encoder infers.
"""
pass
@property
def inferred_right_offset(self):
return self.num_inference_steps - 1 - self.inferred_index
@abc.abstractmethod
def gt_state_and_latents(
self,
params: hk.Params,
rng: jnp.ndarray,
inputs: Dict[str, jnp.ndarray],
seq_len: int,
is_training: bool = False,
unroll_direction: str = "forward",
**kwargs: Dict[str, Any]
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Computes the ground state and matching latents."""
pass
@abc.abstractmethod
def _init_non_model_params_and_state(
self,
rng: jnp.ndarray
) -> Tuple[utils.Params, utils.Params]:
"""Initializes any non-model parameters and state."""
pass
@abc.abstractmethod
def _init_latent_system(
self,
rng: jnp.ndarray,
z: jnp.ndarray,
**kwargs: Any
) -> hk.Params:
"""Initializes the parameters of the latent system."""
pass
def _init(
self,
rng: jnp.ndarray,
images: jnp.ndarray
) -> Tuple[hk.Params, hk.State]:
"""Initializes the whole model parameters and state."""
inference_data, _, _ = self.train_data_split(images)
# Initialize parameters and state for the vae training
rng, key = jnr.split(rng)
params, state = self._init_non_model_params_and_state(key)
# Initialize and run encoder
inference_data = self.process_inputs_for_encoder(inference_data)
rng, key = jnr.split(rng)
encoder_params = self.encoder.init(key, inference_data, is_training=True)
rng, key = jnr.split(rng)
z_in = self.encoder.apply(encoder_params, key, inference_data,
is_training=True)
# For probabilistic models this will be a distribution
if isinstance(z_in, distrax.Distribution):
z_in = z_in.mean()
# Initialize and run the optional latent transform
if self.latent_transform is not None:
rng, key = jnr.split(rng)
transform_params = self.latent_transform.init(key, z_in, is_training=True)
rng, key = jnr.split(rng)
z_in = self.latent_transform.apply(transform_params, key, z_in,
is_training=True)
else:
transform_params = dict()
# Initialize and run the latent system
z_in = self.process_latents_for_dynamics(z_in)
rng, key = jnr.split(rng)
latent_params = self._init_latent_system(key, z_in, is_training=True)
rng, key = jnr.split(rng)
z_out, _ = self.unroll_latent_dynamics(
z=z_in,
params=latent_params,
key=key,
num_steps_forward=1,
num_steps_backward=0,
include_z0=False,
is_training=True
)
z_out = self.process_latents_for_decoder(z_out)
# Initialize and run the decoder
rng, key = jnr.split(rng)
decoder_params = self.decoder.init(key, z_out[:, 0], is_training=True)
_ = self.decoder.apply(decoder_params, rng, z_out[:, 0], is_training=True)
# Combine all and make immutable
params = hk.data_structures.merge(params, encoder_params, transform_params,
latent_params, decoder_params)
params = hk.data_structures.to_immutable_dict(params)
state = hk.data_structures.to_immutable_dict(state)
return params, state # pytype: disable=bad-return-type # jax-ndarray
def init(
self,
rng: jnp.ndarray,
inputs_or_shape: Union[jnp.ndarray, Mapping[str, jnp.ndarray],
Sequence[int]],
) -> Tuple[utils.Params, hk.State]:
"""Initializes the whole model parameters and state."""
if (isinstance(inputs_or_shape, (tuple, list))
and isinstance(inputs_or_shape[0], int)):
images = jnp.zeros(inputs_or_shape)
else:
images = utils.extract_image(inputs_or_shape)
if self._jit_init is None:
self._jit_init = jax.jit(self._init)
return self._jit_init(rng, images)
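# Usage sketch, not part of the original file: a concrete subclass `model` can
# be initialized either from example inputs or directly from their shape, e.g.
#   params, state = model.init(jnr.PRNGKey(0), (batch, time, 64, 64, 3))
# since a tuple or list of ints is turned into a dummy `jnp.zeros` array
# before the jitted `_init` is invoked.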
|
deepmind-research-master
|
physics_inspired_models/models/base.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing all of the networks as Haiku modules."""
from typing import Any, Mapping, Optional, Tuple, Union
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
import haiku as hk
import jax
import jax.numpy as jnp
from physics_inspired_models import integrators
from physics_inspired_models import utils
from physics_inspired_models.models import networks
_PhysicsSimulationOutput = Union[
phase_space.PhaseSpace,
Tuple[phase_space.PhaseSpace, Mapping[str, jnp.ndarray]]
]
class PhysicsSimulationNetwork(hk.Module):
"""A model for simulating an abstract physical system, whose energy is defined by a neural network."""
def __init__(
self,
system_dim: int,
input_space: str,
simulation_space: str,
potential_func_form: str,
kinetic_func_form: str,
parametrize_mass_matrix: bool,
net_kwargs: Mapping[str, Any],
mass_eps: float = 1.0,
integrator_method: Optional[str] = None,
steps_per_dt: int = 1,
ode_int_kwargs: Optional[Mapping[str, float]] = None,
use_scan: bool = True,
feature_axis: int = -1,
features_extra_dims: Optional[int] = None,
network_creation_func=networks.make_flexible_net,
name: Optional[str] = None
):
"""Initializes the model.
Args:
system_dim: The number of system dimensions. Note that this specifies the
number of dimensions only of the position vectors, not of position and
momentum. Hence the generalized coordinates would be of dimension
`2 * system_dim`.
input_space: Either `velocity` or `momentum`. Specifies whether the inputs
to the model are to be interpreted as `(position, velocity)` or as
`(position, momentum)`.
simulation_space: Either `velocity` or `momentum`. Specifies whether the
model should simulate the dynamics in `(position, velocity)` space
using the Lagrangian formulation or in `(position, momentum)` space
using the Hamiltonian formulation. If this is different than the value
of `input_space` then `kinetic_func_form` must be one of pure_quad,
matrix_diag_quad, matrix_quad, matrix_dep_diag_quad, matrix_dep_quad.
In all other cases one can not compute analytically the form of the
functional (Lagrangian or Hamiltonian) from the other.
potential_func_form: String specifying the form of the potential energy:
* separable_net - The network uses only the position:
U(q, q_dot/p) = f(q) f: R^d -> R
* dep_net - The network uses both the position and velocity/momentum:
U(q, q_dot/p) = f(q, q_dot/p) f: R^d x R^d -> R
* embed_quad - A quadratic of the embedding of a network embedding of
the velocity/momentum:
U(q, q_dot/p) = f(q)^T f(q) / 2 f: R^d -> R^d
      kinetic_func_form: String specifying the form of the kinetic energy:
* separable_net - The network uses only the velocity/momentum:
K(q, q_dot/p) = f(q_dot/p) f: R^d -> R
* dep_net - The network uses both the position and velocity/momentum:
K(q, q_dot/p) = f(q, q_dot/p) f: R^d x R^d -> R
* pure_quad - A quadratic function of the velocity/momentum:
K(q, q_dot/p) = (q_dot/p)^T (q_dot/p) / 2
* matrix_diag_quad - A quadratic function of the velocity/momentum,
where there is diagonal mass matrix, whose log `P` is a parameter:
K(q, q_dot) = q_dot^T M q_dot / 2
K(q, p) = p^T M^-1 p / 2
          [if `parametrize_mass_matrix`]
M = diag(exp(P) + mass_eps)
[else]
M^-1 = diag(exp(P) + mass_eps)
* matrix_quad - A quadratic function of the velocity/momentum, where
there is a full mass matrix, whose Cholesky factor L is a parameter:
K(q, q_dot) = q_dot^T M q_dot / 2
K(q, p) = p^T M^-1 p / 2
          [if `parametrize_mass_matrix`]
M = LL^T + mass_eps * I
[else]
M^-1 = LL^T + mass_eps * I
* matrix_dep_quad - A quadratic function of the velocity/momentum, where
there is a full mass matrix defined as a function of the position:
K(q, q_dot) = q_dot^T M(q) q_dot / 2
K(q, p) = p^T M(q)^-1 p / 2
          [if `parametrize_mass_matrix`]
M(q) = g(q) g(q)^T + mass_eps * I g: R^d -> R^(d(d+1)/2)
[else]
M(q)^-1 = g(q) g(q)^T + mass_eps * I g: R^d -> R^(d(d+1)/2)
* embed_quad - A quadratic of the embedding of a network embedding of
the velocity/momentum:
K(q, q_dot/p) = f(q_dot/p)^T f(q_dot/p) / 2 f: R^d -> R^d
* matrix_dep_diag_embed_quad - A quadratic of the embedding of a network
embedding of the velocity/momentum where there is diagonal mass matrix
defined as a function of the position:
K(q, q_dot) = f(q_dot)^T M(q) f(q_dot) / 2 f: R^d -> R^d
K(q, p) = f(p)^T M(q)^-1 f(p) / 2 f: R^d -> R^d
          [if `parametrize_mass_matrix`]
            M(q) = diag(exp(g(q)) + mass_eps)        g: R^d -> R^d
          [else]
            M(q)^-1 = diag(exp(g(q)) + mass_eps)     g: R^d -> R^d
* matrix_dep_embed_quad - A quadratic of the embedding of a network
embedding of the velocity/momentum where there is a full mass matrix
defined as a function of the position:
K(q, q_dot) = f(q_dot)^T M(q) f(q_dot) / 2 f: R^d -> R^d
K(q, p) = f(p)^T M(q)^-1 f(p) / 2 f: R^d -> R^d
          [if `parametrize_mass_matrix`]
M(q) = g(q) g(q)^T + mass_eps * I g: R^d -> R^(d(d+1)/2)
[else]
M(q)^-1 = g(q) g(q)^T + mass_eps * I g: R^d -> R^(d(d+1)/2)
For any of the function forms with mass matrices, if we have a
convolutional input it is assumed that the matrix is shared across all
spatial locations.
parametrize_mass_matrix: Defines for the kinetic functional form, whether
the network output defines the mass or the inverse of the mass matrix.
net_kwargs: Any keyword arguments to pass down to the networks.
mass_eps: The additional weight of the identity added to the mass matrix,
when relevant.
integrator_method: What method to use for integrating the system.
steps_per_dt: How many internal steps per a single `dt` step to do.
ode_int_kwargs: Extra arguments when using "implicit" integrator method.
use_scan: Whether to use `lax.scan` for explicit integrators.
feature_axis: The number of the features axis in the inputs.
features_extra_dims: If the inputs have extra features (like spatial for
convolutions) this specifies how many of them there are.
network_creation_func: A function that creates the networks. Should have a
signature `network_creation_func(output_dims, name, **net_kwargs)`.
name: The name of this Haiku module.
"""
super().__init__(name=name)
if input_space not in ("velocity", "momentum"):
raise ValueError("input_space must be either velocity or momentum.")
if simulation_space not in ("velocity", "momentum"):
raise ValueError("simulation_space must be either velocity or momentum.")
if potential_func_form not in ("separable_net", "dep_net", "embed_quad"):
raise ValueError("The potential network can be only a network.")
if kinetic_func_form not in ("separable_net", "dep_net", "pure_quad",
"matrix_diag_quad", "matrix_quad",
"matrix_dep_diag_quad", "matrix_dep_quad",
"embed_quad", "matrix_dep_diag_embed_quad",
"matrix_dep_embed_quad"):
raise ValueError(f"Unrecognized kinetic func form {kinetic_func_form}.")
if input_space != simulation_space:
if kinetic_func_form not in (
"pure_quad", "matrix_diag_quad", "matrix_quad",
"matrix_dep_diag_quad", "matrix_dep_quad"):
raise ValueError(
"When the input and simulation space are not the same, it is "
"possible to simulate the physical system only if kinetic_func_form"
" is one of pure_quad, matrix_diag_quad, matrix_quad, "
"matrix_dep_diag_quad, matrix_dep_quad. In all other cases one can"
"not compute analytically the form of the functional (Lagrangian or"
" Hamiltonian) from the other.")
if feature_axis != -1:
raise ValueError("Currently we only support features_axis=-1.")
if integrator_method is None:
if simulation_space == "velocity":
integrator_method = "rk2"
else:
integrator_method = "leap_frog"
if features_extra_dims is None:
if net_kwargs["net_type"] == "mlp":
features_extra_dims = 0
elif net_kwargs["net_type"] == "conv":
features_extra_dims = 2
else:
raise NotImplementedError()
ode_int_kwargs = dict(ode_int_kwargs or {})
ode_int_kwargs.setdefault("rtol", 1e-6)
ode_int_kwargs.setdefault("atol", 1e-6)
ode_int_kwargs.setdefault("mxstep", 50)
self.system_dim = system_dim
self.input_space = input_space
self.simulation_space = simulation_space
self.potential_func_form = potential_func_form
self.kinetic_func_form = kinetic_func_form
self.parametrize_mass_matrix = parametrize_mass_matrix
self.features_axis = feature_axis
self.features_extra_dims = features_extra_dims
self.integrator_method = integrator_method
self.steps_per_dt = steps_per_dt
self.ode_int_kwargs = ode_int_kwargs
self.net_kwargs = net_kwargs
self.mass_eps = mass_eps
self.use_scan = use_scan
self.name = name
self.potential_net = network_creation_func(
output_dims=1, name="PotentialNet", **net_kwargs)
if kinetic_func_form in ("separable_net", "dep_net"):
self.kinetic_net = network_creation_func(
output_dims=1, name="KineticNet", **net_kwargs)
else:
self.kinetic_net = None
if kinetic_func_form in ("matrix_dep_quad", "matrix_dep_embed_quad"):
output_dims = (system_dim * (system_dim + 1)) // 2
name = "MatrixNet" if parametrize_mass_matrix else "InvMatrixNet"
self.mass_matrix_net = network_creation_func(
output_dims=output_dims, name=name, **net_kwargs)
elif kinetic_func_form in ("matrix_dep_diag_quad",
"matrix_dep_diag_embed_quad",
"matrix_dep_embed_quad"):
name = "MatrixNet" if parametrize_mass_matrix else "InvMatrixNet"
self.mass_matrix_net = network_creation_func(
output_dims=system_dim, name=name, **net_kwargs)
else:
self.mass_matrix_net = None
if kinetic_func_form in ("embed_quad", "matrix_dep_diag_embed_quad",
"matrix_dep_embed_quad"):
self.kinetic_embed_net = network_creation_func(
output_dims=system_dim, name="KineticEmbed", **net_kwargs)
else:
self.kinetic_embed_net = None
def sum_per_dim_energy(self, energy: jnp.ndarray) -> jnp.ndarray:
"""Sums the per dimension energy."""
axis = [-i-1 for i in range(self.features_extra_dims + 1)]
return jnp.sum(energy, axis=axis)
def feature_matrix_vector(self, m, v):
"""A utility function to compute the product of a matrix and vector in the features axis."""
v = jnp.expand_dims(v, axis=self.features_axis-1)
return jnp.sum(m * v, axis=self.features_axis)
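  # Shape note, not part of the original file: for the default
  # features_axis=-1, `m` has shape [..., d, d] and `v` has shape [..., d].
  # Expanding `v` to [..., 1, d] and summing `m * v` over the last axis
  # computes the usual matrix-vector product (M v)_i = sum_j M_ij v_j, with
  # result shape [..., d].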
def mass_matrix_mul(
self,
q: jnp.ndarray,
v: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the product of the mass matrix with a vector and throws an error if not applicable."""
if self.kinetic_func_form in ("separable_net", "dep_net"):
raise ValueError("It is not possible to compute `M q_dot` when using a "
"network for the kinetic energy.")
if self.kinetic_func_form in ("pure_quad", "embed_quad"):
return v
if self.kinetic_func_form == "matrix_diag_quad":
if self.parametrize_mass_matrix:
m_diag_log = hk.get_parameter("MassMatrixDiagLog",
shape=[self.system_dim],
init=hk.initializers.Constant(0.0))
m_diag = jnp.exp(m_diag_log) + self.mass_eps
else:
m_inv_diag_log = hk.get_parameter("InvMassMatrixDiagLog",
shape=[self.system_dim],
init=hk.initializers.Constant(0.0))
m_diag = 1.0 / (jnp.exp(m_inv_diag_log) + self.mass_eps)
return m_diag * v
if self.kinetic_func_form == "matrix_quad":
if self.parametrize_mass_matrix:
m_triu = hk.get_parameter("MassMatrixU",
shape=[self.system_dim, self.system_dim],
init=hk.initializers.Identity())
m_triu = jnp.triu(m_triu)
m = jnp.matmul(m_triu.T, m_triu)
m = m + self.mass_eps * jnp.eye(self.system_dim)
return self.feature_matrix_vector(m, v)
else:
m_inv_triu = hk.get_parameter("InvMassMatrixU",
shape=[self.system_dim, self.system_dim],
init=hk.initializers.Identity())
m_inv_triu = jnp.triu(m_inv_triu)
m_inv = jnp.matmul(m_inv_triu.T, m_inv_triu)
m_inv = m_inv + self.mass_eps * jnp.eye(self.system_dim)
solve = jnp.linalg.solve
for _ in range(v.ndim + 1 - m_inv.ndim):
solve = jax.vmap(solve, in_axes=(None, 0))
return solve(m_inv, v)
if self.kinetic_func_form in ("matrix_dep_diag_quad",
"matrix_dep_diag_embed_quad"):
if self.parametrize_mass_matrix:
m_diag_log = self.mass_matrix_net(q, **kwargs)
m_diag = jnp.exp(m_diag_log) + self.mass_eps
else:
m_inv_diag_log = self.mass_matrix_net(q, **kwargs)
m_diag = 1.0 / (jnp.exp(m_inv_diag_log) + self.mass_eps)
return m_diag * v
if self.kinetic_func_form in ("matrix_dep_quad",
"matrix_dep_embed_quad"):
if self.parametrize_mass_matrix:
m_triu = self.mass_matrix_net(q, **kwargs)
m_triu = utils.triu_matrix_from_v(m_triu, self.system_dim)
m = jnp.matmul(jnp.swapaxes(m_triu, -1, -2), m_triu)
m = m + self.mass_eps * jnp.eye(self.system_dim)
return self.feature_matrix_vector(m, v)
else:
m_inv_triu = self.mass_matrix_net(q, **kwargs)
m_inv_triu = utils.triu_matrix_from_v(m_inv_triu, self.system_dim)
m_inv = jnp.matmul(jnp.swapaxes(m_inv_triu, -1, -2), m_inv_triu)
m_inv = m_inv + self.mass_eps * jnp.eye(self.system_dim)
return jnp.linalg.solve(m_inv, v)
raise NotImplementedError()
def mass_matrix_inv_mul(
self,
q: jnp.ndarray,
v: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the product of the inverse mass matrix with a vector."""
if self.kinetic_func_form in ("separable_net", "dep_net"):
raise ValueError("It is not possible to compute `M^-1 p` when using a "
"network for the kinetic energy.")
if self.kinetic_func_form in ("pure_quad", "embed_quad"):
return v
if self.kinetic_func_form == "matrix_diag_quad":
if self.parametrize_mass_matrix:
m_diag_log = hk.get_parameter("MassMatrixDiagLog",
shape=[self.system_dim],
init=hk.initializers.Constant(0.0))
m_inv_diag = 1.0 / (jnp.exp(m_diag_log) + self.mass_eps)
else:
m_inv_diag_log = hk.get_parameter("InvMassMatrixDiagLog",
shape=[self.system_dim],
init=hk.initializers.Constant(0.0))
m_inv_diag = jnp.exp(m_inv_diag_log) + self.mass_eps
return m_inv_diag * v
if self.kinetic_func_form == "matrix_quad":
if self.parametrize_mass_matrix:
m_triu = hk.get_parameter("MassMatrixU",
shape=[self.system_dim, self.system_dim],
init=hk.initializers.Identity())
m_triu = jnp.triu(m_triu)
m = jnp.matmul(m_triu.T, m_triu)
m = m + self.mass_eps * jnp.eye(self.system_dim)
solve = jnp.linalg.solve
for _ in range(v.ndim + 1 - m.ndim):
solve = jax.vmap(solve, in_axes=(None, 0))
return solve(m, v)
else:
m_inv_triu = hk.get_parameter("InvMassMatrixU",
shape=[self.system_dim, self.system_dim],
init=hk.initializers.Identity())
m_inv_triu = jnp.triu(m_inv_triu)
m_inv = jnp.matmul(m_inv_triu.T, m_inv_triu)
m_inv = m_inv + self.mass_eps * jnp.eye(self.system_dim)
return self.feature_matrix_vector(m_inv, v)
if self.kinetic_func_form in ("matrix_dep_diag_quad",
"matrix_dep_diag_embed_quad"):
if self.parametrize_mass_matrix:
m_diag_log = self.mass_matrix_net(q, **kwargs)
m_inv_diag = 1.0 / (jnp.exp(m_diag_log) + self.mass_eps)
else:
m_inv_diag_log = self.mass_matrix_net(q, **kwargs)
m_inv_diag = jnp.exp(m_inv_diag_log) + self.mass_eps
return m_inv_diag * v
if self.kinetic_func_form in ("matrix_dep_quad",
"matrix_dep_embed_quad"):
if self.parametrize_mass_matrix:
m_triu = self.mass_matrix_net(q, **kwargs)
m_triu = utils.triu_matrix_from_v(m_triu, self.system_dim)
m = jnp.matmul(jnp.swapaxes(m_triu, -2, -1), m_triu)
m = m + self.mass_eps * jnp.eye(self.system_dim)
return jnp.linalg.solve(m, v)
else:
m_inv_triu = self.mass_matrix_net(q, **kwargs)
m_inv_triu = utils.triu_matrix_from_v(m_inv_triu, self.system_dim)
m_inv = jnp.matmul(jnp.swapaxes(m_inv_triu, -2, -1), m_inv_triu)
m_inv = m_inv + self.mass_eps * jnp.eye(self.system_dim)
return self.feature_matrix_vector(m_inv, v)
raise NotImplementedError()
def momentum_from_velocity(
self,
q: jnp.ndarray,
q_dot: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the momentum from position and velocity."""
def local_lagrangian(q_dot_):
# We take the sum so we can easily take gradients
return jnp.sum(self.lagrangian(
phase_space.PhaseSpace(q, q_dot_), **kwargs))
return jax.grad(local_lagrangian)(q_dot)
def velocity_from_momentum(
self,
q: jnp.ndarray,
p: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the velocity from position and momentum."""
def local_hamiltonian(p_):
# We take the sum so we can easily take gradients
return jnp.sum(self.hamiltonian(
phase_space.PhaseSpace(q, p_), **kwargs))
return jax.grad(local_hamiltonian)(p)
def kinetic_energy_velocity(
self,
q: jnp.ndarray,
q_dot: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the kinetic energy in velocity coordinates."""
if self.kinetic_func_form in ("separable_net", "dep_net"):
if self.input_space != "velocity":
raise ValueError("Can not evaluate the Kinetic energy from velocity, "
"when the input space is momentum and "
"kinetic_func_form is separable_net or dep_net.")
if self.kinetic_func_form == "separable_net":
s = q_dot
else:
s = jnp.concatenate([q, q_dot], axis=-1)
per_dim_energy = self.kinetic_net(s, **kwargs)
else:
if self.kinetic_embed_net is not None:
if self.input_space != "velocity":
raise ValueError("Can not evaluate the Kinetic energy from velocity, "
"when the input space is momentum and "
"kinetic_func_form is embed_quad, "
"matrix_dep_diag_embed_quad or "
"matrix_dep_embed_quad.")
q_dot = self.kinetic_embed_net(q_dot, **kwargs)
m_q_dot = self.mass_matrix_mul(q, q_dot, **kwargs)
per_dim_energy = q_dot * m_q_dot / 2
return self.sum_per_dim_energy(per_dim_energy)
def kinetic_energy_momentum(
self,
q: jnp.ndarray,
p: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the kinetic energy in momentum coordinates."""
if self.kinetic_func_form in ("separable_net", "dep_net"):
if self.input_space != "momentum":
raise ValueError("Can not evaluate the Kinetic energy from momentum, "
"when the input space is velocity and "
"kinetic_func_form is separable_net or dep_net.")
if self.kinetic_func_form == "separable_net":
s = p
else:
s = jnp.concatenate([q, p], axis=-1)
per_dim_energy = self.kinetic_net(s, **kwargs)
else:
if self.kinetic_embed_net is not None:
if self.input_space != "momentum":
raise ValueError("Can not evaluate the Kinetic energy from momentum, "
"when the input space is velocity and "
"kinetic_func_form is embed_quad, "
"matrix_dep_diag_embed_quad or "
"matrix_dep_embed_quad.")
p = self.kinetic_embed_net(p, **kwargs)
m_inv_p = self.mass_matrix_inv_mul(q, p, **kwargs)
per_dim_energy = p * m_inv_p / 2
return self.sum_per_dim_energy(per_dim_energy)
def potential_energy_velocity(
self,
q: jnp.ndarray,
q_dot: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the potential energy in velocity coordinates."""
if self.potential_func_form == "separable_net":
per_dim_energy = self.potential_net(q, **kwargs)
elif self.input_space != "momentum":
raise ValueError("Can not evaluate the Potential energy from velocity, "
"when the input space is momentum and "
"potential_func_form is dep_net.")
else:
s = jnp.concatenate([q, q_dot], axis=-1)
per_dim_energy = self.potential_net(s, **kwargs)
return self.sum_per_dim_energy(per_dim_energy)
def potential_energy_momentum(
self,
q: jnp.ndarray,
p: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the potential energy in momentum coordinates."""
if self.potential_func_form == "separable_net":
per_dim_energy = self.potential_net(q, **kwargs)
elif self.input_space != "momentum":
raise ValueError("Can not evaluate the Potential energy from momentum, "
"when the input space is velocity and "
"potential_func_form is dep_net.")
else:
s = jnp.concatenate([q, p], axis=-1)
per_dim_energy = self.potential_net(s, **kwargs)
return self.sum_per_dim_energy(per_dim_energy)
def hamiltonian(
self,
s: phase_space.PhaseSpace,
**kwargs
) -> jnp.ndarray:
"""Computes the Hamiltonian in momentum coordinates."""
potential = self.potential_energy_momentum(s.q, s.p, **kwargs)
kinetic = self.kinetic_energy_momentum(s.q, s.p, **kwargs)
# Sanity check
assert potential.shape == kinetic.shape
return kinetic + potential
def lagrangian(
self,
s: phase_space.PhaseSpace,
**kwargs
) -> jnp.ndarray:
"""Computes the Lagrangian in velocity coordinates."""
potential = self.potential_energy_velocity(s.q, s.p, **kwargs)
kinetic = self.kinetic_energy_velocity(s.q, s.p, **kwargs)
# Sanity check
assert potential.shape == kinetic.shape
return kinetic - potential
def energy_from_momentum(
self,
s: phase_space.PhaseSpace,
**kwargs
) -> jnp.ndarray:
"""Computes the energy of the system in momentum coordinates."""
return self.hamiltonian(s, **kwargs)
def energy_from_velocity(
self,
s: phase_space.PhaseSpace,
**kwargs
) -> jnp.ndarray:
"""Computes the energy of the system in velocity coordinates."""
q, q_dot = s.q, s.p
p = self.momentum_from_velocity(q, q_dot, **kwargs)
q_dot_p = jnp.sum(q_dot * p, self.features_axis)
return q_dot_p - self.lagrangian(s, **kwargs)
def velocity_and_acceleration(
self,
q: jnp.ndarray,
q_dot: jnp.ndarray,
**kwargs
) -> phase_space.TangentPhaseSpace:
"""Computes the velocity and acceleration of the system in velocity coordinates."""
def local_lagrangian(*q_and_q_dot):
# We take the sum so we can easily take gradients
return jnp.sum(self.lagrangian(
phase_space.PhaseSpace(*q_and_q_dot), **kwargs))
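    # Euler-Lagrange: d/dt (dL/dq_dot) = dL/dq. Expanding the time derivative
    # gives (d^2L/dq_dot^2) q_ddot = dL/dq - (d^2L/dq_dot dq) q_dot, so we
    # form the right-hand side (`pre_acc_vector`) and solve for q_ddot.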
grad_q = jax.grad(local_lagrangian, 0)(q, q_dot)
grad_q_dot_func = jax.grad(local_lagrangian, 1)
_, grad_q_dot_grad_q_times_q_dot = jax.jvp(grad_q_dot_func, (q, q_dot),
(q_dot, jnp.zeros_like(q_dot)))
pre_acc_vector = grad_q - grad_q_dot_grad_q_times_q_dot
if self.kinetic_func_form in ("pure_quad", "matrix_diag_quad",
"matrix_quad", "matrix_dep_diag_quad",
"matrix_dep_quad"):
q_dot_dot = self.mass_matrix_inv_mul(q, pre_acc_vector, **kwargs)
else:
hess_q_dot = jax.vmap(jax.hessian(local_lagrangian, 1))(q, q_dot)
q_dot_dot = jnp.linalg.solve(hess_q_dot, pre_acc_vector)
return phase_space.TangentPhaseSpace(q_dot, q_dot_dot)
def simulate(
self,
y0: phase_space.PhaseSpace,
dt: Union[float, jnp.ndarray],
num_steps_forward: int,
num_steps_backward: int,
include_y0: bool,
return_stats: bool = True,
**nets_kwargs
) -> _PhysicsSimulationOutput:
"""Simulates the continuous dynamics of the physical system.
Args:
y0: Initial state of the system.
dt: The size of the time intervals at which to evolve the system.
num_steps_forward: Number of steps to make into the future.
num_steps_backward: Number of steps to make into the past.
include_y0: Whether to include the initial state in the result.
return_stats: Whether to return additional statistics.
**nets_kwargs: Keyword arguments to pass to the networks.
Returns:
* The state of the system evolved as many steps as specified by the
arguments into the past and future, all in chronological order.
* Optionally return a dictionary of additional statistics. For the moment
this only returns the energy of the system at each evaluation point.
"""
# Define the dynamics
if self.simulation_space == "velocity":
dy_dt = lambda t_, y: self.velocity_and_acceleration( # pylint: disable=g-long-lambda
y.q, y.p, **nets_kwargs)
      # At `hk.transform` init time we only need to create the parameters, so
      # we evaluate the energy once instead of running the integrator.
if hk.running_init():
return self.lagrangian(y0, **nets_kwargs) # pytype: disable=bad-return-type # jax-ndarray
else:
hamiltonian = lambda t_, y: self.hamiltonian(y, **nets_kwargs)
dy_dt = phase_space.poisson_bracket_with_q_and_p(hamiltonian)
if hk.running_init():
return self.hamiltonian(y0, **nets_kwargs) # pytype: disable=bad-return-type # jax-ndarray
# Optionally switch coordinate frame
if self.input_space == "velocity" and self.simulation_space == "momentum":
p = self.momentum_from_velocity(y0.q, y0.p, **nets_kwargs)
y0 = phase_space.PhaseSpace(y0.q, p)
if self.input_space == "momentum" and self.simulation_space == "velocity":
q_dot = self.velocity_from_momentum(y0.q, y0.p, **nets_kwargs)
y0 = phase_space.PhaseSpace(y0.q, q_dot)
yt = integrators.solve_ivp_dt_two_directions(
fun=dy_dt,
y0=y0,
t0=0.0,
dt=dt,
method=self.integrator_method,
num_steps_forward=num_steps_forward,
num_steps_backward=num_steps_backward,
include_y0=include_y0,
steps_per_dt=self.steps_per_dt,
ode_int_kwargs=self.ode_int_kwargs
)
# Make time axis second
yt = jax.tree_map(lambda x: jnp.swapaxes(x, 0, 1), yt)
# Compute energies for the full trajectory
yt_energy = jax.tree_map(utils.merge_first_dims, yt)
if self.simulation_space == "momentum":
energy = self.energy_from_momentum(yt_energy, **nets_kwargs)
else:
energy = self.energy_from_velocity(yt_energy, **nets_kwargs)
energy = energy.reshape(yt.q.shape[:2])
# Optionally switch back to input coordinate frame
if self.input_space == "velocity" and self.simulation_space == "momentum":
q_dot = self.velocity_from_momentum(yt.q, yt.p, **nets_kwargs)
yt = phase_space.PhaseSpace(yt.q, q_dot)
if self.input_space == "momentum" and self.simulation_space == "velocity":
p = self.momentum_from_velocity(yt.q, yt.p, **nets_kwargs)
yt = phase_space.PhaseSpace(yt.q, p)
# Compute energy deficit
t = energy.shape[-1]
non_zero_diffs = float((t * (t - 1)) // 2)
energy_deficits = jnp.abs(energy[..., None, :] - energy[..., None])
avg_deficit = jnp.sum(energy_deficits, axis=(-2, -1)) / non_zero_diffs
max_deficit = jnp.max(energy_deficits)
# Return the states and energies
if return_stats:
return yt, dict(avg_energy_deficit=avg_deficit,
max_energy_deficit=max_deficit)
else:
return yt
def __call__(self, *args, **kwargs):
return self.simulate(*args, **kwargs)
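# Illustrative usage sketch, not part of the original file. The physics
# simulation module above is a Haiku module, so it has to be constructed and
# called inside `hk.transform`; `SimulationModule` below is a stand-in name
# for that module, its constructor arguments are elided, and `y0` is assumed
# to be a batched `phase_space.PhaseSpace`:
#
#   def forward(y0, dt):
#     model = SimulationModule(...)  # stand-in for the module defined above
#     return model(y0, dt, num_steps_forward=5, num_steps_backward=0,
#                  include_y0=True)
#
#   forward_t = hk.transform(forward)
#   params = forward_t.init(jax.random.PRNGKey(0), y0, 0.1)
#   yt, stats = forward_t.apply(params, None, y0, 0.1)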
class OdeNetwork(hk.Module):
"""A simple haiku module for constructing a NeuralODE."""
def __init__(
self,
system_dim: int,
net_kwargs: Mapping[str, Any],
integrator_method: Optional[str] = None,
steps_per_dt: int = 1,
ode_int_kwargs: Optional[Mapping[str, float]] = None,
use_scan: bool = True,
network_creation_func=networks.make_flexible_net,
name: Optional[str] = None,
):
super().__init__(name=name)
ode_int_kwargs = dict(ode_int_kwargs or {})
ode_int_kwargs.setdefault("rtol", 1e-6)
ode_int_kwargs.setdefault("atol", 1e-6)
ode_int_kwargs.setdefault("mxstep", 50)
self.system_dim = system_dim
self.integrator_method = integrator_method or "adaptive"
self.steps_per_dt = steps_per_dt
self.ode_int_kwargs = ode_int_kwargs
self.net_kwargs = net_kwargs
self.use_scan = use_scan
self.core = network_creation_func(
output_dims=system_dim, name="Net", **net_kwargs)
def simulate(
self,
y0: jnp.ndarray,
dt: Union[float, jnp.ndarray],
num_steps_forward: int,
num_steps_backward: int,
include_y0: bool,
return_stats: bool = True,
**nets_kwargs
) -> Union[jnp.ndarray, Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]]:
"""Simulates the continuous dynamics of the ODE specified by the network.
Args:
y0: Initial state of the system.
dt: The size of the time intervals at which to evolve the system.
num_steps_forward: Number of steps to make into the future.
num_steps_backward: Number of steps to make into the past.
include_y0: Whether to include the initial state in the result.
return_stats: Whether to return additional statistics.
**nets_kwargs: Keyword arguments to pass to the networks.
Returns:
* The state of the system evolved as many steps as specified by the
arguments into the past and future, all in chronological order.
* Optionally return a dictionary of additional statistics. For the moment
this is just an empty dictionary.
"""
if hk.running_init():
return self.core(y0, **nets_kwargs)
yt = integrators.solve_ivp_dt_two_directions(
fun=lambda t, y: self.core(y, **nets_kwargs),
y0=y0,
t0=0.0,
dt=dt,
method=self.integrator_method,
num_steps_forward=num_steps_forward,
num_steps_backward=num_steps_backward,
include_y0=include_y0,
steps_per_dt=self.steps_per_dt,
ode_int_kwargs=self.ode_int_kwargs
)
# Make time axis second
yt = jax.tree_map(lambda x: jnp.swapaxes(x, 0, 1), yt)
if return_stats:
return yt, dict()
else:
return yt
def __call__(self, *args, **kwargs):
return self.simulate(*args, **kwargs)
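# Illustrative usage sketch, not part of the original file; `net_kwargs` below
# stands for a valid `networks.make_flexible_net` configuration:
#
#   def forward(y0):
#     model = OdeNetwork(system_dim=4, net_kwargs=net_kwargs)
#     return model(y0, dt=0.1, num_steps_forward=10, num_steps_backward=0,
#                  include_y0=True)
#
#   forward_t = hk.transform(forward)
#   params = forward_t.init(jax.random.PRNGKey(0), jnp.zeros([8, 4]))
#   yt, stats = forward_t.apply(params, None, jnp.zeros([8, 4]))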
class DiscreteDynamicsNetwork(hk.Module):
"""A simple haiku module for constructing a discrete dynamics network."""
def __init__(
self,
system_dim: int,
residual: bool,
net_kwargs: Mapping[str, Any],
use_scan: bool = True,
network_creation_func=networks.make_flexible_net,
name: Optional[str] = None,
):
super().__init__(name=name)
self.system_dim = system_dim
self.residual = residual
self.net_kwargs = net_kwargs
self.use_scan = use_scan
self.core = network_creation_func(
output_dims=system_dim, name="Net", **net_kwargs)
def simulate(
self,
y0: jnp.ndarray,
num_steps_forward: int,
include_y0: bool,
return_stats: bool = True,
**nets_kwargs
) -> Union[jnp.ndarray, Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]]:
"""Simulates the dynamics of the discrete system.
Args:
y0: Initial state of the system.
num_steps_forward: Number of steps to make into the future.
include_y0: Whether to include the initial state in the result.
return_stats: Whether to return additional statistics.
**nets_kwargs: Keyword arguments to pass to the networks.
Returns:
* The state of the system evolved as many steps as specified by the
arguments into the past and future, all in chronological order.
* Optionally return a dictionary of additional statistics. For the moment
this is just an empty dictionary.
"""
    if num_steps_forward < 1:
raise ValueError("It is required to unroll at least one step.")
nets_kwargs.pop("dt", None)
nets_kwargs.pop("num_steps_backward", None)
if hk.running_init():
return self.core(y0, **nets_kwargs)
def step(*args):
y, _ = args
if self.residual:
y_next = y + self.core(y, **nets_kwargs)
else:
y_next = self.core(y, **nets_kwargs)
return y_next, y_next
if self.use_scan:
_, yt = jax.lax.scan(step, init=y0, xs=None, length=num_steps_forward)
if include_y0:
yt = jnp.concatenate([y0[None], yt], axis=0)
# Make time axis second
yt = jax.tree_map(lambda x: jnp.swapaxes(x, 0, 1), yt)
else:
yt = [y0]
for _ in range(num_steps_forward):
yt.append(step(yt[-1], None)[0])
if not include_y0:
yt = yt[1:]
if len(yt) == 1:
yt = yt[0][:, None]
else:
        yt = jax.tree_map(lambda *args: jnp.stack(args, axis=1), *yt)
if return_stats:
return yt, dict()
else:
return yt
def __call__(self, *args, **kwargs):
return self.simulate(*args, **kwargs)
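# Illustrative usage sketch, not part of the original file; as above,
# `net_kwargs` stands for a valid `networks.make_flexible_net` configuration:
#
#   def forward(y0):
#     model = DiscreteDynamicsNetwork(
#         system_dim=4, residual=True, net_kwargs=net_kwargs)
#     return model(y0, num_steps_forward=10, include_y0=False)
#
#   forward_t = hk.transform(forward)
#   params = forward_t.init(jax.random.PRNGKey(0), jnp.zeros([8, 4]))
#   yt, _ = forward_t.apply(params, None, jnp.zeros([8, 4]))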
|
deepmind-research-master
|
physics_inspired_models/models/dynamics.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Episodic Memory and Synthetic Returns Core Wrapper modules."""
import collections
import haiku as hk
import jax
import jax.numpy as jnp
SRCoreWrapperOutput = collections.namedtuple(
"SRCoreWrapperOutput", ["output", "synthetic_return", "augmented_return",
"sr_loss"])
class EpisodicMemory(hk.RNNCore):
"""Episodic Memory module."""
def __init__(self, memory_size, capacity, name="episodic_memory"):
"""Constructor.
Args:
memory_size: Integer. The size of the vectors to be stored.
capacity: Integer. The maximum number of memories to store before it
becomes necessary to overwrite old memories.
name: String. A name for this Haiku module instance.
"""
super().__init__(name=name)
self._memory_size = memory_size
self._capacity = capacity
def __call__(self, inputs, prev_state):
"""Writes a new memory into the episodic memory.
Args:
inputs: A Tensor of shape ``[batch_size, memory_size]``.
prev_state: The previous state of the episodic memory, which is a tuple
        with (i) a counter of shape ``[batch_size]`` indicating how many
memories have been written so far, and (ii) a tensor of shape
``[batch_size, capacity, memory_size]`` with the full content of the
episodic memory.
Returns:
A tuple with (i) a tensor of shape ``[batch_size, capacity, memory_size]``
with the full content of the episodic memory, including the newly
written memory, and (ii) the new state of the episodic memory.
"""
inputs = jax.lax.stop_gradient(inputs)
counter, memories = prev_state
counter_mod = jnp.mod(counter, self._capacity)
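    # Ring-buffer write: the one-hot slot selector zeroes the slot being
    # overwritten and writes the new memory there, so writes wrap around once
    # `counter` reaches `capacity`.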
slot_selector = jnp.expand_dims(
jax.nn.one_hot(counter_mod, self._capacity), axis=2)
memories = memories * (1 - slot_selector) + (
slot_selector * jnp.expand_dims(inputs, 1))
counter = counter + 1
return memories, (counter, memories)
def initial_state(self, batch_size):
"""Creates the initial state of the episodic memory.
Args:
batch_size: Integer. The batch size of the episodic memory.
Returns:
      A tuple with (i) a counter of shape ``[batch_size]`` and (ii) a tensor
of shape ``[batch_size, capacity, memory_size]`` with the full content
of the episodic memory.
"""
if batch_size is None:
shape = []
else:
shape = [batch_size]
counter = jnp.zeros(shape)
memories = jnp.zeros(shape + [self._capacity, self._memory_size])
return (counter, memories)
class SyntheticReturnsCoreWrapper(hk.RNNCore):
"""Synthetic Returns core wrapper."""
def __init__(self, core, memory_size, capacity, hidden_layers, alpha, beta,
loss_func=(lambda x, y: 0.5 * jnp.square(x - y)),
apply_core_to_input=False, name="synthetic_returns_wrapper"):
"""Constructor.
Args:
core: hk.RNNCore. The recurrent core of the agent. E.g. an LSTM.
memory_size: Integer. The size of the vectors to be stored in the episodic
memory.
capacity: Integer. The maximum number of memories to store before it
becomes necessary to overwrite old memories.
hidden_layers: Tuple or list of integers, indicating the size of the
hidden layers of the MLPs used to produce synthetic returns, current
state bias, and gate.
alpha: The multiplier of the synthetic returns term in the augmented
return.
beta: The multiplier of the environment returns term in the augmented
return.
loss_func: A function of two arguments (predictions and targets) to
compute the SR loss.
apply_core_to_input: Boolean. Whether to apply the core on the inputs. If
true, the synthetic returns will be computed from the outputs of the
RNN core passed to the constructor. If false, the RNN core will be
applied only at the output of this wrapper, and the synthetic returns
will be computed from the inputs.
name: String. A name for this Haiku module instance.
"""
super().__init__(name=name)
self._em = EpisodicMemory(memory_size, capacity)
self._capacity = capacity
hidden_layers = list(hidden_layers)
self._synthetic_return = hk.nets.MLP(hidden_layers + [1])
self._bias = hk.nets.MLP(hidden_layers + [1])
self._gate = hk.Sequential([
hk.nets.MLP(hidden_layers + [1]),
jax.nn.sigmoid,
])
self._apply_core_to_input = apply_core_to_input
self._core = core
self._alpha = alpha
self._beta = beta
self._loss = loss_func
def initial_state(self, batch_size):
return (
self._em.initial_state(batch_size),
self._core.initial_state(batch_size)
)
def __call__(self, inputs, prev_state):
current_input, return_target = inputs
em_state, core_state = prev_state
(counter, memories) = em_state
if self._apply_core_to_input:
current_input, core_state = self._core(current_input, core_state)
# Synthetic return for the current state
synth_return = jnp.squeeze(self._synthetic_return(current_input), -1)
# Current state bias term
bias = self._bias(current_input)
# Gate computed from current state
gate = self._gate(current_input)
    # When counter >= capacity, the mask will be all ones
mask = 1 - jnp.cumsum(jax.nn.one_hot(counter, self._capacity), axis=1)
mask = jnp.expand_dims(mask, axis=2)
# Synthetic returns for each state in memory
past_synth_returns = hk.BatchApply(self._synthetic_return)(memories)
# Sum of synthetic returns from previous states
sr_sum = jnp.sum(past_synth_returns * mask, axis=1)
prediction = jnp.squeeze(sr_sum * gate + bias, -1)
sr_loss = self._loss(prediction, return_target)
augmented_return = jax.lax.stop_gradient(
self._alpha * synth_return + self._beta * return_target)
# Write current state to memory
_, em_state = self._em(current_input, em_state)
if not self._apply_core_to_input:
output, core_state = self._core(current_input, core_state)
else:
output = current_input
output = SRCoreWrapperOutput(
output=output,
synthetic_return=synth_return,
augmented_return=augmented_return,
sr_loss=sr_loss,
)
return output, (em_state, core_state)
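# Illustrative usage sketch, not part of the original file: the wrapper is an
# `hk.RNNCore`, so it can be unrolled over a trajectory with
# `hk.dynamic_unroll`. All sizes are assumptions; `inputs` is
# [T, B, feature_size] and `return_targets` is [T, B]:
#
#   def unroll(inputs, return_targets):
#     core = SyntheticReturnsCoreWrapper(
#         core=hk.LSTM(64), memory_size=inputs.shape[-1], capacity=128,
#         hidden_layers=(64,), alpha=0.1, beta=1.0)
#     state = core.initial_state(inputs.shape[1])
#     return hk.dynamic_unroll(core, (inputs, return_targets), state)
#
#   unroll_t = hk.transform(unroll)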
|
deepmind-research-master
|
synthetic_returns/synthetic_returns.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Memory & Planning Game environment."""
import string
import dm_env
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
class MemoryPlanningGame(dm_env.Environment):
"""Memory & Planning Game environment."""
ACTION_NAMES = ['Up', 'Down', 'Left', 'Right', 'Collect']
NUM_ACTIONS = len(ACTION_NAMES)
DIRECTIONS = [
(0, 1), # Up
(0, -1), # Down
(-1, 0), # Left
(1, 0), # Right
(0, 0), # Collect
]
def __init__(self,
maze_size=4,
max_episode_steps=100,
target_reward=1.,
per_step_reward=0.,
random_respawn=False,
seed=None):
"""The Memory & Planning Game environment.
Args:
maze_size: (int) size of the maze dimension.
max_episode_steps: (int) number of steps per episode.
target_reward: (float) reward value of the target.
per_step_reward: (float) reward/cost of taking a step.
random_respawn: (bool) whether the agent respawns in a random location
upon collecting the goal.
seed: (int or None) seed for random number generator.
"""
self._maze_size = maze_size
self._num_labels = maze_size * maze_size
# The graph itself is the same across episodes, but the node labels will be
# randomly sampled in each episode.
self._graph = nx.grid_2d_graph(
self._maze_size, self._maze_size, periodic=True)
self._max_episode_steps = max_episode_steps
self._target_reward = target_reward
self._per_step_reward = per_step_reward
self._random_respawn = random_respawn
self._rng = np.random.RandomState(seed)
def _one_hot(self, node):
one_hot_vector = np.zeros([self._num_labels], dtype=np.int32)
one_hot_vector[self._labels[node]] = 1
return one_hot_vector
def step(self, action):
# If previous step was the last step of an episode, reset.
if self._needs_reset:
return self.reset()
# Increment step count and check if it's the last step of the episode.
self._episode_steps += 1
if self._episode_steps >= self._max_episode_steps:
self._needs_reset = True
transition = dm_env.termination
else:
transition = dm_env.transition
# Recompute agent's position given the selected action.
direction = self.DIRECTIONS[action]
self._position = tuple(
(np.array(self._position) + np.array(direction)) % self._maze_size)
self._previous_action = self.ACTION_NAMES[action]
# Get reward if agent is over the goal location and the selected action is
# `collect`.
if self._position == self._goal and self.ACTION_NAMES[action] == 'Collect':
reward = self._target_reward
self._set_new_goal()
else:
reward = self._per_step_reward
self._episode_reward += reward
return transition(reward, self._observation())
def _observation(self):
return {
'position': np.array(self._one_hot(self.position), dtype=np.int32),
'goal': np.array(self._one_hot(self.goal), dtype=np.int32),
}
def observation_spec(self):
return {
'position': dm_env.specs.Array(
shape=(self._num_labels,), dtype=np.int32, name='position'),
'goal': dm_env.specs.Array(
shape=(self._num_labels,), dtype=np.int32, name='goal'),
}
def action_spec(self):
return dm_env.specs.DiscreteArray(self.NUM_ACTIONS)
def take_random_action(self):
return self.step(self._rng.randint(self.NUM_ACTIONS))
def reset(self):
self._previous_action = ''
self._episode_reward = 0.
self._episode_steps = 0
self._needs_reset = False
random_labels = self._rng.permutation(self._num_labels)
self._labels = {n: random_labels[i]
for i, n in enumerate(self._graph.nodes())}
self._respawn()
self._set_new_goal()
return dm_env.restart(self._observation())
def _respawn(self):
random_idx = self._rng.randint(self._num_labels)
self._position = list(self._graph.nodes())[random_idx]
def _set_new_goal(self):
if self._random_respawn:
self._respawn()
goal = self._position
while goal == self._position:
random_idx = self._rng.randint(self._num_labels)
goal = list(self._graph.nodes())[random_idx]
self._goal = goal
@property
def position(self):
return self._position
@property
def goal(self):
return self._goal
@property
def previous_action(self):
return self._previous_action
@property
def episode_reward(self):
return self._episode_reward
def draw_maze(self, ax=None):
if ax is None:
plt.figure()
ax = plt.gca()
node_positions = {(x, y): (x, y) for x, y in self._graph.nodes()}
letters = string.ascii_uppercase + string.ascii_lowercase
labels = {n: letters[self._labels[n]] for n in self._graph.nodes()}
node_list = list(self._graph.nodes())
colors = []
for n in node_list:
if n == self.position:
colors.append('lightblue')
elif n == self.goal:
colors.append('lightgreen')
else:
colors.append('pink')
nx.draw(self._graph, pos=node_positions, nodelist=node_list, ax=ax,
node_color=colors, with_labels=True, node_size=200, labels=labels)
ax.set_title('{}\nEpisode reward={:.1f}'.format(
self.previous_action, self.episode_reward))
ax.margins(.1)
return plt.gcf(), ax
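# Illustrative usage sketch, not part of the original file: a uniformly random
# agent playing a single episode.
#
#   env = MemoryPlanningGame(maze_size=4, max_episode_steps=100, seed=0)
#   timestep = env.reset()
#   while not timestep.last():
#     timestep = env.take_random_action()
#   print('Episode reward:', env.episode_reward)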
|
deepmind-research-master
|
rapid_task_solving/memory_planning_game.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""One-shot StreetLearn environment."""
import dm_env
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
def deg_to_rad(x):
"""Convert degrees to radians."""
return x / 180. * np.pi
def rad_to_deg(x):
"""Convert radians to degrees."""
return x * 180. / np.pi
class OneShotStreetLearn(dm_env.Environment):
"""One-shot Streetlearn environment."""
ACTION_NAMES = [
'Forward',
'Left',
'Right',
'Collect',
]
NUM_ACTIONS = len(ACTION_NAMES)
def __init__(self, dataset_path, max_episode_steps, num_junctions=8,
target_reward=1., per_step_reward=0., observation_length=60,
seed=None):
self._graph = nx.read_gexf(dataset_path)
self._node_attrs = self._graph.nodes(data=True)
self._num_junctions = num_junctions
self._observation_length = observation_length
self._max_episode_steps = max_episode_steps
self._target_reward = target_reward
self._per_step_reward = per_step_reward
self._rng = np.random.RandomState(seed)
self.reset()
def reset(self):
self._previous_action = ''
self._episode_reward = 0.
self._episode_steps = 0
self._needs_reset = False
self._subgraph = self.get_random_subgraph()
self._observation_map = self.randomize_observations(self._subgraph)
self._position = self._rng.choice(list(self._subgraph.nodes()))
neighbours = self._neighbors_bearings(self._subgraph, self._position)
self._neighbour = neighbours[self._rng.randint(len(neighbours))]
self._set_new_goal()
return dm_env.restart(self._observation())
@property
def _current_edge(self):
return (self._position, self._neighbour['neighbour'])
def _set_new_goal(self):
goal = None
edges = list(self._observation_map.keys())
while goal is None or goal == self._current_edge:
goal = edges[self._rng.randint(len(edges))]
self._goal = goal
def _one_hot(self, edge):
one_hot_vector = np.zeros([self._observation_length], dtype=np.int32)
one_hot_vector[self._observation_map[edge]] = 1
return one_hot_vector
def _observation(self):
return {
'position': np.array(self._one_hot(self._current_edge), dtype=np.int32),
'goal': np.array(self._one_hot(self._goal), dtype=np.int32),
}
def observation_spec(self):
return {
'position': dm_env.specs.Array(
shape=(self._observation_length,), dtype=np.int32, name='position'),
'goal': dm_env.specs.Array(
shape=(self._observation_length,), dtype=np.int32, name='goal'),
}
def action_spec(self):
return dm_env.specs.DiscreteArray(self.NUM_ACTIONS)
def step(self, action):
# If previous step was the last step of an episode, reset.
if self._needs_reset:
return self.reset()
# Increment step count and check if it's the last step of the episode.
self._episode_steps += 1
if self._episode_steps >= self._max_episode_steps:
self._needs_reset = True
transition = dm_env.termination
else:
transition = dm_env.transition
# Recompute agent's position
self._move(action)
self._previous_action = self.ACTION_NAMES[action]
# Get reward if agent is at the goal location and the selected action is
# `collect`.
if (self._current_edge == self._goal and
self.ACTION_NAMES[action] == 'Collect'):
reward = self._target_reward
self._set_new_goal()
else:
reward = self._per_step_reward
self._episode_reward += reward
return transition(reward, self._observation())
def randomize_observations(self, subgraph):
edges = list(subgraph.edges())
edges.extend([(y, x) for (x, y) in edges])
obs_permutation = self._rng.permutation(self._observation_length)
return {e: obs_permutation[i] for i, e in enumerate(edges)}
def _calculate_bearing(self, node, neighbor):
lat1 = deg_to_rad(self._node_attrs[node]['lat'])
lng1 = deg_to_rad(self._node_attrs[node]['lng'])
lat2 = deg_to_rad(self._node_attrs[neighbor]['lat'])
lng2 = deg_to_rad(self._node_attrs[neighbor]['lng'])
delta_lng = lng2 - lng1
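    # Initial great-circle bearing (forward azimuth) from `node` to `neighbor`.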
theta = np.arctan2(
np.sin(delta_lng) * np.cos(lat2),
np.cos(lat1) * np.sin(lat2) -
np.sin(lat1) * np.cos(lat2) * np.cos(delta_lng))
return theta
def _neighbors_bearings(self, subgraph, node):
bearings = []
for neighbor in list(subgraph[node]):
orientation = self._calculate_bearing(node, neighbor)
bearings.append({'neighbour': neighbor, 'orientation': orientation})
bearings.sort(key=lambda x: x['orientation'])
return bearings
def _sort_neighbors(self, node, neighbour):
bearings = self._neighbors_bearings(self._subgraph, node)
bs = [x['orientation'] for x in bearings]
idx = np.argmin(np.abs(bs - neighbour['orientation']))
return {
'forward': bearings[idx],
'right': bearings[idx-1],
'left': bearings[(idx+1) % len(bearings)],
}
def _move(self, action):
neighbours = self._sort_neighbors(self._position, self._neighbour)
if action == 0:
new_node = self._neighbour['neighbour']
neighbours = self._sort_neighbors(new_node, neighbours['forward'])
new_neighbour = neighbours['forward']
else:
new_node = self._position
if action == 1:
new_neighbour = neighbours['left']
elif action == 2:
new_neighbour = neighbours['right']
else:
new_neighbour = self._neighbour
self._position = new_node
self._neighbour = new_neighbour
def _all_next_junctions(self, subgraph, node):
neighbors = list(subgraph[node])
edges = [self._get_next_junction(subgraph, node, nb) for nb in neighbors]
nodes = [y for (_, y) in edges]
return nodes, edges
def _get_next_junction(self, subgraph, initial_node, next_node):
node = initial_node
while subgraph.degree(next_node) == 2:
neighbours = list(subgraph.neighbors(next_node))
neighbours.remove(node)
node = next_node
next_node = neighbours.pop()
return (initial_node, next_node)
def get_random_subgraph(self):
graph = self._graph
num_nodes = len(graph)
rnd_index = self._rng.randint(num_nodes)
center_node = list(graph.nodes())[rnd_index]
while graph.degree(center_node) <= 2:
rnd_index = self._rng.randint(num_nodes)
center_node = list(graph.nodes())[rnd_index]
to_visit = [center_node]
visited = []
subgraph = nx.Graph()
while to_visit:
node = to_visit.pop(0)
visited.append(node)
new_nodes, new_edges = self._all_next_junctions(graph, node)
subgraph.add_edges_from(new_edges)
node_degrees = [subgraph.degree(n) for n in subgraph.nodes()]
count_junctions = len(list(filter(lambda x: x > 2, node_degrees)))
if count_junctions >= self._num_junctions:
break
new_nodes = filter(lambda x: x not in visited + to_visit, new_nodes)
to_visit.extend(new_nodes)
return subgraph
def draw_subgraph(self, ax=None):
if ax is None:
_ = plt.figure(figsize=(3, 3))
ax = plt.gca()
node_ids = list(self._subgraph.nodes())
pos = {
x: (self._node_attrs[x]['lat'], self._node_attrs[x]['lng'])
for x in node_ids
}
labels = {}
nc = 'pink'
ec = 'black'
ns = 50
nshape = 'o'
# Draw the current subgraph
nx.draw(self._subgraph, pos=pos, node_color=nc, with_labels=False,
node_size=ns, labels=labels, edgecolors=ec, node_shape=nshape,
ax=ax)
max_xy = np.array([np.array(x) for x in pos.values()]).max(0)
min_xy = np.array([np.array(x) for x in pos.values()]).min(0)
delta_xy = (max_xy - min_xy) / 6.
ax.set_xlim([min_xy[0] - delta_xy[0], max_xy[0] + delta_xy[0]])
ax.set_ylim([min_xy[1] - delta_xy[1], max_xy[1] + delta_xy[1]])
# Draw goal position and orientation
x = self._node_attrs[self._goal[0]]['lat']
y = self._node_attrs[self._goal[0]]['lng']
rotation = rad_to_deg(self._calculate_bearing(*self._goal))
_ = ax.plot(x, y, marker=(3, 0, rotation - 90), color=(0, 0, 0),
markersize=14, markerfacecolor='white')
_ = ax.plot(x, y, marker=(2, 0, rotation - 90), color=(0, 0, 0),
markersize=12, markerfacecolor='None')
# Draw current position and orientation
x = self._node_attrs[self._position]['lat']
y = self._node_attrs[self._position]['lng']
rotation = rad_to_deg(self._neighbour['orientation'])
_ = ax.plot(x, y, marker=(3, 0, rotation - 90), color=(0, 0, 0),
markersize=14, markerfacecolor='lightgreen')
_ = ax.plot(x, y, marker=(2, 0, rotation - 90), color=(0, 0, 0),
markersize=12, markerfacecolor='None')
ax.set_title('{}\nEpisode reward = {}'.format(
self._previous_action, self._episode_reward))
return plt.gcf(), ax
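# Illustrative usage sketch, not part of the original file; 'graph.gexf' is a
# placeholder path to a StreetLearn graph in GEXF format:
#
#   env = OneShotStreetLearn('graph.gexf', max_episode_steps=200, seed=0)
#   timestep = env.reset()
#   while not timestep.last():
#     action = np.random.randint(env.NUM_ACTIONS)
#     timestep = env.step(action)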
|
deepmind-research-master
|
rapid_task_solving/one_shot_streetlearn.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Submission to Unrestricted Adversarial Challenge."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
from unrestricted_advex import eval_kit
def _preprocess_image(image):
image = tf.image.central_crop(image, central_fraction=0.875)
image = tf.image.resize_bilinear(image, [224, 224], align_corners=False)
return image
def test_preprocess(image):
image = _preprocess_image(image)
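  # Rescale images from [0, 1] to [-1, 1] (Inception-style preprocessing).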
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
def main():
g = tf.Graph()
with g.as_default():
input_tensor = tf.placeholder(tf.float32, (None, 224, 224, 3))
x_np = test_preprocess(input_tensor)
raw_module_1 = hub.Module(
"https://tfhub.dev/deepmind/llr-pretrain-adv/latents/1")
raw_module_2 = hub.Module(
"https://tfhub.dev/deepmind/llr-pretrain-adv/linear/1")
latents = raw_module_1(dict(inputs=x_np, decay_rate=0.1))
logits = raw_module_2(dict(inputs=latents))
logits = tf.squeeze(logits, axis=[1, 2])
two_class_logits = tf.concat([tf.nn.relu(-logits[:, 1:]),
tf.nn.relu(logits[:, 1:])], axis=1)
sess = tf.train.SingularMonitoredSession()
def model(x_np):
return sess.run(two_class_logits, feed_dict={input_tensor: x_np})
eval_kit.evaluate_bird_or_bicycle_model(model, model_name="llr_resnet")
if __name__ == "__main__":
main()
|
deepmind-research-master
|
unrestricted_advx/main.py
|