| python_code | repo_name | file_path |
|---|---|---|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xmanager.cloud.kubernetes."""
import sys
import unittest
from unittest import mock
from absl import flags
from absl.testing import parameterized
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
from xmanager import xm
from xmanager.cloud import kubernetes
from xmanager.xm_local import executables as local_executables
from xmanager.xm_local import executors as local_executors
_TEST_SERVICE_ACCOUNT_NAME = 'test-service-account'
_DEFAULT_SERVICE_ACCOUNT_NAME = 'default'
_SERVICE_ACCOUNT_FLAG_TEST_PARAMETERS = [
{
'sys_argv': sys.argv,
'expected_account_name': _DEFAULT_SERVICE_ACCOUNT_NAME,
},
{
'sys_argv': [
*sys.argv,
f'--xm_k8s_service_account_name={_TEST_SERVICE_ACCOUNT_NAME}',
],
'expected_account_name': _TEST_SERVICE_ACCOUNT_NAME,
},
]
class CallAPIResponse:
items = []
class KubernetesTest(parameterized.TestCase):
@parameterized.parameters(_SERVICE_ACCOUNT_FLAG_TEST_PARAMETERS)
def test_launch(self, sys_argv, expected_account_name):
flags.FLAGS(sys_argv)
fake_client = mock.Mock()
fake_client.call_api.return_value = CallAPIResponse()
client = kubernetes.Client(fake_client)
job = xm.Job(
name='test-job',
executable=local_executables.GoogleContainerRegistryImage(
name='test-image',
image_path='image-path',
args=xm.SequentialArgs.from_collection({'a': 1}),
),
executor=local_executors.Kubernetes(
xm.JobRequirements(cpu=1, ram=1, t4=2)
),
args={'b': 2, 'c': 3},
)
expected_service = k8s_client.V1Service(
metadata=k8s_client.V1ObjectMeta(name='experiments'),
spec=k8s_client.V1ServiceSpec(
selector={'service': 'experiments'},
cluster_ip='None',
),
)
expected_job = k8s_client.V1Job(
metadata=k8s_client.V1ObjectMeta(name='test-job'),
spec=k8s_client.V1JobSpec(
template=k8s_client.V1PodTemplateSpec(
metadata=k8s_client.V1ObjectMeta(
labels={'service': 'experiments'},
annotations={},
),
spec=k8s_client.V1PodSpec(
service_account=expected_account_name,
hostname='test-job',
subdomain='experiments',
restart_policy='Never',
containers=[
k8s_client.V1Container(
name='test-job',
image='image-path',
resources=k8s_client.V1ResourceRequirements(
limits={
'cpu': '1',
'memory': '1',
'nvidia.com/gpu': '2',
},
),
args=['--a=1', '--b=2', '--c=3'],
env=[],
)
],
node_selector={
'cloud.google.com/gke-accelerator': 'nvidia-tesla-t4',
},
),
),
backoff_limit=0,
),
)
client.launch(lambda x: x, [job])
[_, service_call, job_call] = fake_client.call_api.call_args_list
_, service_kwargs = service_call
self.assertEqual(service_kwargs['body'], expected_service)
_, job_kwargs = job_call
self.assertEqual(job_kwargs['body'], expected_job)
@parameterized.product(
inside_cluster=[True, False],
)
def test_config_load(self, inside_cluster):
load_incluster_side_effect = (
k8s_config.ConfigException() if not inside_cluster else None
)
with mock.patch.object(
k8s_client, 'ApiClient', return_value=None
), mock.patch.object(
k8s_config,
'load_incluster_config',
side_effect=load_incluster_side_effect,
return_value=None,
) as mock_load_incluster, mock.patch.object(
k8s_config, 'load_kube_config', return_value=None
) as mock_load_kube:
kubernetes.Client(None)
if inside_cluster:
mock_load_incluster.assert_called_once_with()
else:
mock_load_kube.assert_called_once_with()
def test_requirements_from_executor(self):
executor = local_executors.Kubernetes(
requirements=xm.JobRequirements(cpu=1, ram=1 * xm.GiB)
)
requirements = kubernetes.requirements_from_executor(executor).to_dict()
self.assertDictEqual(
requirements['limits'],
{
'cpu': '1',
'memory': str(2**30),
},
)
def test_requirements_from_executor_gpu(self):
executor = local_executors.Kubernetes(
requirements=xm.JobRequirements(v100=4)
)
requirements = kubernetes.requirements_from_executor(executor).to_dict()
self.assertDictEqual(requirements['limits'], {'nvidia.com/gpu': '4'})
def test_requirements_from_executor_empty(self):
executor = local_executors.Kubernetes()
requirements = kubernetes.requirements_from_executor(executor).to_dict()
self.assertDictEqual(requirements['limits'], {})
def test_annotations_from_executor_tpu(self):
executor = local_executors.Kubernetes(xm.JobRequirements(tpu_v2=8))
self.assertDictEqual(
kubernetes.annotations_from_executor(executor),
{'tf-version.cloud-tpus.google.com': 'nightly'},
)
def test_annotations_from_executor_gpu(self):
executor = local_executors.Kubernetes(xm.JobRequirements(v100=4))
self.assertDictEqual(kubernetes.annotations_from_executor(executor), {})
def test_node_selector_from_executor_gpu(self):
executor = local_executors.Kubernetes(xm.JobRequirements(v100=4))
self.assertDictEqual(
kubernetes.node_selector_from_executor(executor),
{'cloud.google.com/gke-accelerator': 'nvidia-tesla-v100'},
)
def test_node_selector_from_executor_tpu(self):
executor = local_executors.Kubernetes(xm.JobRequirements(tpu_v2=8))
self.assertDictEqual(kubernetes.node_selector_from_executor(executor), {})
def test_node_selector_from_executor_empty(self):
executor = local_executors.Kubernetes(xm.JobRequirements())
self.assertDictEqual(kubernetes.node_selector_from_executor(executor), {})
if __name__ == '__main__':
unittest.main()
|
xmanager-main
|
xmanager/cloud/kubernetes_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds images for XManager Docker executables."""
import os
import shutil
import tempfile
from typing import Dict, List, Optional
from absl import flags
from docker.utils import utils as docker_utils
from xmanager import xm
from xmanager.cloud import auth
from xmanager.cloud import cloud_build
from xmanager.cloud import docker_lib
from xmanager.docker import docker_adapter
from xmanager.xm import utils
_BUILD_IMAGE_LOCALLY = flags.DEFINE_boolean(
'xm_build_image_locally',
True,
(
'Use local Docker to build images instead of remote Google Cloud Build.'
' This is usually a lot faster but requires docker to be installed.'
),
)
_USE_DOCKER_COMMAND = flags.DEFINE_boolean(
'xm_use_docker_command',
True,
(
'Call "docker build" in a subprocess rather than using Python docker '
'client library when building the docker image locally. This provies a '
'much nicer output for interactive use.'
),
)
_SHOW_DOCKER_COMMAND_PROGRESS = flags.DEFINE_boolean(
'xm_show_docker_command_progress',
False,
'Show container output during the "docker build".',
)
_WRAP_LATE_BINDINGS = flags.DEFINE_boolean(
'xm_wrap_late_bindings',
False,
(
'Feature flag to wrap and unwrap late bindings for network addresses. '
'ONLY works with PythonContainer with default instructions or simple '
'instructions that do not modify the file directory. '
'REQUIRES ./entrypoint.sh to be the ENTRYPOINT.'
),
)
# TODO: Find a master image that is compatible with every
# combination (TF, Torch, JAX) X (CPU, GPU, TPU).
_DEFAULT_BASE_IMAGE = 'gcr.io/deeplearning-platform-release/base-cu110'
_DOCKERFILE_TEMPLATE = """
FROM {base_image}
RUN if ! id 1000; then useradd -m -u 1000 clouduser; fi
{instructions}
COPY entrypoint.sh ./entrypoint.sh
RUN chown -R 1000:root ./entrypoint.sh && chmod -R 775 ./entrypoint.sh
{entrypoint}
"""
_ENTRYPOINT_TEMPLATE = """#!/bin/bash
if [[ ! -z "$KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS" ]]; then
# TPU is available; set up expected env vars.
TPU_IP_AND_PORT="${{KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS:7}}"
TPU_ADDRESS="$(cut -d':' -f1 <<< "$TPU_IP_AND_PORT")"
echo "TPU_ADDRESS is $TPU_ADDRESS"
export TPU_ADDRESS
# Sometimes TPUs are not ready yet when the job starts. Wait until they are.
while ! nc -z $TPU_ADDRESS 8470 ; do sleep 5 ; done
# Expected by PyTorch.
export XRT_TPU_CONFIG="tpu_worker;0;$TPU_IP_AND_PORT"
fi
{cmds}
"""
def build(
py_executable: xm.PythonContainer,
args: xm.SequentialArgs,
env_vars: Dict[str, str],
image_name: Optional[str] = None,
project: Optional[str] = None,
bucket: Optional[str] = None,
pull_image: bool = False,
) -> str:
"""Build a Docker image from a Python project.
Args:
py_executable: The PythonContainer to build.
args: Args to pass to the image.
env_vars: Environment variables to set in the image.
image_name: The image name that will be assigned to the resulting image.
project: The project to use if CloudBuild is used.
bucket: The bucket to upload if CloudBuild is used.
pull_image: Whether to pull the image if CloudBuild is used.
Returns:
The name of the built image.
"""
if not image_name:
image_name = _get_image_name(py_executable)
dockerfile = _create_dockerfile(py_executable, args, env_vars)
entrypoint = _create_entrypoint(py_executable)
dirname = os.path.basename(py_executable.path)
python_path = py_executable.path
with tempfile.TemporaryDirectory() as wrapped_directory:
if _WRAP_LATE_BINDINGS.value:
_wrap_late_bindings(wrapped_directory, python_path, dockerfile)
python_path = wrapped_directory
dockerfile = os.path.join(python_path, 'Dockerfile')
with tempfile.TemporaryDirectory() as staging:
docker_lib.prepare_directory(
staging, python_path, dirname, entrypoint, dockerfile
)
return build_by_dockerfile(
staging,
os.path.join(staging, 'Dockerfile'),
image_name,
project,
bucket,
pull_image,
)
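# Minimal usage sketch for `build` (hedged: the project path, module name,
# flag and env var below are hypothetical placeholders):
#
#   executable_spec = xm.PythonContainer(
#       path='/path/to/my_project',
#       entrypoint=xm.ModuleName('my_project.main'),
#   )
#   image = build(
#       executable_spec,
#       args=xm.SequentialArgs.from_collection({'batch_size': 64}),
#       env_vars={'MY_ENV_VAR': '1'},
#   )
#   push(image)  # pushes the built image to its registry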
def build_by_dockerfile(
path: str,
dockerfile: str,
image_name: str,
project: Optional[str] = None,
bucket: Optional[str] = None,
pull_image: bool = False,
):
"""Build a Docker image from a Docker directory.
Args:
path: The directory to use for the Docker build context.
dockerfile: The path of Dockerfile.
image_name: The name to set the built image to.
project: The project to use if CloudBuild is used.
bucket: The bucket to upload if CloudBuild is used.
pull_image: Whether to pull the image if CloudBuild is used.
Returns:
The name of the built image.
"""
print('Building Docker image, please wait...')
if _BUILD_IMAGE_LOCALLY.value:
if docker_lib.is_docker_installed():
# TODO: Improve out-of-disk space handling.
return docker_lib.build_docker_image(
image_name,
path,
dockerfile,
use_docker_command=_USE_DOCKER_COMMAND.value,
show_docker_command_progress=_SHOW_DOCKER_COMMAND_PROGRESS.value,
)
print('Falling back to CloudBuild. See INFO log for details.')
# If Dockerfile is not a direct child of path, then create a temp directory
# that contains both the contents of path and Dockerfile.
with tempfile.TemporaryDirectory() as tempdir:
if os.path.dirname(dockerfile) != path:
new_path = os.path.join(tempdir, os.path.basename(path))
shutil.copytree(path, new_path)
shutil.copyfile(dockerfile, os.path.join(new_path, 'Dockerfile'))
path = new_path
cloud_build_client = cloud_build.Client(project=project, bucket=bucket)
repository, _ = docker_utils.parse_repository_tag(image_name)
upload_name = repository.split('/')[-1]
cloud_build_client.build_docker_image(image_name, path, upload_name)
if pull_image:
docker_adapter.instance().pull_image(image_name)
return image_name
def push(image: str) -> str:
return docker_lib.push_docker_image(image)
def _get_image_name(py_executable: xm.PythonContainer) -> str:
image_name = os.path.basename(py_executable.path)
project_name = auth.get_project_name()
tag = docker_lib.create_tag()
return f'gcr.io/{project_name}/{image_name}:{tag}'
def _get_base_image(py_executable: xm.PythonContainer) -> str:
if py_executable.base_image:
return py_executable.base_image
return _DEFAULT_BASE_IMAGE
def _create_instructions(
py_executable: xm.PythonContainer, env_vars: Dict[str, str]
) -> str:
"""Create Docker instructions."""
set_env_vars = [f'ENV {key}="{value}"' for key, value in env_vars.items()]
if py_executable.docker_instructions:
return '\n'.join(py_executable.docker_instructions + set_env_vars)
directory = os.path.basename(py_executable.path)
return '\n'.join(
list(default_steps(directory, py_executable.use_deep_module))
+ set_env_vars
)
def default_steps(directory: str, use_deep_module: bool) -> List[str]:
"""Default commands to use in the Dockerfile."""
workdir_setup_prefix = []
workdir_setup_suffix = []
project_dir = f'/{directory}'
if use_deep_module:
# Setting a top-level work dir allows using the Python code without
# modifying import statements.
workdir_setup_prefix = [
'RUN mkdir /workdir',
'WORKDIR /workdir',
]
project_dir = f'/workdir/{directory}'
else:
workdir_setup_suffix = [
f'WORKDIR {directory}',
]
return (
workdir_setup_prefix
+ [
# Without setting LANG, RDL ran into a UnicodeDecodeError, similar to
# what is described at [1]. Setting it seems to be good practice and
# does not hurt, so we always set it.
# [1] https://github.com/spotDL/spotify-downloader/issues/279
'ENV LANG=C.UTF-8',
# Updating and installing on the same line causes cache-busting.
# https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run
'RUN apt-get update && apt-get install -y git netcat',
'RUN python -m pip install --upgrade pip',
f'COPY {directory}/requirements.txt {project_dir}/requirements.txt',
f'RUN python -m pip install -r {directory}/requirements.txt',
# It is best practice to copy the project directory as late as
# possible, rather than at the beginning. This allows Docker to reuse
# cached layers. If copying the project files were the first step, a
# tiny modification to the source code would invalidate the cache.
# https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#add-or-copy
f'COPY {directory}/ {project_dir}',
# Change ownership of project_dir so that both UID 1000 and root
# are co-owners of it.
f'RUN chown -R 1000:root {project_dir} && chmod -R 775 {project_dir}',
]
+ workdir_setup_suffix
)
def _create_dockerfile(
py_executable: xm.PythonContainer,
args: xm.SequentialArgs,
env_vars: Dict[str, str],
) -> str:
"""Creates a Dockerfile from a project executable."""
base_image = _get_base_image(py_executable)
instructions = _create_instructions(py_executable, env_vars)
entrypoint = _create_entrypoint_cmd(args)
contents = _DOCKERFILE_TEMPLATE.format(
base_image=base_image, instructions=instructions, entrypoint=entrypoint
)
print('Dockerfile:', contents, sep='\n')
t = tempfile.NamedTemporaryFile(delete=False)
with open(t.name, 'w') as f:
f.write(contents)
return t.name
def _get_entrypoint_commands(py_executable: xm.PythonContainer) -> str:
"""Given the executable, return entrypoint commands."""
if isinstance(py_executable.entrypoint, xm.ModuleName):
cmds = [f'python -m {py_executable.entrypoint.module_name}']
elif isinstance(py_executable.entrypoint, xm.CommandList):
# Commands specified by the user are passed unchanged.
cmds = py_executable.entrypoint.commands
else:
raise ValueError(
'Unsupported entrypoint type {}'.format(type(py_executable.entrypoint))
)
cmds = '\n'.join(cmds)
# Allow passing extra parameters to the commands.
if not cmds.endswith(('$@', '"$@"')):
cmds = cmds + ' "$@"'
return cmds
def _create_entrypoint(py_executable: xm.PythonContainer) -> str:
"""Create a bash entrypoint based on the base image."""
contents = _ENTRYPOINT_TEMPLATE.format(
cmds=_get_entrypoint_commands(py_executable)
)
t = tempfile.NamedTemporaryFile(delete=False)
with open(t.name, 'w') as f:
f.write(contents)
return t.name
def _create_entrypoint_cmd(args: xm.SequentialArgs) -> str:
"""Create the entrypoint command with optional args."""
entrypoint_args = ['./entrypoint.sh']
entrypoint_args.extend(args.to_list(utils.ARG_ESCAPER))
entrypoint = ', '.join([f'"{arg}"' for arg in entrypoint_args])
return f'ENTRYPOINT [{entrypoint}]'
def _wrap_late_bindings(destination: str, path: str, dockerfile: str) -> None:
"""Create a new path and dockerfile to wrap/unwrap late-bindings.
TODO: Rather than only working on PythonContainer, this method could
also work on PrebuiltContainers, by inspecting the entrypoint with
`docker.APIClient().inspect_image()`.
Late bindings are specially formatted strings that are evaluated at runtime.
Their primary use is to find the addresses of other jobs in Vertex AI, which
are only known at runtime and cannot be statically defined.
Args:
destination: An empty destination to contain the new project path and the
new dockerfile will be destination/Dockerfile. The current contents of
destination will be deleted.
path: The current project path to build.
dockerfile: The current dockerfile path needed to build the project.
"""
shutil.rmtree(destination)
shutil.copytree(path, destination)
root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
shutil.copyfile(
os.path.join(root_dir, 'cloud', 'data', 'wrapped_entrypoint.sh'),
os.path.join(destination, 'wrapped_entrypoint.sh'),
)
shutil.copyfile(
os.path.join(root_dir, 'cloud', 'utils.py'),
os.path.join(destination, 'vertex_utils.py'),
)
shutil.copyfile(
os.path.join(root_dir, 'vizier', 'vizier_cloud', 'vizier_worker.py'),
os.path.join(destination, 'vizier_worker.py'),
)
new_dockerfile = os.path.join(destination, 'Dockerfile')
insert_instructions = [
'RUN chmod +x ./wrapped_entrypoint.sh',
]
with open(dockerfile) as f:
contents = f.read()
contents = contents.replace(
'ENTRYPOINT', '\n'.join(insert_instructions + ['ENTRYPOINT'])
)
contents = contents.replace(
'ENTRYPOINT ["./entrypoint.sh', 'ENTRYPOINT ["./wrapped_entrypoint.sh'
)
with open(new_dockerfile, 'w') as f:
f.write(contents)
|
xmanager-main
|
xmanager/cloud/build_image.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with Cloud Build."""
import datetime
import getpass
import tarfile
import tempfile
import time
from typing import Any, Dict, Optional
import warnings
from absl import flags
from docker.utils import utils as docker_utils
from google.cloud import storage
from googleapiclient import discovery
import termcolor
from xmanager.cloud import auth
_CLOUD_BUILD_TIMEOUT_SECONDS = flags.DEFINE_integer(
'xm_cloud_build_timeout_seconds',
1200,
(
'The amount of time that builds should be allowed to run, '
'to second granularity.'
),
)
_USE_CLOUD_BUILD_CACHE = flags.DEFINE_boolean(
'xm_use_cloud_build_cache',
False,
( # pylint:disable=g-line-too-long
'Use Cloud Build cache to speed up the Docker build. '
'An image with the same name tagged as :latest should exist. '
'More details at'
' https://cloud.google.com/cloud-build/docs/speeding-up-builds#using_a_cached_docker_image'
),
)
_USE_KANIKO = flags.DEFINE_boolean(
'xm_use_kaniko',
False,
'Use kaniko backend for Cloud Build and enable caching.',
)
_KANIKO_CACHE_TTL = flags.DEFINE_string(
'xm_kaniko_cache_ttl', '336h', 'Cache ttl to use for kaniko builds.'
)
_CLOUD_SDK_CREDENTIALS_WARNING = """\
Your application has authenticated using end user credentials from Google \
Cloud SDK without a quota project. You might receive a "quota exceeded" \
or "API not enabled" error. We recommend you rerun \
`gcloud auth application-default login` and make sure a quota project is \
added. Or you can use service accounts instead. For more information \
about service accounts, see https://cloud.google.com/docs/authentication/"""
warnings.filterwarnings('ignore', _CLOUD_SDK_CREDENTIALS_WARNING)
class Client:
"""Cloud Build Client."""
def __init__(
self,
project: Optional[str] = None,
bucket: Optional[str] = None,
credentials=None,
cloud_build_timeout_seconds: Optional[int] = None,
use_cloud_build_cache: Optional[bool] = None,
use_kaniko: Optional[bool] = None,
kaniko_cache_ttl: Optional[str] = None,
):
"""Create the Cloud Build Client.
Args:
project: Name of the GCP project to use for Cloud Build calls and for
storing the data passed to Cloud Build. If not specified the project of
the default credentials for the current environment is used.
bucket: Bucket used to store data passed to Cloud Build. If not specified
uses the value from the GOOGLE_CLOUD_BUCKET_NAME environment variable.
credentials: OAuth2 Credentials to use for Cloud Build & storage calls. If
None gets the default credentials for the current environment.
cloud_build_timeout_seconds: The amount of time that builds should be
allowed to run. If None, defaults to `--xm_cloud_build_timeout_seconds`.
use_cloud_build_cache: Whether to use Cloud Build cache to speed up the
Docker build. If None defaults to `--xm_use_cloud_build_cache`. An image
with the same name tagged as :latest should exist. More details at
https://cloud.google.com/cloud-build/docs/speeding-up-builds#using_a_cached_docker_image
use_kaniko: Use kaniko backend for Cloud Build and enable caching. If None
defaults to `--xm_use_kaniko`.
kaniko_cache_ttl: Cache ttl to use for kaniko builds. If None defaults to
`--xm_kaniko_cache_ttl`.
"""
self.project = project or auth.get_project_name()
self.bucket = bucket or auth.get_bucket()
self.credentials = credentials or auth.get_creds()
if cloud_build_timeout_seconds is None:
cloud_build_timeout_seconds = _CLOUD_BUILD_TIMEOUT_SECONDS.value
self.cloud_build_timeout_seconds = cloud_build_timeout_seconds
if use_cloud_build_cache is None:
use_cloud_build_cache = _USE_CLOUD_BUILD_CACHE.value
self.use_cloud_build_cache = use_cloud_build_cache
if use_kaniko is None:
use_kaniko = _USE_KANIKO.value
self.use_kaniko = use_kaniko
if kaniko_cache_ttl is None:
kaniko_cache_ttl = _KANIKO_CACHE_TTL.value
self.kaniko_cache_ttl = kaniko_cache_ttl
self.cloudbuild_api = None # discovery CloudBuild v1 client
def upload_tar_to_storage(self, archive_path: str, destination_name: str):
storage_client = storage.Client(
project=self.project, credentials=self.credentials
)
bucket = storage_client.bucket(self.bucket)
blob = bucket.blob(destination_name)
blob.upload_from_filename(archive_path)
def build_docker_image(
self, image: str, directory: str, upload_name: str
) -> str:
"""Create a Docker image via Cloud Build and push to Cloud Repository."""
repository, tag = docker_utils.parse_repository_tag(image)
if not tag:
tag = datetime.datetime.now().strftime('%Y%m%d-%H%M%S_%f')
_, archive_path = tempfile.mkstemp(suffix='.tar.gz')
with tarfile.open(archive_path, 'w:gz') as tar:
tar.add(directory, '/')
destination_path = f'{getpass.getuser()}/{upload_name}-{tag}.tar.gz'
self.upload_tar_to_storage(archive_path, destination_path)
build_body = self._build_request_body(destination_path, repository, tag)
# Note: On GCP cache_discovery=True (the default) leads to ugly error
# messages as file_cache is unavailable.
if not self.cloudbuild_api:
self.cloudbuild_api = discovery.build(
'cloudbuild',
'v1',
credentials=self.credentials,
cache_discovery=False,
)
create_op = (
self.cloudbuild_api.projects()
.builds()
.create(projectId=self.project, body=build_body)
.execute()
)
log_url = create_op['metadata']['build']['logUrl']
print('Cloud Build link:', termcolor.colored(log_url, color='blue'))
build_id = create_op['metadata']['build']['id']
return self.wait_for_build(build_id, f'{repository}:{tag}')
def _build_request_body(self, bucket_path, repository, tag) -> Dict[str, Any]:
"""Builds the Cloud Build create_build_request body."""
body = {
'source': {
'storageSource': {
'bucket': self.bucket,
'object': bucket_path,
},
},
'timeout': str(self.cloud_build_timeout_seconds) + 's',
}
if self.use_kaniko:
body.update(
{
'steps': [{
'name': 'gcr.io/kaniko-project/executor:latest',
'args': [
f'--destination={repository}:{tag}',
f'--destination={repository}:latest',
'--cache=true',
f'--cache-ttl={self.kaniko_cache_ttl}',
],
}]
}
)
else:
args_for_cached_image = (
['--cache-from', f'{repository}:latest']
if self.use_cloud_build_cache
else []
)
body.update({
'steps': [{
'name': 'gcr.io/cloud-builders/docker',
'args': (
[
'build',
'-t',
f'{repository}:{tag}',
'-t',
f'{repository}:latest',
]
+ args_for_cached_image
+ ['.']
),
}],
'options': {'machineType': 'E2_HIGHCPU_32'},
'images': [repository],
})
return body
def wait_for_build(self, build_id: str, kaniko_image: str) -> str:
"""Waits for build to finish and return the image URI of the result."""
backoff = 30 # seconds
while True:
time.sleep(backoff)
result = (
self.cloudbuild_api.projects()
.builds()
.get(projectId=self.project, id=build_id)
.execute()
)
status = result['status']
print('Cloud Build status:', status)
if status == 'SUCCESS':
image_uri = kaniko_image
if not self.use_kaniko:
# Note: Not sure if this is needed. Could we always use the uri above?
image = result['results']['images'][0]
image_uri = f'{image["name"]}@{image["digest"]}'
break
elif status == 'FAILURE':
print(
'Build FAILED. See logs for more information:',
termcolor.colored(result['logUrl'], color='red'),
)
raise RuntimeError('Build FAILED.')
elif status == 'QUEUED' or status == 'WORKING':
continue
elif status == 'INTERNAL_ERROR' or status == 'CANCELLED':
print('Cloud Build tool failure. Status:', status)
raise RuntimeError('Cloud Build tool failed. Try again.')
else:
print(
'Build not complete. See logs for more information:',
termcolor.colored(result['logUrl'], color='red'),
)
raise RuntimeError('Build FAILED.')
print('Your image URI is:', termcolor.colored(image_uri, color='blue'))
print(
'You can run your image locally via:\n'
+ termcolor.colored('docker run ' + image_uri, color='green')
)
return image_uri
|
xmanager-main
|
xmanager/cloud/cloud_build.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for building Docker images."""
import datetime
import os
import pathlib
import shutil
import subprocess
import sys
from typing import Optional
from absl import logging
import docker
from docker.utils import utils as docker_utils
import humanize
import termcolor
def create_tag() -> str:
return datetime.datetime.now().strftime('%Y%m%d-%H%M%S-%f')
def prepare_directory(
destination_directory: str,
source_directory: str,
project_name: str,
entrypoint_file: str,
dockerfile: str,
) -> None:
"""Stage all inputs into the destination directory.
Args:
destination_directory: The directory to copy files to.
source_directory: The directory to copy files from.
project_name: The name of the folder inside destination_directory/ that
source_directory/ files will be copied to.
entrypoint_file: The file path of entrypoint.sh.
dockerfile: The file path of Dockerfile.
"""
source_path = pathlib.Path(source_directory)
size = sum(f.stat().st_size for f in source_path.glob('**/*') if f.is_file())
print(f'Size of Docker input: {humanize.naturalsize(size)}')
if size > 200 * 10**6:
print(
termcolor.colored(
(
'You are trying to pack over 200MB into a Docker image. '
'Large images negatively impact build times'
),
color='magenta',
)
)
shutil.copytree(
source_directory, os.path.join(destination_directory, project_name)
)
shutil.copyfile(dockerfile, os.path.join(destination_directory, 'Dockerfile'))
shutil.copyfile(
entrypoint_file, os.path.join(destination_directory, 'entrypoint.sh')
)
def is_docker_installed() -> bool:
"""Checks if Docker is installed and accessible."""
try:
docker_client = docker.from_env()
logging.info('Local docker: %s', docker_client.version())
return True
except docker.errors.DockerException as e:
if 'No such file or directory' in str(e):
# This is the expected case when Docker is not installed, so we don't log
# anything, and just return False. The other error branches indicate
# something wrong with the Docker installation, so we log an error and
# also return False.
return False
logging.info(e)
if 'Permission denied' in str(e):
print(
'Looks like there is a permission problem with docker. '
'Did you install sudoless docker?'
)
return False
def build_docker_image(
image: str,
directory: str,
dockerfile: Optional[str] = None,
use_docker_command: bool = True,
show_docker_command_progress: bool = False,
) -> str:
"""Builds a Docker image locally."""
logging.info('Building Docker image')
docker_client = docker.from_env()
if not dockerfile:
dockerfile = os.path.join(directory, 'Dockerfile')
if use_docker_command:
_build_image_with_docker_command(
docker_client,
directory,
image,
dockerfile,
show_docker_command_progress,
)
else:
_build_image_with_python_client(docker_client, directory, image, dockerfile)
logging.info('Building docker image: Done')
return image
def push_docker_image(image: str) -> str:
"""Pushes a Docker image to the designated repository."""
docker_client = docker.from_env()
repository, tag = docker_utils.parse_repository_tag(image)
push = docker_client.images.push(repository=repository, tag=tag)
logging.info(push)
if not isinstance(push, str) or '"Digest":' not in push:
raise RuntimeError(
'Expected docker push to return a string with `status: Pushed` and a '
'Digest. This is probably a temporary issue with '
'--xm_build_image_locally and you should try again'
)
# If we are pushing an image, then :latest should also be present.
docker_client.images.push(repository=repository, tag='latest')
print('Your image URI is:', termcolor.colored(image, color='blue'))
return image
def _build_image_with_docker_command(
client: docker.DockerClient,
path: str,
image_tag: str,
dockerfile: str,
progress: bool = False,
) -> None:
"""Builds a Docker image by calling `docker build` within a subprocess."""
version = client.version()['Version']
[major, minor] = version.split('.')[:2]
if (int(major), int(minor)) < (20, 10):
# docker buildx requires docker 20.10.
raise RuntimeError('XCloud requires Docker Engine version 20.10+.')
repository, tag = docker_utils.parse_repository_tag(image_tag)
if not tag:
tag = 'latest'
command = [
'docker',
'buildx',
'build',
'-t',
f'{repository}:{tag}',
'-t',
f'{repository}:latest',
'-f',
dockerfile,
path,
]
# Add flags to show progress and disable the cache.
# Caching prevents the actual commands in a layer from executing,
# which in turn makes displaying progress redundant.
if progress:
command[2:2] = ['--progress', 'plain', '--no-cache']
subprocess.run(
command, check=True, env={**os.environ, 'DOCKER_BUILDKIT': '1'}
)
def _build_image_with_python_client(
client: docker.DockerClient, path: str, image_tag: str, dockerfile: str
) -> None:
"""Builds a Docker image by calling the Docker Python client."""
repository, tag = docker_utils.parse_repository_tag(image_tag)
if not tag:
tag = 'latest'
try:
# The `tag=` arg refers to the full repository:tag image name.
_, logs = client.images.build(
path=path, tag=f'{repository}:{tag}', dockerfile=dockerfile
)
client.images.build(
path=path, tag=f'{repository}:latest', dockerfile=dockerfile
)
except docker.errors.BuildError as error:
for log in error.build_log:
print(log.get('stream', ''), end='', file=sys.stderr)
raise error
for log in logs:
print(log.get('stream', ''), end='')
|
xmanager-main
|
xmanager/cloud/docker_lib.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cloud_build."""
from unittest import mock
from absl.testing import absltest
from xmanager.cloud import cloud_build
class CloudBuildTest(absltest.TestCase):
def test_build_request_body(self):
client = cloud_build.Client(
'my-project',
'my-bucket',
mock.Mock(),
use_kaniko=False,
use_cloud_build_cache=False,
)
image = client._build_request_body('path/to/project', 'my-image', 'live')
self.assertEqual(
image,
{
'images': ['my-image'],
'options': {'machineType': 'E2_HIGHCPU_32'},
'source': {
'storageSource': {
'bucket': 'my-bucket',
'object': 'path/to/project',
},
},
'steps': [{
'args': [
'build',
'-t',
'my-image:live',
'-t',
'my-image:latest',
'.',
],
'name': 'gcr.io/cloud-builders/docker',
}],
'timeout': '1200s',
},
)
def test_build_request_body_use_kaniko(self):
client = cloud_build.Client(
'my-project',
'my-bucket',
mock.Mock(),
use_kaniko=True,
use_cloud_build_cache=False,
)
image = client._build_request_body('path/to/project', 'my-image', 'live')
self.assertEqual(
image,
{
'source': {
'storageSource': {
'bucket': 'my-bucket',
'object': 'path/to/project',
},
},
'steps': [{
'args': [
'--destination=my-image:live',
'--destination=my-image:latest',
'--cache=true',
'--cache-ttl=336h',
],
'name': 'gcr.io/kaniko-project/executor:latest',
}],
'timeout': '1200s',
},
)
def test_build_request_body_use_build_cache(self):
client = cloud_build.Client(
'my-project',
'my-bucket',
mock.Mock(),
use_kaniko=False,
use_cloud_build_cache=True,
)
image = client._build_request_body('path/to/project', 'my-image', 'live')
self.assertEqual(
image,
{
'images': ['my-image'],
'options': {'machineType': 'E2_HIGHCPU_32'},
'source': {
'storageSource': {
'bucket': 'my-bucket',
'object': 'path/to/project',
},
},
'steps': [{
'args': [
'build',
'-t',
'my-image:live',
'-t',
'my-image:latest',
'--cache-from',
'my-image:latest',
'.',
],
'name': 'gcr.io/cloud-builders/docker',
}],
'timeout': '1200s',
},
)
if __name__ == '__main__':
absltest.main()
|
xmanager-main
|
xmanager/cloud/cloud_build_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xmanager.cloud.vertex."""
import datetime
import os
import unittest
from unittest import mock
from google import auth
from google.auth import credentials
from google.cloud import aiplatform
from google.cloud import aiplatform_v1 as aip_v1
from google.cloud.aiplatform import utils as aip_utils
from xmanager import xm
from xmanager.cloud import auth as xm_auth
from xmanager.xm_local import executables as local_executables
from xmanager.xm_local import executors as local_executors
from xmanager.cloud import vertex # pylint: disable=g-bad-import-order
class VertexTest(unittest.TestCase):
@mock.patch.object(xm_auth, 'get_service_account')
@mock.patch.object(auth, 'default')
def test_launch(self, mock_creds, mock_sa):
os.environ['GOOGLE_CLOUD_BUCKET_NAME'] = 'test-bucket'
creds = credentials.AnonymousCredentials()
mock_creds.return_value = (creds, 'test-project')
mock_sa.return_value = 'test-sa'
client = vertex.Client('test-project', 'us-central1')
job = xm.Job(
name='test-job',
executable=local_executables.GoogleContainerRegistryImage(
name='test-image',
image_path='image-path',
args=xm.SequentialArgs.from_collection({'a': 1}),
),
executor=local_executors.Vertex(xm.JobRequirements(cpu=1, ram=1, t4=2)),
args={'b': 2, 'c': 3},
)
expected_call = {
'parent': 'projects/test-project/locations/us-central1',
'custom_job': aip_v1.CustomJob(
display_name='test-experiment',
job_spec=aip_v1.CustomJobSpec(
worker_pool_specs=[
aip_v1.WorkerPoolSpec(
machine_spec=aip_v1.MachineSpec(
machine_type='n1-highmem-2',
accelerator_type='NVIDIA_TESLA_T4',
accelerator_count=2,
),
replica_count=1,
container_spec=aip_v1.ContainerSpec(
image_uri='image-path',
args=['--a=1', '--b=2', '--c=3'],
),
)
],
service_account='test-sa',
base_output_directory=aip_v1.GcsDestination(
output_uri_prefix='gs://test-bucket/aiplatform-custom-job-2022-01-01-00:00:00.000',
),
enable_web_access=True,
),
),
'timeout': None,
}
timestamp = datetime.datetime.strptime('2022/1/1', '%Y/%m/%d')
with mock.patch.object(
datetime, 'datetime'
) as mock_timestamp, mock.patch.object(
aip_utils.ClientWithOverride, 'WrappedClient'
) as job_client, mock.patch.object(
aiplatform.CustomJob, 'resource_name', new_callable=mock.PropertyMock
) as name, mock.patch.object(
aiplatform.CustomJob, '_dashboard_uri'
):
mock_timestamp.now.return_value = timestamp
name.return_value = 'test-resource-name'
client.launch('test-experiment', [job])
job_client.return_value.create_custom_job.assert_called_once_with( # pytype: disable=attribute-error # py39-upgrade
**expected_call
)
def test_get_machine_spec_default(self):
job = xm.Job(
executable=local_executables.GoogleContainerRegistryImage('name', ''),
executor=local_executors.Vertex(),
args={},
)
machine_spec = vertex.get_machine_spec(job)
self.assertDictEqual(machine_spec, {'machine_type': 'n1-standard-4'})
def test_get_machine_spec_cpu(self):
job = xm.Job(
executable=local_executables.GoogleContainerRegistryImage('name', ''),
executor=local_executors.Vertex(
requirements=xm.JobRequirements(cpu=20, ram=40 * xm.GiB)
),
args={},
)
machine_spec = vertex.get_machine_spec(job)
self.assertDictEqual(machine_spec, {'machine_type': 'n1-highcpu-64'})
def test_get_machine_spec_gpu(self):
job = xm.Job(
executable=local_executables.GoogleContainerRegistryImage('name', ''),
executor=local_executors.Vertex(
requirements=xm.JobRequirements(p100=2)
),
args={},
)
machine_spec = vertex.get_machine_spec(job)
self.assertDictEqual(
machine_spec,
{
'machine_type': 'n1-standard-4',
'accelerator_type': vertex.aip_v1.AcceleratorType.NVIDIA_TESLA_P100,
'accelerator_count': 2,
},
)
def test_get_machine_spec_a100(self):
job = xm.Job(
executable=local_executables.GoogleContainerRegistryImage('name', ''),
executor=local_executors.Vertex(
requirements=xm.JobRequirements(a100=2)
),
args={},
)
machine_spec = vertex.get_machine_spec(job)
self.assertDictEqual(
machine_spec,
{
'machine_type': 'a2-highgpu-2g',
'accelerator_type': vertex.aip_v1.AcceleratorType.NVIDIA_TESLA_A100,
'accelerator_count': 2,
},
)
def test_get_machine_spec_tpu(self):
job = xm.Job(
executable=local_executables.GoogleContainerRegistryImage('name', ''),
executor=local_executors.Vertex(
requirements=xm.JobRequirements(tpu_v3=8)
),
args={},
)
machine_spec = vertex.get_machine_spec(job)
self.assertDictEqual(
machine_spec,
{
'machine_type': 'cloud-tpu',
'accelerator_type': vertex.aip_v1.AcceleratorType.TPU_V3,
'accelerator_count': 8,
},
)
def test_cpu_ram_to_machine_type_exact(self):
self.assertEqual(
'n1-standard-16', vertex.cpu_ram_to_machine_type(16, 60 * xm.GiB)
)
def test_cpu_ram_to_machine_type_highmem(self):
self.assertEqual(
'n1-highmem-64', vertex.cpu_ram_to_machine_type(1, 415 * xm.GiB)
)
def test_cpu_ram_to_machine_type_mem_only(self):
self.assertEqual(
'n1-highmem-64', vertex.cpu_ram_to_machine_type(None, 415 * xm.GiB)
)
def test_cpu_ram_to_machine_type_highcpu(self):
self.assertEqual(
'n1-highcpu-64', vertex.cpu_ram_to_machine_type(63, 1 * xm.GiB)
)
def test_cpu_ram_to_machine_type_cpu_only(self):
self.assertEqual('n1-highcpu-64', vertex.cpu_ram_to_machine_type(63, None))
def test_cpu_ram_to_machine_type_too_high(self):
with self.assertRaises(ValueError):
vertex.cpu_ram_to_machine_type(1000, 1000)
if __name__ == '__main__':
unittest.main()
|
xmanager-main
|
xmanager/cloud/vertex_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run Job as a Vizier worker to manager WorkUnit Vizier interaction."""
import re
from typing import Dict, Optional
from absl import logging
from google.cloud import aiplatform_v1beta1 as aip
_TRIAL_NAME_REGEX = (
r'projects\/[^\/]+\/locations\/[^\/]+\/studies\/[^\/]+\/trials\/[^\/]+'
)
class VizierWorker:
"""Worker that manage interaction between job and Vizier."""
def __init__(self, trial_name: str) -> None:
if not re.match(_TRIAL_NAME_REGEX, trial_name):
raise Exception(
'The trial_name must be in the form: '
'projects/{project}/locations/{location}/'
'studies/{study}/trials/{trial}'
)
self._trial_name = trial_name
location = trial_name.split('/')[3]
self._vz_client = aip.VizierServiceClient(
client_options={
'api_endpoint': f'{location}-aiplatform.googleapis.com',
}
)
def add_trial_measurement(self, step: int, metrics: Dict[str, float]) -> None:
"""Add trial measurements to Vizier."""
self._vz_client.add_trial_measurement(
request=aip.AddTrialMeasurementRequest(
trial_name=self._trial_name,
measurement=aip.Measurement(
step_count=step,
metrics=[
aip.Measurement.Metric(metric_id=k, value=v)
for k, v in metrics.items()
],
),
)
)
logging.info('Step %d Metric %s is reported', step, metrics)
def complete_trial(self, infeasible_reason: Optional[str] = None) -> None:
"""Complete a trial."""
self._vz_client.complete_trial(
request=aip.CompleteTrialRequest(
name=self._trial_name,
trial_infeasible=infeasible_reason is not None,
infeasible_reason=infeasible_reason,
)
)
logging.info('Trial %s is completed', self._trial_name)
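# Minimal usage sketch (the trial resource name below is a made-up
# placeholder matching _TRIAL_NAME_REGEX):
#
#   worker = VizierWorker(
#       'projects/my-project/locations/us-central1/studies/123/trials/1'
#   )
#   worker.add_trial_measurement(step=100, metrics={'accuracy': 0.91})
#   worker.complete_trial()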
|
xmanager-main
|
xmanager/vizier/vizier_cloud/vizier_worker.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main code that the Vertex Cloud Vizier Controller runs."""
import time
from typing import Any, Callable, Dict, Optional
from google.cloud import aiplatform_v1beta1 as aip
from xmanager import xm
class VizierController:
"""A Controller that runs Vizier suggested hyperparameters in multiple work units."""
def __init__(
self,
experiment: xm.Experiment,
work_unit_generator: Callable[[xm.WorkUnit, Dict[str, Any]], Any],
vz_client: aip.VizierServiceClient,
study_name: str,
num_work_units_total: int,
num_parallel_work_units: int,
) -> None:
"""Create a VizierController.
Args:
experiment: XM experiment.
work_unit_generator: the function that generates WorkUnit from
hyperparameters.
vz_client: the Vizier Client used for interacting with Vizier.
study_name: the study name the controller works on.
num_work_units_total: number of work units to create in total. (TODO:
remove this and retrieve from study spec stopping criteria once it is
settable there.)
num_parallel_work_units: number of work units to run in parallel.
"""
self._experiment = experiment
self._work_unit_generator = work_unit_generator
self._vz_client = vz_client
self._study_name = study_name
self._num_work_units_total = num_work_units_total
self._num_parallel_work_units = num_parallel_work_units
self._work_unit_updaters = []
def run(self, poll_frequency_in_sec: float = 60) -> None:
"""Peridically check and sync status between vizier and work units and create new work units when needed."""
while True:
# 1. Complete trial for completed work unit; Early stop first if needed.
for work_unit_updater in self._work_unit_updaters:
if not work_unit_updater.completed:
work_unit_updater.check_for_completion()
# 2. TODO: Return by Vizier's indication that study is done
# when such API is ready on Vizier side.
num_existing_work_units = len(self._work_unit_updaters)
num_completed_work_units = sum(
[wuu.completed for wuu in self._work_unit_updaters]
)
if (
num_existing_work_units == self._num_work_units_total
and num_completed_work_units == self._num_work_units_total
):
print('All done! Exiting VizierController... \n')
return
# 3. Get new trials and assign to new work units.
self._launch_new_work_units()
time.sleep(poll_frequency_in_sec)
def _launch_new_work_units(self) -> None:
"""Get hyperparmeter suggestions from Vizier and assign to new work units to run."""
# 1. Compute num of work units to create next.
num_existing_work_units = len(self._work_unit_updaters)
num_running_work_units = len(
[
wuu
for wuu in self._work_unit_updaters
if wuu.work_unit_status().is_active
]
)
num_work_units_to_create_total = (
self._num_work_units_total - num_existing_work_units
)
num_work_units_to_create_next = min(
self._num_parallel_work_units - num_running_work_units,
num_work_units_to_create_total,
)
# 2. Create the work units.
start_index = num_existing_work_units + 1
for i in range(start_index, start_index + num_work_units_to_create_next):
trial = (
self._vz_client.suggest_trials(
request=aip.SuggestTrialsRequest(
parent=self._study_name,
suggestion_count=1,
client_id=f'work unit {i}',
)
)
.result()
.trials[0]
)
print(f'Trial for work unit (index: {i}) is retrieved:\n{trial}')
print(f'Creating work unit (index: {i})... \n')
def create_gen(index: int, trial: aip.Trial) -> xm.JobGeneratorType:
async def gen_work_unit(work_unit: xm.WorkUnit, **kwargs):
await self._work_unit_generator(work_unit, kwargs)
# TODO: Add a utility to handle logging conditionally
# (use print when running locally, otherwise logging.info).
print(
f'Work unit (index: {index}, '
f'id: {work_unit.work_unit_id}) created. \n'
)
self._work_unit_updaters.append(
WorkUnitVizierUpdater(
vz_client=self._vz_client, work_unit=work_unit, trial=trial
)
)
return gen_work_unit
args = {
'trial_name': trial.name,
**{p.parameter_id: p.value for p in trial.parameters},
}
self._experiment.add(create_gen(i, trial), args)
class WorkUnitVizierUpdater:
"""An updater for syncing completion state between work unit and vizier trial."""
def __init__(
self,
vz_client: aip.VizierServiceClient,
work_unit: xm.WorkUnit,
trial: aip.Trial,
) -> None:
self.completed = False
self._vz_client = vz_client
self._work_unit = work_unit
self._trial = trial
def work_unit_status(self) -> xm.ExperimentUnitStatus:
return self._work_unit.get_status()
def check_for_completion(self) -> None:
"""Sync the completion status between WorkUnit and Vizier Trial if needed."""
if self.completed:
return
print(
'Start completion check for work unit'
f' {self._work_unit.work_unit_id}.\n'
)
# TODO: Add infeasible_reason when available.
if not self.work_unit_status().is_active:
self._complete_trial(self._trial)
self.completed = True
elif (
self._vz_client.check_trial_early_stopping_state(
request=aip.CheckTrialEarlyStoppingStateRequest(
trial_name=self._trial.name
)
)
.result()
.should_stop
):
print(f'Early stopping work unit {self._work_unit.work_unit_id}.\n')
self._work_unit.stop()
else:
print(f'Work unit {self._work_unit.work_unit_id} is still running.\n')
def _complete_trial(
self, trial: aip.Trial, infeasible_reason: Optional[str] = None
) -> None:
"""Complete a trial."""
self._vz_client.complete_trial(
request=aip.CompleteTrialRequest(
name=trial.name,
trial_infeasible=infeasible_reason is not None,
infeasible_reason=infeasible_reason,
)
)
print(f'Trial {trial.name} is completed\n')
|
xmanager-main
|
xmanager/vizier/vizier_cloud/vizier_controller.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for launching Vizier Explorations using Vertex Vizier."""
from typing import Any, Dict
from xmanager import xm
from xmanager.vizier.vizier_cloud import study_factory as sf
from xmanager.vizier.vizier_cloud import vizier_controller
_DEFAULT_LOCATION = 'us-central1'
# TODO: Add vizier_controller as auxiliary Job generator.
class VizierExploration:
"""An API for launching experiment as a Vizier-based Exploration."""
def __init__(
self,
experiment: xm.Experiment,
job: xm.JobType,
study_factory: sf.StudyFactory,
num_trials_total: int,
num_parallel_trial_runs: int,
) -> None:
"""Create a VizierExploration.
Args:
experiment: the experiment that performs the exploration.
job: a job to run.
study_factory: the VizierStudyFactory used to create or load the study.
num_trials_total: total number of trials the experiment wants to explore.
num_parallel_trial_runs: number of parallel runs evaluating the trials.
"""
async def work_unit_generator(
work_unit: xm.WorkUnit, vizier_params: Dict[str, Any]
):
work_unit.add(job, self._to_job_params(vizier_params))
if not study_factory.display_name:
study_factory.display_name = f'X{experiment.experiment_id}'
self._controller = vizier_controller.VizierController(
experiment,
work_unit_generator,
study_factory.vz_client,
study_factory.study(),
num_trials_total,
num_parallel_trial_runs,
)
def _to_job_params(self, vizier_params: Dict[str, Any]) -> Dict[str, Any]:
# TODO: unflatten parameters for JobGroup case (currently this
# works for xm.Job).
# For example: transform
# {'learner.args.learning_rate': 0.1}
# to
# {'learner': {'args': {'learning_rate': 0.1}}}
return {'args': vizier_params}
def launch(self, **kwargs) -> None:
self._controller.run(**kwargs)
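# Minimal usage sketch, assuming an existing `experiment`, a packaged `job`
# and an `aip.StudySpec` called `study_spec` (all hypothetical placeholders):
#
#   exploration = VizierExploration(
#       experiment=experiment,
#       job=job,
#       study_factory=sf.NewStudy(study_config=study_spec),
#       num_trials_total=20,
#       num_parallel_trial_runs=4,
#   )
#   exploration.launch()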
|
xmanager-main
|
xmanager/vizier/vizier_cloud/vizier_exploration.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vizier API for launching Vertex-Vizier explored Experiment for OSS."""
from xmanager.vizier.vizier_cloud.study_factory import NewStudy
from xmanager.vizier.vizier_cloud.vizier_exploration import VizierExploration
|
xmanager-main
|
xmanager/vizier/vizier_cloud/__init__.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory classes for generating study of cloud Vertex Vizier."""
import abc
from typing import Optional
from google.cloud import aiplatform_v1beta1 as aip
from xmanager.cloud import auth
_DEFAULT_LOCATION = 'us-central1'
class StudyFactory(abc.ABC):
"""Abstract class representing vizier study generator."""
vz_client: aip.VizierServiceClient
study_config: aip.StudySpec
num_trials_total: int
display_name: str
# TODO: Once vertex pyvizier is available, we should replace
# aip.StudySpec with it.
# display_name and num_trials_total are supposed to be set in the study
# config, which aip.StudySpec does not currently support, but they should
# be settable once pyvizier.StudyConfig is available.
def __init__(
self,
study_config: aip.StudySpec,
num_trials_total: int,
display_name: str,
location: str,
) -> None:
super().__init__()
self.study_config = study_config
self.num_trials_total = num_trials_total
self.display_name = display_name
self.vz_client = aip.VizierServiceClient(
client_options=dict(
api_endpoint=f'{location}-aiplatform.googleapis.com'
)
)
@abc.abstractmethod
def study(self) -> str:
raise NotImplementedError
class NewStudy(StudyFactory):
"""Vizier study generator that generates new study from given config."""
project: str
location: str
# `num_trials_total` is a required field. It defaults to 0 to avoid breaking
# users of the soon-to-be-deprecated VizierExploration.
# `display_name` is optional for users to customize; if not set, XM will
# set it from the experiment information.
def __init__(
self,
study_config: aip.StudySpec,
num_trials_total: int = 0,
display_name: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
) -> None:
self.project = project or auth.get_project_name()
self.location = location or _DEFAULT_LOCATION
super().__init__(
study_config, num_trials_total, display_name or '', self.location
)
def study(self) -> str:
return self.vz_client.create_study(
parent=f'projects/{self.project}/locations/{self.location}',
study=aip.Study(
display_name=self.display_name, study_spec=self.study_config
),
).name
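# Note: `study()` returns the fully qualified study resource name created by
# the Vizier service, e.g. (illustrative)
# 'projects/my-project/locations/us-central1/studies/12345', which the
# VizierController later uses as the parent for SuggestTrialsRequest.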
|
xmanager-main
|
xmanager/vizier/vizier_cloud/study_factory.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An utility to batch Packageables together and build them in one go."""
import asyncio
import concurrent.futures as concurrent_futures
import threading
from typing import Awaitable, Callable, Sequence, TypeVar
from xmanager.xm import job_blocks
class PackageHasNotBeenCalledError(RuntimeError):
"""Access to package_async() awaitable prior to calling .package()."""
Awaited = TypeVar('Awaited')
class PicklableAwaitableImpl:
"""Awaitable type with known value which can be pickled."""
def __init__(
self,
get_future: Callable[
[], asyncio.Future[Awaited] | concurrent_futures.Future[Awaited]
],
):
self._get_future = get_future
def __await__(self):
return asyncio.wrap_future(self._get_future()).__await__()
def __reduce__(self):
return _return_awaited, (self._get_future().result(),)
def _return_awaited(
awaited: Awaited,
) -> Awaitable[Awaited]:
"""Returns a picklable awaitable for an already known value."""
def get_future() -> asyncio.Future[Awaited]:
future = asyncio.Future()
future.set_result(awaited)
return future
return PicklableAwaitableImpl(get_future)
class AsyncPackager:
"""An utility to batch Packageables together and build them in one go.
Attributes:
_lock: A Lock() object used to make the class threadsafe.
_package_batch: A function which packages a batch of Packageables.
_packageables: Packageables queued to be packaged.
_futures: Corresponding futures where packaging results should be written.
"""
def __init__(
self,
package_batch: Callable[
[Sequence[job_blocks.Packageable]], Sequence[job_blocks.Executable]
],
) -> None:
"""Creates the async packager.
Args:
package_batch: A function which packages a batch of Packageables.
"""
super().__init__()
self._lock = threading.Lock()
self._package_batch = package_batch
self._packageables = []
self._futures = []
def add(
self, packageable: job_blocks.Packageable
) -> Awaitable[job_blocks.Executable]:
"""Adds new packageable to the batch."""
with self._lock:
future = concurrent_futures.Future()
self._packageables.append(packageable)
self._futures.append(future)
def check_is_packaged() -> None:
with self._lock:
if packageable in self._packageables:
raise PackageHasNotBeenCalledError(
'.package() must be called before awaiting on the packaging '
'result'
)
def get_future() -> concurrent_futures.Future[job_blocks.Executable]:
check_is_packaged()
return future
return PicklableAwaitableImpl(get_future)
def package(
self, extra_packageables: Sequence[job_blocks.Packageable] = ()
) -> Sequence[job_blocks.Executable]:
"""Triggers the packaging of previously added packageables.
Args:
extra_packageables: An explicit sequence of extra packageable items to
package synchronously.
Returns:
The list of executables corresponding to `extra_packageables`.
"""
with self._lock:
packageables = self._packageables + list(extra_packageables)
futures = self._futures
self._packageables = []
self._futures = []
if not packageables:
return []
try:
executables = self._package_batch(packageables)
for executable, future in zip(executables, futures):
future.set_result(executable)
return executables[len(futures) :]
except Exception as e:
for future in futures:
future.set_exception(e)
raise
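# A minimal usage sketch, assuming `package_batch` is a backend-provided
# function that turns Packageables into Executables (for example, an
# experiment implementation's packaging routine).
async def _example_async_packaging(
    package_batch: Callable[
        [Sequence[job_blocks.Packageable]], Sequence[job_blocks.Executable]
    ],
    packageable: job_blocks.Packageable,
) -> job_blocks.Executable:
  packager = AsyncPackager(package_batch)
  pending = packager.add(packageable)  # Returns an awaitable placeholder.
  packager.package()  # Packages the whole batch in one go.
  return await pending  # Resolves to the corresponding Executable.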
|
xmanager-main
|
xmanager/xm/async_packager.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data classes for job-related abstractions."""
import abc
import functools
import itertools
import re
from typing import Any, Awaitable, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union
import attr
from xmanager.xm import utils
UserArgs = Union[Mapping, Sequence, 'SequentialArgs']
@functools.cache
def print_none_warning(key: str) -> None:
print(
f'WARNING: Setting `{key}=None` will exclude the flag. To pass the '
f'actual value, pass the string literal `{key}="None"` instead'
)
def _is_nested_structure(structure: Union[List[Any], Tuple[Any]]) -> bool:
"""Returns true if a list or tuple contains a list or tuple."""
return any(type(element) in (list, tuple) for element in structure)
class SequentialArgs:
"""A sequence of positional and keyword arguments for a binary.
Unix command line arguments are just a list of strings. But it is very common
to simulate keyword arguments in a --key=value form. It is not uncommon to
only have keyword arguments. Therefore we allow providing args as:
Dicts:
{'foo': 'space bar', 'with_magic': True} -> --foo='space bar' --with_magic
Argument order is preserved.
Lists:
['--foo', 'space bar'] -> --foo 'space bar'
SequentialArgs (which can represent a mix of the two above):
xm.merge_args({'foo': 'bar'}, ['--'], {'n': 16}) -> --foo=bar -- --n=16
SequentialArgs provides convenient merging semantics: if a value is given
for an existing keyword argument, it will be overridden rather than appended,
which makes it possible to specify default values and override them later:
xm.merge_args({'foo': '1', 'bar': '42'}, {'foo': '2'}) -> --foo=2 --bar=42
SequentialArgs is immutable, but you can get a copy with updated values:
args = xm.merge_args({'foo': '1', 'bar': '42'})
args = xm.merge_args(args, {'foo': '2'})
We only allow appending new arguments (positional and keyword) and overriding
keyword arguments. Removal and insertion into the middle are not supported.
"""
@attr.s(auto_attribs=True)
class _RegularItem:
value: Any
@attr.s(auto_attribs=True)
class _KeywordItem:
name: str
def __init__(self) -> None:
"""Constucts an empty SequentialArgs.
Prefer using xm.merge_args to construct SequentialArgs objects.
"""
self._items: List[
Union[SequentialArgs._RegularItem, SequentialArgs._KeywordItem]
] = []
self._kwvalues: Dict[str, Any] = {}
def _ingest_regular_item(self, value: Any) -> None:
self._items.append(SequentialArgs._RegularItem(value))
def _ingest_keyword_item(self, name: str, value: Any) -> None:
if name not in self._kwvalues:
self._items.append(SequentialArgs._KeywordItem(name))
self._kwvalues[name] = value
def _merge_from(self, args: 'SequentialArgs') -> None:
"""Merges another instance of SequentialArgs into self."""
for item in args._items: # pylint: disable=protected-access
match item:
case SequentialArgs._RegularItem() as regular_item:
self._ingest_regular_item(regular_item.value) # pytype: disable=attribute-error
case SequentialArgs._KeywordItem() as keyword_item:
self._ingest_keyword_item(
keyword_item.name, args._kwvalues[keyword_item.name] # pylint: disable=protected-access # pytype: disable=attribute-error
)
@staticmethod
def from_collection(collection: Optional[UserArgs]) -> 'SequentialArgs':
"""Populates a new instance from a given collection."""
result = SequentialArgs()
if collection is None:
return result
if isinstance(collection, str):
raise ValueError(
f'Tried to construct xm.SequentialArgs from string: {collection!r}. '
f'Wrap it in a list: [{collection!r}] to make it a single argument.'
)
elif isinstance(collection, SequentialArgs):
result._merge_from(collection) # pylint: disable=protected-access
elif isinstance(collection, Mapping):
for key, value in collection.items():
result._ingest_keyword_item(str(key), value) # pylint: disable=protected-access
elif isinstance(collection, Sequence):
for value in collection:
result._ingest_regular_item(value) # pylint: disable=protected-access
else:
raise TypeError(f'Unsupported collection type: {collection!r}')
return result
def rewrite_args(self, rewrite: Callable[[str], str]) -> 'SequentialArgs':
"""Applies the rewrite function to all args and returns the result."""
result = SequentialArgs()
for item in self._items:
# pytype: disable=attribute-error
match item:
case SequentialArgs._RegularItem() as regular_item:
new_value = regular_item.value
if isinstance(new_value, str):
new_value = rewrite(new_value)
result._ingest_regular_item(new_value) # pylint: disable=protected-access
case SequentialArgs._KeywordItem() as keyword_item:
new_value = self._kwvalues[keyword_item.name]
if isinstance(new_value, str):
new_value = rewrite(new_value)
result._ingest_keyword_item(keyword_item.name, new_value) # pylint: disable=protected-access
case _:
raise TypeError(f'Unsupported item type: {item!r}')
# pytype: enable=attribute-error
return result
def to_list(
self,
escaper: Callable[[Any], str] = utils.ARG_ESCAPER,
kwargs_joiner: Callable[[str, str], str] = utils.trivial_kwargs_joiner,
) -> List[str]:
"""Exports items as a list ready to be passed into the command line."""
def export_keyword_item(
item: SequentialArgs._KeywordItem,
) -> List[Optional[str]]:
value = self._kwvalues[item.name]
if value is None:
# We skip flags with None value, allowing the binary to use defaults.
# A string can be used if a literal "None" value needs to be assigned.
print_none_warning(item.name)
return [None]
elif isinstance(value, bool):
return [escaper(f"--{'' if value else 'no'}{item.name}")]
elif type(value) in (list, tuple) and not _is_nested_structure(value):
# Pass a sequence of arguments by repeating the flag for each element,
# to be consistent with absl's handling of multiple flags.
# We do not do this for nested sequences, which absl cannot handle, and
# instead fall back to quoting the sequence, leaving parsing of the
# nested structure to the executable being called.
return [
kwargs_joiner(escaper(f'--{item.name}'), escaper(v)) for v in value
]
else:
return [kwargs_joiner(escaper(f'--{item.name}'), escaper(value))]
def matcher(item) -> List[Optional[str]]:
match item:
case SequentialArgs._RegularItem() as regular_item:
return [escaper(regular_item.value)]
case SequentialArgs._KeywordItem() as keyword_item:
return export_keyword_item(keyword_item)
case _:
raise TypeError(f'Unsupported item type: {item!r}')
flags = itertools.chain.from_iterable(matcher(item) for item in self._items)
return [f for f in flags if f is not None]
def to_dict(self, kwargs_only: bool = False) -> Dict[str, Any]:
"""Exports items as a dictionary.
Args:
kwargs_only: Whether to skip positional arguments.
Returns:
The sought dictionary.
"""
if kwargs_only:
return self._kwvalues
def matcher(item) -> Tuple[str, Any]:
match item:
case SequentialArgs._RegularItem() as regular_item:
return (str(regular_item.value), True)
case SequentialArgs._KeywordItem() as keyword_item:
return (keyword_item.name, self._kwvalues[keyword_item.name])
case _:
raise TypeError(f'Unsupported item type: {item!r}')
return dict([matcher(item) for item in self._items])
def __eq__(self, other) -> bool:
return isinstance(other, SequentialArgs) and all([
self._items == other._items,
self._kwvalues == other._kwvalues,
])
def __repr__(self) -> str:
return f"[{', '.join(self.to_list(repr))}]"
def merge_args(*operands: Union[SequentialArgs, UserArgs]) -> SequentialArgs:
"""Merges several arguments collections into one left-to-right."""
result = SequentialArgs()
for operand in operands:
if not isinstance(operand, SequentialArgs):
operand = SequentialArgs.from_collection(operand)
result._merge_from(operand) # pylint: disable=protected-access
return result
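# A small, hedged illustration of the merging semantics documented on
# SequentialArgs: the later keyword value overrides the earlier one while the
# argument order is preserved.
def _example_merge_args() -> List[str]:
  args = merge_args({'foo': '1', 'bar': '42'}, {'foo': '2'})
  return args.to_list()  # ['--foo=2', '--bar=42']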
class ExecutableSpec(abc.ABC):
"""Executable specification describes what code / computation to run.
Use one of the functions declared in xm/packagables.py to create a spec:
* xm.binary - a prebuilt executable program.
* xm.bazel_binary - an executable built with Bazel.
* xm.container - a prebuilt Docker container.
* xm.bazel_container - a Docker container built with Bazel.
* xm.python_container - a Docker container running python code.
* xm.dockerfile_container - a Docker container built with dockerfile.
An executable spec must be turned into an Executable using
Experiment.package() in order to be used in a Job.
WARNING: `ExecutableSpec`s are supposed to be implementation-agnostic. That
means there should be no backend-specific class inheriting `ExecutableSpec`.
"""
@property
@abc.abstractmethod
def name(self) -> str:
raise NotImplementedError
@attr.s(auto_attribs=True)
class Executable(abc.ABC):
"""Executable describes the final location of a packaged executable spec.
An executable depends on the executable specification and the executor
specification. Experiment's implementation knows how to handle each type of
executable.
Attributes:
name: An automatically populated name for the executable. Used for assigning
default names to `Job`s.
"""
name: str
class ExecutorSpec(abc.ABC):
"""Executor spec describes the location of the runtime environment.
For the supported ExecutorSpecs see the list of executors below.
"""
class Executor(abc.ABC):
"""Executor describes the runtime environment of a Job.
Concrete supported executors are listed in xm_local/executors.py:
* xm_local.Local
* xm_local.Vertex
* xm_local.Kubernetes
"""
@classmethod
@abc.abstractmethod
def Spec(cls) -> ExecutorSpec: # pylint: disable=invalid-name
raise NotImplementedError
def _validate_env_vars(
self: Any, attribute: Any, env_vars: Dict[str, str]
) -> None:
del self # Unused.
del attribute # Unused.
for key in env_vars.keys():
if not re.fullmatch('[a-zA-Z_][a-zA-Z0-9_]*', key):
raise ValueError(
'Environment variable names must conform to '
f'[a-zA-Z_][a-zA-Z0-9_]*. Got {key!r}.'
)
@attr.s(auto_attribs=True)
class Packageable:
"""Packageable describes what to build and its static parameters."""
executable_spec: ExecutableSpec
executor_spec: ExecutorSpec
args: SequentialArgs = attr.ib(
factory=list, converter=SequentialArgs.from_collection
) # pytype: disable=annotation-type-mismatch
env_vars: Dict[str, str] = attr.ib(
converter=dict, default=attr.Factory(dict), validator=_validate_env_vars
)
class Constraint(abc.ABC):
"""Constraint describes the requirements for where a job group can run.
Some examples of constraints include:
* same virtual machine;
* same virtual private Cloud subnetwork;
* same network fabric;
* same geographic location.
"""
# Job generators are async functions returning None.
# Pylint doesn't distinguish async and sync contexts, so Optional[Awaitable]
# has to be used to accommodate both cases.
JobGeneratorType = Callable[..., Optional[Awaitable]]
JobType = Union['Job', 'JobGroup', JobGeneratorType, 'JobConfig']
@attr.s(auto_attribs=True)
class Job:
"""Job describes a unit of computation to be run.
Attributes:
executable: What to run -- one of `xm.Experiment.package` results.
executor: Where to run -- one of `xm.Executor` subclasses.
name: Name of the job. Must be unique within the context (work unit). By
default it is constructed from the executable. Used for naming related
entities such as newly created containers.
args: Command line arguments to pass. This can be dict, list or
xm.SequentialArgs. Dicts are most convenient for keyword flags.
{'batch_size': 16} is passed as --batch_size=16. If positional arguments
are needed one can use a list or xm.SequentialArgs.
env_vars: Environment variables to apply.
"""
executable: Executable
executor: Executor
name: Optional[str] = None
args: SequentialArgs = attr.ib(
factory=list, converter=SequentialArgs.from_collection
) # pytype: disable=annotation-type-mismatch
env_vars: Dict[str, str] = attr.ib(
converter=dict, default=attr.Factory(dict), validator=_validate_env_vars
)
class JobGroup:
"""JobGroup describes a set of jobs that run under shared constraints.
Use named arguments to give jobs meaningful names:
```
JobGroup(
learner=Job(learner_executable, executor),
actor=Job(actor_executable, executor),
)
```
JobGroups provide the gang-scheduling concept: jobs inside them are
scheduled / descheduled simultaneously. Note that schedulers may not always be
able to enforce that.
JobGroups may include more fine-grained constraints:
```
JobGroup(
learner=Job(tpu_learner_executable, executor),
preprocessor=Job(preprocessor_executable, executor),
constraints=[xm_impl.SameMachine()],
)
```
To express sophisticated requirements JobGroups can be nested:
```
JobGroup(
eval=Job(eval_executable, executor),
colocated_learner_and_actor=JobGroup(
learner=Job(tpu_learner_executable, executor),
actor=Job(actor_executable, executor),
constraints=[xm_impl.SameMachine()],
),
)
```
Attributes:
jobs: A mapping of names to jobs.
constraints: A list of additional scheduling constraints.
"""
jobs: Dict[str, JobType]
constraints: List[Constraint]
def __init__(
self,
*,
constraints: Optional[Sequence[Constraint]] = None,
**jobs: JobType,
) -> None:
"""Builds a JobGroup.
Args:
constraints: List of additional scheduling constraints. Keyword only arg.
**jobs: Jobs / job groups that constitute the group passed as kwargs.
"""
self.jobs = jobs
self.constraints = list(constraints) if constraints else []
class JobConfig(abc.ABC):
"""A job defined by a platform-specific configuration.
Sometimes defining a job through the platform-agnostic xm.Job/xm.JobGroup
interfaces is not feasible. In this case the job can be defined in a
configuration language native to the underlying platform. This is a base class for such
configurations. Concrete XManager implementations may provide descendants for
the configuration languages they support.
"""
JobTypeVar = TypeVar('JobTypeVar', Job, JobGroup, JobGeneratorType, JobConfig)
def is_job_generator(job: JobType) -> bool:
return isinstance(job, Callable)
def get_args_for_all_jobs(job: JobType, args: Dict[str, Any]) -> Dict[str, Any]:
"""Gets args to apply on all jobs inside a JobGroup.
This is useful if all jobs within a work unit accept the same arguments.
Args:
job: The job group to generate args for.
args: The args to apply to all jobs inside the job group.
Returns:
args that can be added with work_unit.add()
"""
match job:
case JobGroup() as job_group:
all_args = {}
for job_name, job_type in job_group.jobs.items(): # pytype: disable=attribute-error
job_type_args = get_args_for_all_jobs(job_type, args)
all_args[job_name] = job_type_args
return all_args
case _:
return {'args': dict(args)}
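# A hedged sketch of applying identical arguments to every job in a group,
# assuming `learner` and `actor` are xm.Job instances defined elsewhere.
def _example_args_for_all_jobs(learner: Job, actor: Job) -> Dict[str, Any]:
  group = JobGroup(learner=learner, actor=actor)
  return get_args_for_all_jobs(group, {'seed': 1})
  # -> {'learner': {'args': {'seed': 1}}, 'actor': {'args': {'seed': 1}}}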
|
xmanager-main
|
xmanager/xm/job_blocks.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convenience methods for constructing core objects."""
from typing import Collection, List, Mapping, Optional, Union
import immutabledict
from xmanager.xm import executables
from xmanager.xm import job_blocks
# NOTE: Do not edit the methods below manually.
# They should be generated with the packagables_generator utility.
def binary(
executor_spec: job_blocks.ExecutorSpec,
path: str,
dependencies: Collection[executables.BinaryDependency] = (),
*,
args: Optional[job_blocks.UserArgs] = None,
env_vars: Mapping[str, str] = immutabledict.immutabledict(),
) -> job_blocks.Packageable:
# pyformat: disable
"""A prebuilt executable program.
Args:
executor_spec: Where the binary should be launched. Instructs for which
platform it should be packaged.
path: Path to a prebuilt binary.
dependencies: A list of data dependencies to be packaged together with the
binary.
args: Command line arguments to pass. This can be dict, list or
xm.SequentialArgs. Dicts are most convenient for keyword flags.
{'batch_size': 16} is passed as --batch_size=16. If positional arguments
are needed one can use a list or xm.SequentialArgs.
env_vars: Environment variables to be set.
Returns:
A packageable object which can be turned into an executable with
Experiment.package or Experiment.package_async.
"""
# pyformat: enable
return job_blocks.Packageable(
executable_spec=executables.Binary(
path=path,
dependencies=dependencies,
),
executor_spec=executor_spec,
args=args,
env_vars=env_vars,
)
def bazel_binary(
executor_spec: job_blocks.ExecutorSpec,
label: str,
dependencies: Collection[executables.BinaryDependency] = (),
bazel_args: Collection[str] = (),
*,
args: Optional[job_blocks.UserArgs] = None,
env_vars: Mapping[str, str] = immutabledict.immutabledict(),
) -> job_blocks.Packageable:
# pyformat: disable
"""A Bazel target that produces a self-contained binary.
Note that for Python targets based on https://github.com/google/subpar
a self-contained '.par' binary would be built.
Args:
executor_spec: Where the binary should be launched. Instructs for which
platform it should be packaged.
label: The Bazel target to be built.
dependencies: A list of data dependencies to be packaged together with
the binary.
bazel_args: Bazel command line arguments.
args: Command line arguments to pass. This can be dict, list or
xm.SequentialArgs. Dicts are most convenient for keyword flags.
{'batch_size': 16} is passed as --batch_size=16. If positional arguments
are needed one can use a list or xm.SequentialArgs.
env_vars: Environment variables to be set.
Returns:
A packageable object which can be turned into an executable with
Experiment.package or Experiment.package_async.
"""
# pyformat: enable
return job_blocks.Packageable(
executable_spec=executables.BazelBinary(
label=label,
dependencies=dependencies,
bazel_args=bazel_args,
),
executor_spec=executor_spec,
args=args,
env_vars=env_vars,
)
def container(
executor_spec: job_blocks.ExecutorSpec,
image_path: str,
*,
args: Optional[job_blocks.UserArgs] = None,
env_vars: Mapping[str, str] = immutabledict.immutabledict(),
) -> job_blocks.Packageable:
# pyformat: disable
"""A prebuilt Docker image.
The image can be tagged locally or in a remote repository.
Args:
executor_spec: Where the binary should be launched. Instructs for which
platform it should be packaged.
image_path: Path to a prebuilt container image.
args: Command line arguments to pass. This can be dict, list or
xm.SequentialArgs. Dicts are most convenient for keyword flags.
{'batch_size': 16} is passed as --batch_size=16. If positional arguments
are needed one can use a list or xm.SequentialArgs.
env_vars: Environment variables to be set.
Returns:
A packageable object which can be turned into an executable with
Experiment.package or Experiment.package_async.
"""
# pyformat: enable
return job_blocks.Packageable(
executable_spec=executables.Container(
image_path=image_path,
),
executor_spec=executor_spec,
args=args,
env_vars=env_vars,
)
def bazel_container(
executor_spec: job_blocks.ExecutorSpec,
label: str,
bazel_args: Collection[str] = (),
*,
args: Optional[job_blocks.UserArgs] = None,
env_vars: Mapping[str, str] = immutabledict.immutabledict(),
) -> job_blocks.Packageable:
# pyformat: disable
"""A Bazel target that produces a .tar image.
Note that for targets based on https://github.com/bazelbuild/rules_docker one
should append '.tar' to the label to specify a self-contained image.
Args:
executor_spec: Where the binary should be launched. Instructs for which
platform it should be packaged.
label: The Bazel target to be built.
bazel_args: Bazel command line arguments.
args: Command line arguments to pass. This can be dict, list or
xm.SequentialArgs. Dicts are most convenient for keyword flags.
{'batch_size': 16} is passed as --batch_size=16. If positional arguments
are needed one can use a list or xm.SequentialArgs.
env_vars: Environment variables to be set.
Returns:
A packageable object which can be turned into an executable with
Experiment.package or Experiment.package_async.
"""
# pyformat: enable
return job_blocks.Packageable(
executable_spec=executables.BazelContainer(
label=label,
bazel_args=bazel_args,
),
executor_spec=executor_spec,
args=args,
env_vars=env_vars,
)
def python_container(
executor_spec: job_blocks.ExecutorSpec,
entrypoint: Union[executables.ModuleName, executables.CommandList],
path: str = '.',
base_image: Optional[str] = None,
docker_instructions: Optional[List[str]] = None,
use_deep_module: bool = False,
*,
args: Optional[job_blocks.UserArgs] = None,
env_vars: Mapping[str, str] = immutabledict.immutabledict(),
) -> job_blocks.Packageable:
# pyformat: disable
"""PythonContainer describes a directory containing Python code.
Args:
executor_spec: Where the binary should be launched. Instructs for which
platform it should be packaged.
entrypoint: The Python module or list of shell commands to run when entering
this Python project.
path: Relative or absolute path to the Python project. By default, the
current directory (`'.'`) is used.
base_image: Name of the image to initialize a new Docker build stage using
the instruction `FROM`.
docker_instructions: List of Docker instructions to apply when building the
image. If not specified, the default one will be provided.
When you use `docker_instructions`, you are responsible for copying the
project directory. For example, if you are running with:
path='/path/to/cifar10'
You should include these steps in your `docker_instructions`:
[
'COPY cifar10/ cifar10',
'WORKDIR cifar10',
]
If your source code rarely changes, you can make this your first step.
If you are frequently iterating on the source code, it is best practice
to place these steps as late as possible in the list to maximize Docker
layer-caching.
use_deep_module: Whether the experiment code uses deep module structure
(i.e., 'from <a.prefix> import models') or not (i.e., 'import models').
If use_deep_module is set to True and docker_instructions are used, it
is recommended to use a dedicated workdir and copy the whole project
directory there. The example above should be modified as:
[
'RUN mkdir /workdir',
'WORKDIR /workdir',
'COPY cifar10/ /workdir/cifar10',
]
args: Command line arguments to pass. This can be dict, list or
xm.SequentialArgs. Dicts are most convenient for keyword flags.
{'batch_size': 16} is passed as --batch_size=16. If positional arguments
are needed one can use a list or xm.SequentialArgs.
env_vars: Environment variables to be set.
Returns:
A packageable object which can be turned into an executable with
Experiment.package or Experiment.package_async.
"""
# pyformat: enable
return job_blocks.Packageable(
executable_spec=executables.PythonContainer(
entrypoint=entrypoint,
path=path,
base_image=base_image,
docker_instructions=docker_instructions,
use_deep_module=use_deep_module,
),
executor_spec=executor_spec,
args=args,
env_vars=env_vars,
)
def dockerfile_container(
executor_spec: job_blocks.ExecutorSpec,
path: str = '.',
dockerfile: Optional[str] = None,
*,
args: Optional[job_blocks.UserArgs] = None,
env_vars: Mapping[str, str] = immutabledict.immutabledict(),
) -> job_blocks.Packageable:
# pyformat: disable
"""Dockerfile describes a Dockerfile for generating a docker image.
This is a lower-level feature; most use cases can instead be covered by
higher-level executables such as BazelContainer or PythonContainer.
Args:
executor_spec: Where the binary should be launched. Instructs for which
platform it should be packaged.
path: Specifies the build's context.
dockerfile: The file that will be used for build instructions. Otherwise,
{path}/Dockerfile will be used. Equivalent to `docker build -f`. A
relative path will use a Dockerfile that is relative to the launcher
script.
args: Command line arguments to pass. This can be dict, list or
xm.SequentialArgs. Dicts are most convenient for keyword flags.
{'batch_size': 16} is passed as --batch_size=16. If positional arguments
are needed one can use a list or xm.SequentialArgs.
env_vars: Environment variables to be set.
Returns:
A packageable object which can be turned into an executable with
Experiment.package or Experiment.package_async.
"""
# pyformat: enable
return job_blocks.Packageable(
executable_spec=executables.Dockerfile(
path=path,
dockerfile=dockerfile,
),
executor_spec=executor_spec,
args=args,
env_vars=env_vars,
)
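# A hedged usage sketch; the image path and values below are placeholders, and
# any ExecutorSpec understood by the target backend can be passed in.
def _example_container(
    executor_spec: job_blocks.ExecutorSpec,
) -> job_blocks.Packageable:
  return container(
      executor_spec,
      image_path='gcr.io/my-project/my-image:latest',
      args={'batch_size': 16},  # Rendered as --batch_size=16.
      env_vars={'LOG_LEVEL': 'info'},
  )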
|
xmanager-main
|
xmanager/xm/packagables.py
|
"""An utility to generate factory methods in packagables.py.
packagables.py has a set of convenience functions which can be mechanically
generated for each ExecutableSpec. But we want them to have all arguments
explicetly written down with proper type annotations. This way IDEs can provide
proper contextual help and autocompletion. We also want make it easy to find
documentation, so having docstrings inplace is important.
We do this in an automated manner to ensure that arguments in ExecutableSpecs
and in the packagables do not diverge and that documentation remains up to date.
Usage: Run this binary and replace relevant parts of packagables.py. Then run
pyformat.
"""
import inspect
import re
from typing import List, Sequence, Type
from absl import app
import inflection
from xmanager.xm import executables
from xmanager.xm import job_blocks
_EXECUTABLES_SPECS = (
executables.Binary,
executables.BazelBinary,
executables.Container,
executables.BazelContainer,
executables.PythonContainer,
executables.Dockerfile,
)
# Reconstructing argument definitions from Python introspection is non-trivial.
# This is partly due to deducing shorter module names (job_blocks rather than
# xmanager.xm.job_blocks), but mostly because type annotations for the attr.s
# constructor show field types rather than what converters accept.
# So here we just list how each parameter should be defined.
_KNOWN_ARGS = (
'base_image: Optional[str] = None',
'bazel_args: Collection[str] = ()',
'docker_instructions: Optional[List[str]] = None',
'dockerfile: Optional[str] = None',
'entrypoint: Union[executables.ModuleName, executables.CommandList]',
'dependencies: Collection[executables.BinaryDependency] = ()',
'image_path: str',
'label: str',
'path: str',
'use_deep_module: bool',
)
_KNOWN_ARGS_DICT = {arg.split(':')[0]: arg for arg in _KNOWN_ARGS}
_ATTRIBUTES_SECTION_HEADER = ' Attributes:'
_ARGS_DOCSTRING = """ Args:
executor_spec: Where the binary should be launched. Instructs for which
platform it should be packaged."""
_DOCSTRING_SUFFIX = """
args: Command line arguments to pass. This can be dict, list or
xm.SequentialArgs. Dicts are most convenient for keyword flags.
{'batch_size': 16} is passed as --batch_size=16. If positional arguments
are needed one can use a list or xm.SequentialArgs.
env_vars: Environment variables to be set.
Returns:
A packageable object which can be turned into an executable with
Experiment.package or Experiment.package_async.
"""
def generate_docstring(executable: Type[job_blocks.ExecutableSpec]) -> str:
"""Returns a docstring for a ExecutableSpec factory method."""
docstring = executable.__doc__
if _ATTRIBUTES_SECTION_HEADER not in docstring:
raise Exception(
f'Please add Attributes: section to {executable.__name__} docstring.'
)
docstring = re.sub(_ATTRIBUTES_SECTION_HEADER, _ARGS_DOCSTRING, docstring)
docstring = docstring.rstrip() + _DOCSTRING_SUFFIX.rstrip()
return docstring
def generate_factory_parameters(parameters: List[inspect.Parameter]) -> str:
"""Returns ExecutableSpec factory method parameters definition.
Args:
parameters: ExecutableSpec constructor parameters except self.
Returns:
Python source code.
"""
source = ' executor_spec: job_blocks.ExecutorSpec,\n'
keyword_args_started = False
for parameter in parameters:
if (
parameter.kind == inspect.Parameter.KEYWORD_ONLY
and not keyword_args_started
):
keyword_args_started = True
source += ' *,\n'
parameter_source = _KNOWN_ARGS_DICT[parameter.name]
if (
parameter.default != inspect.Parameter.empty
and '=' not in parameter_source
):
parameter_source += f' = {parameter.default!r}'
source += f' {parameter_source},\n'
if not keyword_args_started:
source += ' *,\n'
source += ' args: Optional[job_blocks.UserArgs] = None,\n'
source += ' env_vars: Mapping[str, str] = immutabledict.immutabledict(),\n'
return source
def generate_factory_method(executable: Type[job_blocks.ExecutableSpec]) -> str:
"""Returns the factory method source code for the given ExecutableSpec."""
factory_name = inflection.underscore(executable.__name__)
if factory_name == 'dockerfile':
# We should rename the Dockerfile class to avoid this naming inconsistency.
factory_name = 'dockerfile_container'
signature = inspect.signature(executable.__init__)
# Skip the `self` parameter.
parameters = list(signature.parameters.values())[1:]
executable_args = '\n'.join(
f' {p.name}={p.name},' for p in parameters
)
return f'''
def {factory_name}(
{generate_factory_parameters(parameters)}
) -> job_blocks.Packageable:
# pyformat: disable
"""{generate_docstring(executable)}
"""
# pyformat: enable
return job_blocks.Packageable(
executable_spec=executables.{executable.__name__}(
{executable_args}
),
executor_spec=executor_spec,
args=args,
env_vars=env_vars,
)'''.strip(
'\n'
)
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
for spec in _EXECUTABLES_SPECS:
print(generate_factory_method(spec))
print()
print()
if __name__ == '__main__':
app.run(main)
|
xmanager-main
|
xmanager/xm/packagables_generator.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import unittest
from xmanager.xm import id_predictor
from xmanager.xm import utils
class IdPredictorTest(unittest.TestCase):
@utils.run_in_asyncio_loop
async def test_first_id_is_correct(self):
"""Simple Predictor usage example."""
predictor = id_predictor.Predictor(next_id=1)
first_id = predictor.reserve_id()
async with predictor.submit_id(first_id):
self.assertEqual(first_id, 1)
@utils.run_in_asyncio_loop
async def test_ids_are_submitted_in_order(self):
predictor = id_predictor.Predictor(next_id=1)
self.assertEqual(predictor.reserve_id(), 1)
self.assertEqual(predictor.reserve_id(), 2)
self.assertEqual(predictor.reserve_id(), 3)
submitted_ids = []
async def submit(id_to_submit):
async with predictor.submit_id(id_to_submit):
submitted_ids.append(id_to_submit)
await asyncio.gather(submit(3), submit(2), submit(1))
self.assertEqual(submitted_ids, [1, 2, 3])
@utils.run_in_asyncio_loop
async def test_broken_sequence(self):
predictor = id_predictor.Predictor(next_id=1)
self.assertEqual(predictor.reserve_id(), 1)
self.assertEqual(predictor.reserve_id(), 2)
with self.assertRaises(RuntimeError):
async with predictor.submit_id(1):
raise RuntimeError('Id was eaten by a giant space ant.')
with self.assertRaises(id_predictor.BrokenSequenceError):
async with predictor.submit_id(2):
pass
if __name__ == '__main__':
unittest.main()
|
xmanager-main
|
xmanager/xm/id_predictor_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import unittest
from xmanager.xm import utils
async def make_me_a_sandwich() -> str:
return 'sandwich'
class ResourceType(enum.Enum):
MINERALS = 1
VESPEN = 2
class UtilsTest(unittest.TestCase):
@utils.run_in_asyncio_loop
async def test_run_in_asyncio_loop(self):
self.assertEqual(await make_me_a_sandwich(), 'sandwich')
def test_run_in_asyncio_loop_returns_value(self):
self.assertEqual(
utils.run_in_asyncio_loop(make_me_a_sandwich)(), 'sandwich'
)
def test_arg_escaper(self):
self.assertEqual(utils.ARG_ESCAPER(1.0), '1.0')
self.assertEqual(utils.ARG_ESCAPER('Jonny Droptable'), "'Jonny Droptable'")
self.assertEqual(utils.ARG_ESCAPER(ResourceType.VESPEN), 'VESPEN')
def test_shell_safe_arg_in_f_string(self):
# ShellSafeArg shouldn't be used in f-strings.
with self.assertRaises(RuntimeError):
f'{utils.ShellSafeArg("42")}' # pylint: disable=expression-not-assigned
if __name__ == '__main__':
unittest.main()
|
xmanager-main
|
xmanager/xm/utils_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for packagables."""
import unittest
from xmanager.xm import executables
from xmanager.xm import job_blocks
from xmanager.xm import packagables
from xmanager.xm_local import executors
class PackagablesTest(unittest.TestCase):
def test_minimal_executable_spec(self):
expected = job_blocks.Packageable(
executable_spec=executables.BazelBinary(label='label'),
executor_spec=executors.Local.Spec(),
args=[],
env_vars={},
)
actual = packagables.bazel_binary(executors.Local.Spec(), label='label')
self.assertEqual(actual, expected)
def test_pkg_args_env_vars(self):
expected = job_blocks.Packageable(
executable_spec=executables.BazelBinary(label='label'),
executor_spec=executors.Local.Spec(),
args=['-f'],
env_vars={'KEY': 'value'},
)
actual = packagables.bazel_binary(
executors.Local.Spec(),
label='label',
args=['-f'],
env_vars={'KEY': 'value'},
)
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
|
xmanager-main
|
xmanager/xm/packagables_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interface to manipulate and access experiment metadata.
Metadata is attached to a context and the context may belong to an experiment
or a work unit.
"""
from typing import Any, Collection, List, Mapping, Optional, Set
import attr
class ContextAnnotations:
"""Interface for managing annotations.
Annotations are user-supplied attributes of a context, such as title or tags.
Default method implementations are intentionally left blank so that backends
only have to implement the subset they support.
"""
@property
def title(self) -> str:
"""An experiment title.
To differentiate experiments from each other, they can be given a
human-readable title. The same title can be reused for multiple experiments.
"""
return ''
def set_title(self, title: str) -> None:
"""Sets the context title."""
@attr.s(auto_attribs=True)
class MetadataContext:
"""Interface for managing metadata.
The metadata context could be attached to an experiment or a work unit.
Attributes:
creator: The username of the creator of this context.
annotations: User-modifiable annotations.
"""
creator: str
annotations: ContextAnnotations
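# A hedged sketch of a backend-specific ContextAnnotations that keeps the
# title in memory; a real implementation would persist it in its own storage.
class _InMemoryAnnotations(ContextAnnotations):
  def __init__(self) -> None:
    self._title = ''
  @property
  def title(self) -> str:
    return self._title
  def set_title(self, title: str) -> None:
    self._title = title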
|
xmanager-main
|
xmanager/xm/metadata_context.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xmanager.xm.resources."""
from absl.testing import absltest
from absl.testing import parameterized
from xmanager import xm
from xmanager.xm import resources
from xmanager.xm.resources import JobRequirements
from xmanager.xm.resources import ResourceType
class ResourceDictTest(absltest.TestCase):
def test_resource_type_by_name(self):
self.assertEqual(ResourceType['cpu'], ResourceType.CPU)
self.assertEqual(ResourceType['Cpu'], ResourceType.CPU)
self.assertEqual(ResourceType['CPU'], ResourceType.CPU)
with self.assertRaises(KeyError):
ResourceType['UPC'] # pylint: disable=pointless-statement
def test_resource_dict_to_string(self):
resource_dict = resources.ResourceDict()
resource_dict[ResourceType.V100] = 8
resource_dict[ResourceType.CPU] = 4.2 * xm.vCPU
resource_dict[ResourceType.MEMORY] = 16.5 * xm.GiB
self.assertEqual(
str(resource_dict), 'CPU: 4.2, MEMORY: 17716740096.0, V100: 8'
)
def test_resource_dict_from_job_requirements(self):
requirements = JobRequirements(cpu=0.5 * xm.vCPU, memory=2 * xm.MiB, v100=8)
resource_dict = requirements.task_requirements
self.assertEqual(resource_dict[ResourceType.CPU], 0.5)
self.assertEqual(resource_dict[ResourceType.MEMORY], 2097152)
self.assertEqual(resource_dict[ResourceType.V100], 8)
def test_job_requirements_unknown_key(self):
with self.assertRaises(KeyError):
JobRequirements(cpu=0.5 * xm.vCPU, upc=2)
def test_requirements_summation(self):
first = resources.JobRequirements(cpu=1, tpu_v2='2x2')
second = resources.JobRequirements(ram=4 * xm.GiB, replicas=10)
total = (
first.replicas * first.task_requirements
+ second.replicas * second.task_requirements
)
self.assertEqual(total[ResourceType.CPU], 1)
self.assertEqual(total[ResourceType.RAM], 40 * xm.GiB)
self.assertEqual(total[ResourceType.TPU_V2], 4)
class TopologyTest(parameterized.TestCase):
@parameterized.parameters(
('2', 2), ('4x4', 16), ('2x3x5', 30), ('4x4_twisted', 16)
)
def test_resource_type_by_name(self, topology, chip_count):
self.assertEqual(resources.Topology(topology).chip_count, chip_count)
def test_invalid_topology(self):
with self.assertRaises(resources.InvalidTpuTopologyError):
resources.Topology('euclidian')
def test_topology_repr(self):
self.assertEqual(repr(resources.Topology('4x4')), "xm.Topology('4x4')")
def test_topology_eq(self):
self.assertEqual(resources.Topology('4x4'), resources.Topology('4x4'))
self.assertNotEqual(resources.Topology('2x2'), resources.Topology('4x4'))
self.assertEqual(
hash(resources.Topology('4x4')), hash(resources.Topology('4x4'))
)
class JobRequirementsTest(parameterized.TestCase):
def test_cpu_job(self):
requirements = resources.JobRequirements(cpu=1.2, ram=1 * xm.GiB)
self.assertEqual(
requirements.task_requirements[resources.ResourceType.CPU], 1.2
)
self.assertEqual(
requirements.task_requirements[resources.ResourceType.RAM], 1 * xm.GiB
)
self.assertIsNone(requirements.accelerator)
self.assertIsNone(requirements.topology)
self.assertEqual(requirements.replicas, 1)
def test_construct_requirements(self):
requirements = resources.JobRequirements(
{resources.ResourceType.CPU: 4}, v100=1
)
task_requirements = requirements.task_requirements
self.assertEqual(task_requirements[resources.ResourceType.CPU], 4)
self.assertEqual(task_requirements[resources.ResourceType.V100], 1)
self.assertEqual(requirements.replicas, 1)
def test_resource_specified_twice(self):
with self.assertRaises(ValueError):
resources.JobRequirements({resources.ResourceType.CPU: 1}, cpu=2)
def test_tpu_job(self):
requirements = resources.JobRequirements(tpu_v3='4x4')
self.assertEqual(requirements.accelerator, resources.ResourceType.TPU_V3)
self.assertEqual(requirements.topology.name, '4x4')
def test_multihost_gpu(self):
requirements = resources.JobRequirements(v100='4x2')
self.assertEqual(requirements.accelerator, resources.ResourceType.V100)
self.assertEqual(requirements.topology.name, '4x2')
self.assertEqual(requirements.replicas, 2)
def test_multihost_gpu_with_cpu(self):
requirements = resources.JobRequirements(v100='4x2', cpu=1)
self.assertEqual(requirements.accelerator, resources.ResourceType.V100)
self.assertEqual(requirements.topology.name, '4x2')
self.assertEqual(requirements.replicas, 2)
def test_multihost_gpu_with_replicas(self):
with self.assertRaises(ValueError):
resources.JobRequirements(v100='4x2', replicas=3)
def test_location(self):
requirements = resources.JobRequirements(location='lon_r7')
self.assertEqual(requirements.location, 'lon_r7')
def test_service_tier(self):
requirements = resources.JobRequirements(
service_tier=resources.ServiceTier.PROD
)
self.assertEqual(requirements.service_tier, resources.ServiceTier.PROD)
def test_service_tier_mutable(self):
requirements = resources.JobRequirements(
service_tier=resources.ServiceTier.PROD
)
requirements.service_tier = resources.ServiceTier.BATCH
self.assertEqual(requirements.service_tier, resources.ServiceTier.BATCH)
def test_replicas(self):
requirements = resources.JobRequirements(replicas=2)
self.assertEqual(requirements.replicas, 2)
with self.assertRaises(ValueError):
resources.JobRequirements(replicas=2, tpu_v3='4x4')
resources.JobRequirements(replicas=2, v100='4x2')
resources.JobRequirements(v100='4x2')
with self.assertRaises(ValueError):
resources.JobRequirements(replicas=4, v100='4x2')
def test_str(self):
self.assertEqual(
repr(
resources.JobRequirements(
cpu=1,
location='lon_r7',
service_tier=resources.ServiceTier.BATCH,
replicas=2,
)
),
(
"xm.JobRequirements(cpu=1.0, location='lon_r7',"
' service_tier=xm.ServiceTier.BATCH, replicas=2)'
),
)
def test_str_omits_empty_fields(self):
self.assertEqual(
repr(resources.JobRequirements(cpu=1)), 'xm.JobRequirements(cpu=1.0)'
)
def test_is_gpu_tpu_given_cpu(self):
requirements = resources.JobRequirements(cpu=1, ram=4 * xm.GiB)
self.assertNotIn(requirements.accelerator, xm.GpuType)
self.assertNotIn(requirements.accelerator, xm.TpuType)
def test_is_gpu_tpu_given_gpu(self):
requirements = resources.JobRequirements(cpu=1, v100=4)
self.assertIn(requirements.accelerator, xm.GpuType)
self.assertNotIn(requirements.accelerator, xm.TpuType)
def test_is_gpu_tpu_given_tpu(self):
requirements = resources.JobRequirements(cpu=1, tpu_v2='2x2')
self.assertNotIn(requirements.accelerator, xm.GpuType)
self.assertIn(requirements.accelerator, xm.TpuType)
class EnumSubsetTest(parameterized.TestCase):
def test_construction(self):
self.assertEqual(resources.GpuType['V100'], resources.GpuType(17))
with self.assertRaises(AttributeError):
resources.GpuType['TPU_V3'] # pylint: disable=pointless-statement
with self.assertRaises(ValueError):
resources.GpuType(170)
def test_equivalence(self):
self.assertEqual(resources.ResourceType.V100, resources.GpuType.V100)
def test_iteration(self):
gpus = set(iter(resources.GpuType))
self.assertIn(resources.ResourceType.V100, gpus)
self.assertNotIn(resources.ResourceType.TPU_V3, gpus)
def test_contains(self):
self.assertIn(resources.ResourceType.V100, resources.GpuType)
self.assertNotIn(resources.ResourceType.TPU_V3, resources.GpuType)
if __name__ == '__main__':
absltest.main()
|
xmanager-main
|
xmanager/xm/resources_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from typing import Sequence
import unittest
from xmanager.xm import async_packager
from xmanager.xm import job_blocks
from xmanager.xm import utils
def _package_batch(
packageables: Sequence[job_blocks.Packageable],
) -> Sequence[job_blocks.Executable]:
return [
job_blocks.Executable(name=packageable.executable_spec.name)
for packageable in packageables
]
class _TestExecutableSpec(job_blocks.ExecutableSpec):
def __init__(self, name: str) -> None:
self._name = name
@property
def name(self) -> str:
return self._name
def _make_packageable(name: str) -> job_blocks.Packageable:
return job_blocks.Packageable(
executable_spec=_TestExecutableSpec(name),
executor_spec=job_blocks.ExecutorSpec(),
)
class AsyncPackagerTest(unittest.TestCase):
@utils.run_in_asyncio_loop
async def test_async_packager_end_to_end(self):
packager = async_packager.AsyncPackager(_package_batch)
executable1 = packager.add(_make_packageable('1'))
executable2 = packager.add(_make_packageable('2'))
packager.package()
self.assertEqual((await executable1).name, '1')
self.assertEqual((await executable2).name, '2')
@utils.run_in_asyncio_loop
async def test_package_with_extra_packageables(self):
packager = async_packager.AsyncPackager(_package_batch)
async_executable = packager.add(_make_packageable('async'))
[extra_executable] = packager.package((_make_packageable('extra'),))
self.assertEqual((await async_executable).name, 'async')
self.assertEqual(extra_executable.name, 'extra')
@utils.run_in_asyncio_loop
async def test_package_is_required(self):
packager = async_packager.AsyncPackager(_package_batch)
executable = packager.add(_make_packageable(''))
with self.assertRaises(async_packager.PackageHasNotBeenCalledError):
await executable
def test_awaitable_is_picklable(self):
packager = async_packager.AsyncPackager(_package_batch)
executable = packager.add(_make_packageable(''))
packager.package()
executable_str = pickle.dumps(executable)
# Wait for the executable in a separate event loop, which did not even exist
# when we requested packaging.
@utils.run_in_asyncio_loop
async def wait_for_it():
await pickle.loads(executable_str)
wait_for_it()
def test_awaitable_is_repeatedly_picklable(self):
packager = async_packager.AsyncPackager(_package_batch)
executable = packager.add(_make_packageable(''))
packager.package()
executable_str = pickle.dumps(executable)
executable_reconstructed = pickle.loads(executable_str)
executable_str2 = pickle.dumps(executable_reconstructed)
self.assertEqual(executable_str, executable_str2)
if __name__ == '__main__':
unittest.main()
|
xmanager-main
|
xmanager/xm/async_packager_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XManager client API.
Provides XManager public API for configuring and launching experiments.
"""
from xmanager.xm import job_operators
from xmanager.xm.compute_units import *
from xmanager.xm.core import AuxiliaryUnitJob
from xmanager.xm.core import AuxiliaryUnitRole
from xmanager.xm.core import Experiment
from xmanager.xm.core import ExperimentUnit
from xmanager.xm.core import ExperimentUnitError
from xmanager.xm.core import ExperimentUnitFailedError
from xmanager.xm.core import ExperimentUnitNotCompletedError
from xmanager.xm.core import ExperimentUnitRole
from xmanager.xm.core import ExperimentUnitStatus
from xmanager.xm.core import Importance
from xmanager.xm.core import LaunchedJob
from xmanager.xm.core import NotFoundError
from xmanager.xm.core import ReloadError
from xmanager.xm.core import WorkUnit
from xmanager.xm.core import WorkUnitCompletedAwaitable
from xmanager.xm.core import WorkUnitRole
from xmanager.xm.executables import BazelBinary
from xmanager.xm.executables import BazelContainer
from xmanager.xm.executables import Binary
from xmanager.xm.executables import BinaryDependency
from xmanager.xm.executables import CommandList
from xmanager.xm.executables import Container
from xmanager.xm.executables import Dockerfile
from xmanager.xm.executables import ModuleName
from xmanager.xm.executables import PythonContainer
from xmanager.xm.job_blocks import Constraint
from xmanager.xm.job_blocks import Executable
from xmanager.xm.job_blocks import ExecutableSpec
from xmanager.xm.job_blocks import Executor
from xmanager.xm.job_blocks import ExecutorSpec
from xmanager.xm.job_blocks import get_args_for_all_jobs
from xmanager.xm.job_blocks import Job
from xmanager.xm.job_blocks import JobConfig
from xmanager.xm.job_blocks import JobGeneratorType
from xmanager.xm.job_blocks import JobGroup
from xmanager.xm.job_blocks import JobType
from xmanager.xm.job_blocks import merge_args
from xmanager.xm.job_blocks import Packageable
from xmanager.xm.job_blocks import SequentialArgs
from xmanager.xm.job_blocks import UserArgs
from xmanager.xm.metadata_context import ContextAnnotations
from xmanager.xm.metadata_context import MetadataContext
from xmanager.xm.packagables import bazel_binary
from xmanager.xm.packagables import bazel_container
from xmanager.xm.packagables import binary
from xmanager.xm.packagables import container
from xmanager.xm.packagables import dockerfile_container
from xmanager.xm.packagables import python_container
from xmanager.xm.resources import GpuType
from xmanager.xm.resources import InvalidTpuTopologyError
from xmanager.xm.resources import JobRequirements
from xmanager.xm.resources import ResourceDict
from xmanager.xm.resources import ResourceQuantity
from xmanager.xm.resources import ResourceType
from xmanager.xm.resources import ServiceTier
from xmanager.xm.resources import Topology
from xmanager.xm.resources import TpuType
from xmanager.xm.utils import run_in_asyncio_loop
from xmanager.xm.utils import ShellSafeArg
|
xmanager-main
|
xmanager/xm/__init__.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract API specification for XManager implementations.
Each implementation of the XManager API should override the abstract methods.
Users are normally expected to have the following pair of imports:
```
from xmanager import xm
from xmanager import xm_foo
```
"""
import abc
import asyncio
from concurrent import futures
import contextvars
import enum
import getpass
import inspect
import queue
import threading
import traceback
from typing import Any, Awaitable, Callable, Collection, Coroutine, Dict, Generator, List, Mapping, Optional, Sequence, overload
from absl import logging
import attr
from xmanager.xm import async_packager
from xmanager.xm import id_predictor
from xmanager.xm import job_blocks
from xmanager.xm import job_operators
from xmanager.xm import metadata_context
# ContextVars holding the current experiment (when within an Experiment context)
# and current experiment unit (when inside a JobGenerator).
_current_experiment: contextvars.ContextVar['Experiment'] = (
contextvars.ContextVar('_xm_current_experiment')
)
_current_experiment_unit: contextvars.ContextVar['ExperimentUnit'] = (
contextvars.ContextVar('_xm_current_experiment_unit')
)
def _check_if_unsupported_args_are_present(
args: Mapping[str, Any], supported_args: Collection[str], job_type: str
) -> None:
supported_args = set(supported_args)
unsupported_args = set(args.keys()) - supported_args
if unsupported_args:
raise ValueError(
f'Arguments {unsupported_args!r} are not supported by {job_type}. Only '
f'{supported_args!r} are allowed.'
)
def _apply_args(job_type: job_blocks.JobType, args: Mapping[str, Any]) -> None:
# pytype: disable=attribute-error
match job_type:
case job_blocks.Job() as job:
_check_if_unsupported_args_are_present(
args, ('args', 'env_vars'), 'xm.Job'
)
if 'args' in args:
job.args = job_blocks.merge_args(job.args, args['args'])
if 'env_vars' in args:
job.env_vars = job.env_vars.copy()
job.env_vars.update(args['env_vars'])
case job_blocks.JobGroup() as job_group:
if args:
_check_if_unsupported_args_are_present(
args, job_group.jobs.keys(), 'xm.JobGroup'
)
for key, job in job_group.jobs.items():
_apply_args(job, args.get(key, {}))
case _:
pass
# pytype: enable=attribute-error
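# A hedged illustration of the behaviour above: for a plain Job, 'args' are
# merged into job.args and 'env_vars' are overlaid onto job.env_vars in place.
def _example_apply_args(job: job_blocks.Job) -> None:
  _apply_args(job, {'args': {'seed': 7}, 'env_vars': {'TASK': 'train'}})
  # job.args now includes --seed=7 and job.env_vars contains TASK='train'.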
class ExperimentUnitStatus(abc.ABC):
"""The status of an experiment unit."""
@property
@abc.abstractmethod
def is_active(self) -> bool:
"""Returns whether the unit is not in terminal state.
It may be actively running or queued. The unit may produce more results.
If the unit is stopped by a user it will be neither active, completed
nor failed.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def is_completed(self) -> bool:
"""Returns whether the unit has completed without failures.
This is a terminal state. The unit has produced all the intended results.
But it still may be restarted by an explicit request.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def is_failed(self) -> bool:
"""Returns whether the unit has failed.
This is a terminal state. An experiment unit enters this state on any
fatal failure, such as the process exiting with a non-zero code, the cloud
refusing to schedule/queue the job, or an exception in a JobGenerator. The
unit will stay in this state unless explicitly restarted.
Intermediate failures do not result in this state.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def message(self) -> str:
"""An optional human-readable message providing context for the status.
This may take the form of explaining why the work unit is in this state,
or any potentially transient errors the work unit may be experiencing.
"""
raise NotImplementedError
class ExperimentUnitError(RuntimeError):
"""Experiment unit could not be completed.
Attrs:
    work_unit: The work unit in which the error occurred, if available.
"""
work_unit: Optional['WorkUnit'] = None
def __init__(self, message: Any, *, work_unit: Optional['WorkUnit'] = None):
super().__init__(message)
self.work_unit = work_unit
class ExperimentUnitFailedError(ExperimentUnitError):
"""A job running in an experiment unit has failed."""
class ExperimentUnitNotCompletedError(ExperimentUnitError):
"""Experiment unit is neither running nor completed.
For example it may be stopped by a user.
"""
class NotFoundError(KeyError):
"""Experiment/Work Unit/etc. has not been found."""
class ReloadError(ExperimentUnitError):
"""Raised when an XReload reload check fails during the reload step."""
def _work_unit_arguments(
job: job_blocks.JobType,
args: Optional[Mapping[str, Any]],
) -> Mapping[str, Any]:
"""Constructs work unit arguments to display them in various UIs.
If users pass `args` to the `.add` method explicitly, we assume `args` to be
the sought work unit arguments. If `args` are not passed to `.add`, we deduce
work unit arguments implicitly from the `job`s' `args` and `env_vars`.
Args:
job: A job to run inside a work unit.
args: Explicitly specified arguments (could be empty).
Returns:
Depending on the type of the `job` given, can be one of the following:
- if it's an instance of `Job`, we return `{'args': job.args, 'env_vars':
job.env_vars}` with empty values omitted;
- if it's an instance of `JobGroup`, we recursively unwind the group while
populating corresponding nested dictionaries until we reach standalone
`Job`s;
- if it's a job generator, we return `{}`.
"""
if args is not None:
    # In order to give users control over what is shown as work unit arguments,
    # we don't alter them if a value is given.
return args
def deduce_args(job_type: job_blocks.JobType) -> Dict[str, Any]:
# pytype: disable=attribute-error
match job_type:
case job_blocks.Job() as job:
args = {
'args': job.args.to_dict(kwargs_only=True),
'env_vars': job.env_vars,
}
return {key: value for key, value in args.items() if value}
case job_blocks.JobGroup() as job_group:
args = {}
for job_name, job in job_group.jobs.items():
job_args = deduce_args(job)
if job_args:
args[job_name] = job_args
return args
case _:
return {}
# pytype: enable=attribute-error
return deduce_args(job)
@attr.s(auto_attribs=True, kw_only=True)
class LaunchedJob:
"""A read-only view of a launched job.
Launched jobs correspond to job instances that have been added to an
Experiment Unit. If a Job does not have a unique identity, it can be added
multiple times to an Experiment, so a Job object may correspond to multiple
LaunchedJob objects. A LaunchedJob will have the same name as the added Job,
but launched jobs may have different addresses and logs.
`experiment.add(job)` will generate an Experiment Unit
containing a corresponding launched job. `experiment.add(job_group)` will
generate an Experiment Unit with multiple corresponding launched jobs.
`experiment.add(generator)` will generate an Experiment Unit with as many
launched jobs as the generator adds.
Attributes:
name: Name of the job corresponding to this launched job.
address: The job's address.
logs: A URL to this job's logs.
"""
name: str
address: Optional[str] = None
logs: Optional[str] = None
class Importance(enum.Enum):
"""How important it is to schedule particular Experiment or ExperimentUnit.
This is a hint to the scheduler. Not all schedulers take it into account
(xm_local doesn't). And even with smart scheduler a less important work unit
may run before a more important one e.g. if it uses a less contended resource.
Unlike ServiceTier, importance only controls preference within a team i.e. how
team's resources are divided between team's experiments. It has no effect on
resource allocation between teams.
"""
# High impact experiments. Try scheduling them even at the cost of significant
# reduction of the overall throughput that your experiments get.
HIGH = 'high'
# The default importance.
NORMAL = 'normal'
  # Prefer to schedule other experiments with higher importance, but overall
  # try to maximize throughput.
LOW = 'low'
@attr.s(auto_attribs=True, kw_only=True)
class ExperimentUnitRole(abc.ABC):
"""The role of an experiment unit within the experiment structure.
Attributes:
    importance: how important it is to schedule this executable unit compared
      to all your executable units (from all your experiments).
"""
importance: Importance = Importance.NORMAL
class ExperimentUnit(abc.ABC):
"""ExperimentUnit is a collection of semantically associated `Job`s."""
experiment: 'Experiment'
def __init__(
self,
experiment: 'Experiment',
create_task: Callable[[Awaitable[Any]], futures.Future[Any]],
args: Optional[Mapping[str, Any]],
role: ExperimentUnitRole,
identity: str = '',
) -> None:
"""Initializes an `ExperimentUnit` instance.
Args:
experiment: An experiment this unit belongs to.
create_task: A callback to register a new asynchronous task.
args: Arguments to this experiment unit. Most commonly used to represent
the hyperparameter sweep trial corresponding to a work unit.
role: The role of this unit in the experiment structure.
identity: The unique (user defined) identifier for this work unit.
"""
self.experiment = experiment
self._create_task = create_task
self._args = args
self._role = role
self._identity = identity
self._launch_tasks: List[futures.Future[Any]] = []
@property
def experiment_id(self) -> int:
"""Returns a unique ID assigned to the experiment."""
return self.experiment.experiment_id
@property
def identity(self) -> str:
"""Returns the unique identity (user assigned) for the experiment unit."""
return self._identity
def add(
self,
job: job_blocks.JobType,
args: Optional[Mapping[str, Any]] = None,
*,
identity: str = '',
) -> Awaitable[None]:
# pyformat: disable
"""Adds a Job / JobGroup to the experiment unit.
Only one JobGroup can be added to an ExperimentUnit. This limitation may be
lifted in future versions.
Args:
job: A job or job group to add.
args: Keyword arguments to be passed to the job. For Job and JobGroup args
are recursively expanded. For example,
```
wu.add(
JobGroup(agent=Job(...)),
args={'agent': {'args': {'learning_rate': 0.1}}},
)
```
would update `args` field of a job `agent` in the group.
identity: Optional unique job identifier. If not empty, `add` adopts an
        'add if not exists' behavior. No changes to the existing job will be made.
Returns:
An awaitable that would be fulfilled when the job is launched.
"""
# pyformat: enable
# Prioritize the identity given directly to the work unit at work unit
# creation time, as opposed to the identity passed when adding jobs to it as
# this is more consistent between job generator work units and regular work
# units.
identity = self.identity or identity
job = job_operators.shallow_copy_job_type(job)
if args is not None:
_apply_args(job, args)
job_operators.populate_job_names(job)
def launch_job(job: job_blocks.Job) -> Awaitable[None]:
_current_experiment.set(self.experiment)
_current_experiment_unit.set(self)
return self._launch_job_group(
job_blocks.JobGroup(**{job.name: job}),
_work_unit_arguments(job, self._args),
identity,
)
def launch_job_group(group: job_blocks.JobGroup) -> Awaitable[None]:
_current_experiment.set(self.experiment)
_current_experiment_unit.set(self)
return self._launch_job_group(
group, _work_unit_arguments(group, self._args), identity
)
def launch_job_generator(
job_generator: job_blocks.JobGeneratorType,
) -> Awaitable[None]:
if not inspect.iscoroutinefunction(
job_generator
) and not inspect.iscoroutinefunction(job_generator.__call__):
raise ValueError(
'Job generator must be an async function. Signature needs to be '
'`async def job_generator(work_unit: xm.WorkUnit) -> None:`'
)
_current_experiment.set(self.experiment)
_current_experiment_unit.set(self)
coroutine = job_generator(self, **(args or {}))
assert coroutine is not None
return coroutine
def launch_job_config(job_config: job_blocks.JobConfig) -> Awaitable[None]:
_current_experiment.set(self.experiment)
_current_experiment_unit.set(self)
return self._launch_job_config(job_config, args or {}, identity)
job_awaitable: Awaitable[Any]
match job:
case job_blocks.Job() as job:
job_awaitable = launch_job(job)
case job_blocks.JobGroup() as job_group:
job_awaitable = launch_job_group(job_group)
case job_generator if job_blocks.is_job_generator(job):
job_awaitable = launch_job_generator(job_generator)
case job_blocks.JobConfig() as job_config:
job_awaitable = launch_job_config(job_config)
case _:
raise TypeError(f'Unsupported job type: {job!r}')
launch_task = self._create_task(job_awaitable)
self._launch_tasks.append(launch_task)
return asyncio.wrap_future(launch_task)
async def _wait_until_complete_impl(self) -> 'ExperimentUnit':
try:
for task in self._launch_tasks:
await asyncio.wrap_future(task)
except Exception as e:
raise ExperimentUnitError('Experiment unit could not be created.') from e
await self._wait_until_complete()
return self
def wait_until_complete(self) -> Coroutine[Any, Any, 'ExperimentUnit']:
"""Waits until the unit is in a final state: completed/failed/stopped.
Raises:
ExperimentUnitError: Exception if the unit couldn't complete.
Returns:
Returns self to facilitate asyncio.as_completed usage.
"""
return self._wait_until_complete_impl()
async def _launch_job_group(
self,
job_group: job_blocks.JobGroup,
args_view: Mapping[str, Any],
identity: str,
) -> None:
"""Launches a given job group as part of the unit."""
raise NotImplementedError
async def _launch_job_config(
self,
job_config: job_blocks.JobConfig,
args_view: Mapping[str, Any],
identity: str,
) -> None:
"""Launches a given job config as part of the unit."""
raise NotImplementedError
async def _wait_until_complete(self) -> None:
"""Waits until the unit is in a final state: completed/failed/stopped.
Child classes need to implement this method to support awaiting units.
    Unlike wait_until_complete, this method assumes that the unit has been
    fully created. This method is only invoked if somebody has requested to
    monitor the unit.
"""
raise NotImplementedError
def stop(
self,
*,
mark_as_failed: bool = False,
mark_as_completed: bool = False,
message: Optional[str] = None,
) -> None:
"""Initiate the process to stop the unit from running.
This method will synchronously make a request for the unit to stop.
However, the method does not actually wait for the unit to be in a
terminal state.
Use self.wait_until_complete() after self.stop() to guarantee the unit
is stopped.
Args:
mark_as_failed: Mark this unit as failed rather than stopped.
mark_as_completed: Mark this unit as completed rather than stopped.
message: Optional user-defined status message.
"""
raise NotImplementedError
def get_status(self) -> ExperimentUnitStatus:
"""Gets the status of this unit."""
raise NotImplementedError
@property
@abc.abstractmethod
def experiment_unit_name(self) -> str:
raise NotImplementedError
def get_full_job_name(self, job_name: str) -> str:
"""Given `Job.name` constructs its full name.
The primary use case is addressing containers -- full names serve as
hostnames.
Args:
job_name: Short name of a job.
Returns:
Full name of the job.
"""
return f'{self.experiment_unit_name}_{job_name}'
@property
def context(self) -> metadata_context.MetadataContext:
"""Returns metadata context for a unit."""
return metadata_context.MetadataContext(
creator=getpass.getuser(),
annotations=metadata_context.ContextAnnotations(),
)
@property
def launched_jobs(self) -> List[LaunchedJob]:
"""Gets a representation for each individual job that was added.
Each added Job should produce a LaunchedJob.
Each added JobGroup will produce a LaunchedJob for each leaf Job.
"""
raise NotImplementedError
@attr.s(auto_attribs=True, kw_only=True)
class WorkUnitRole(ExperimentUnitRole):
"""An experiment unit with this role is a work unit.
Work units contain jobs that are often run as trials as part of an
experiment's hyper-parameter search. The status of a work unit is used to
determine the status of the experiment.
"""
class WorkUnitCompletedAwaitable(Coroutine):
"""Awaitable for work unit completion event.
Usage:
completion_events = [work_unit.wait_until_complete() for work_unit in ...]
while completion_events:
      completed_events, completion_events = await asyncio.wait(
          completion_events, return_when=asyncio.FIRST_COMPLETED)
      for event in completed_events:
        wid = event.work_unit.work_unit_id
        try:
          await event
          print(f'Work unit {wid} completed successfully.')
        except xm.ExperimentUnitError as e:
          print(f'Work unit {wid} failed: {e}.')
"""
def __init__(
self, work_unit: 'WorkUnit', awaitable: Callable[[], Any]
) -> None:
self.work_unit = work_unit
self._awaitable = awaitable
self._wait_coro = self._wait()
async def _wait(self) -> 'WorkUnit':
# Coroutine must be created inside of async function to avoid
# "coroutine ... was never awaited" runtime warning.
await self._awaitable()
return self.work_unit
def __await__(self) -> Generator[Any, None, 'WorkUnit']:
return self._wait_coro.__await__()
def send(self, value: Any) -> Any:
return self._wait_coro.send(value)
def throw(self, typ, val=None, tb=None) -> Any:
return self._wait_coro.throw(typ, val, tb)
def close(self) -> None:
self._wait_coro.close()
class WorkUnit(ExperimentUnit):
"""Work units are experiment units with the work unit role."""
@property
@abc.abstractmethod
def work_unit_id(self) -> int:
raise NotImplementedError
def wait_until_complete(self) -> WorkUnitCompletedAwaitable:
"""Waits until the unit is in a final state: completed/failed/stopped.
Raises:
ExperimentUnitError: Exception if the unit couldn't complete.
Returns:
Returns self to facilitate asyncio.as_completed usage.
"""
return WorkUnitCompletedAwaitable(self, self._wait_until_complete_impl)
@attr.s(auto_attribs=True, kw_only=True)
class AuxiliaryUnitRole(ExperimentUnitRole):
"""An experiment unit with this role is an auxiliary unit.
Auxiliary units contain jobs that are not part of the trials of a
hyper-parameter search. The status of an auxiliary unit is not used to
  determine the status of the experiment (e.g. a TensorBoard unit).
Attributes:
termination_delay_secs: How long to keep AUX unit running after experiment
completion.
"""
termination_delay_secs: int
class AuxiliaryUnitJob(abc.ABC):
"""A job bundled with an AuxiliaryUnitRole.
This class allows libraries to define self-contained objects which would
  result in AUX units once added to the experiment.
Note that this class conforms to xm.JobGenerator interface.
"""
role: AuxiliaryUnitRole
_job: job_blocks.JobType
def __init__(
self,
job: job_blocks.JobType,
*,
importance: Importance = Importance.NORMAL,
termination_delay_secs: int,
) -> None:
self.role = AuxiliaryUnitRole(
importance=importance,
termination_delay_secs=termination_delay_secs,
)
self._job = job
async def __call__(self, aux_unit: ExperimentUnit, **kwargs):
if not job_blocks.is_job_generator(self._job):
aux_unit.add(self._job, args=kwargs)
return
job_generator = self._job
coroutine = job_generator(aux_unit, **kwargs)
assert coroutine is not None
await coroutine
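# Illustrative sketch, not part of the original module: wrapping a monitoring
# job (e.g. a TensorBoard-like service) as an auxiliary unit. The executable
# and executor are hypothetical placeholders supplied by a concrete backend.
def _example_add_auxiliary_unit(experiment, executable, executor) -> None:
  experiment.add(
      AuxiliaryUnitJob(
          job_blocks.Job(executable, executor, name='tensorboard'),
          importance=Importance.LOW,
          termination_delay_secs=600,
      )
  )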
class Experiment(abc.ABC):
"""Experiment is the core unit of research in XManager.
An experiment typically involves running a computation (e.g. training a model)
in different hyperparameter configurations. Experiments are made up of work
units, which do the computation(s) in question, and auxiliary units which
  perform other functions, such as running TensorBoard. An experiment can have
  associated metadata, such as a title or the source code commit from which it
  was built.
  The XManager API provides multiple implementations of the Experiment class
  which use different backends to start jobs. Types of supported Executables
  may vary between these implementations. Usually experiments would be created
  through xm_<foo>.create_experiment() functions.
  While experiment metadata and statuses can be accessed and altered directly,
  adding work or auxiliary units requires entering the experiment context:
with xm_foo.create_experiment(...) as experiment:
experiment.add(xm.Job(...))
  This context would spawn a background event loop to support asynchronous
  operations and ensure that they are all completed on exit. It may also mark
  the experiment as failed if an exception is thrown.
"""
# An event loop in which job generators would be run.
_event_loop: asyncio.AbstractEventLoop
# A queue of background tasks that launch work units.
_running_tasks: queue.Queue[futures.Future[Any]]
# Work unit ID predictor.
_work_unit_id_predictor: id_predictor.Predictor
# A class variable for batching packaging requests.
_async_packager: async_packager.AsyncPackager
# ContextVars token when entering the context.
_current_experiment_token: contextvars.Token
_current_async_experiment_token: contextvars.Token
@property
def experiment_id(self) -> int:
"""Returns a unique ID assigned to the experiment."""
raise NotImplementedError
def __enter__(self) -> 'Experiment':
is_coro_context = False
try:
asyncio.get_running_loop()
is_coro_context = True
except RuntimeError:
pass
if is_coro_context:
raise RuntimeError(
'When using Experiment from a coroutine please use '
'`async with` syntax'
)
self._current_experiment_token = _current_experiment.set(self)
self._event_loop = asyncio.new_event_loop()
asyncio.get_child_watcher().attach_loop(self._event_loop)
self._event_loop_thread = self._thread_factory()(
target=self._event_loop.run_forever, daemon=True
)
self._event_loop_thread.start()
    # asyncio.run_coroutine_threadsafe doesn't accept a class method and wants
    # it wrapped in a function.
async def async_enter():
await self.__aenter__()
asyncio.run_coroutine_threadsafe(
async_enter(), loop=self._event_loop
).result()
return self
def _thread_factory(self) -> type[threading.Thread]:
"""Gets the type of Thread to use for asynchronous operations.
Defaults to `threading.Thread`.
Returns:
The type to use for constructing a Thread.
"""
return threading.Thread
def _wait_for_tasks(self):
"""Waits for pending tasks to complete, raising the first error."""
exception = None
while not self._running_tasks.empty():
try:
self._running_tasks.get_nowait().result()
except futures.CancelledError:
# Ignore cancelled tasks.
pass
except Exception as e: # pylint: disable=broad-except
# Allow remaining tasks to complete before raising the first exception.
if not exception:
exception = e
if exception:
raise exception
def __exit__(self, exc_type, exc_value, traceback): # pylint:disable=redefined-outer-name
_current_experiment.reset(self._current_experiment_token)
self._wait_for_tasks()
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
self._event_loop_thread.join()
async def __aenter__(self):
self._current_async_experiment_token = _current_experiment.set(self)
self._event_loop = asyncio.get_event_loop()
self._running_tasks = queue.Queue()
self._work_unit_id_predictor = id_predictor.Predictor(
1 + self.work_unit_count
)
return self
async def _await_for_tasks(self):
while not self._running_tasks.empty():
await asyncio.wrap_future(self._running_tasks.get_nowait())
async def __aexit__(self, exc_type, exc_value, traceback): # pylint:disable=redefined-outer-name
_current_experiment.reset(self._current_async_experiment_token)
await self._await_for_tasks()
@classmethod
def package(
cls, packageables: Sequence[job_blocks.Packageable] = ()
) -> Sequence[job_blocks.Executable]:
"""Packages `packageables` & triggers async packaging.
    This function has two usages:
    - Builds all given executable specs in parallel. While calling package(...)
      multiple times is allowed, that would result in a slow sequential build,
      even if invoked from concurrent threads.
- Triggers packaging of the items enqueued previously with `package_async`.
Args:
packageables: A sequence of extra packageables to build synchronously.
Returns:
A sequence of packaging results associated to `packageables` (same order).
"""
return cls._async_packager.package(packageables)
@classmethod
def package_async(
cls, packageable: job_blocks.Packageable
) -> Awaitable[job_blocks.Executable]:
"""Queues executable spec to be packaged into executable.
If gathering all packageables for a single `package()` call is inconvenient,
one may request packaging with `package_async` and later trigger the build
for the whole batch with `package()`.
Usage:
if eval:
eval_executable = experiment.package_async(xm.blaze_binary(...))
if train:
train_executable = experiment.package_async(xm.blaze_binary(...))
experiment.package() # Explicitly trigger packaging.
jobs = {}
if eval:
jobs['eval'] = xm.job(await eval_executable, ...)
if train:
jobs['train'] = xm.job(await train_executable, ...)
Args:
packageable: Executable spec to package.
Returns:
An awaitable for the packaging result.
"""
return cls._async_packager.add(packageable)
@overload
def add(
self,
job: AuxiliaryUnitJob,
args: Optional[Mapping[str, Any]] = ...,
*, # parameters after “*” are keyword-only parameters
identity: str = '',
) -> asyncio.Future[ExperimentUnit]:
...
@overload
def add(
self,
job: job_blocks.JobType,
args: Optional[Mapping[str, Any]] = ...,
*, # parameters after “*” are keyword-only parameters
role: WorkUnitRole = ...,
identity: str = '',
) -> asyncio.Future[WorkUnit]:
...
@overload
def add(
self,
job: job_blocks.JobType,
args: Optional[Mapping[str, Any]],
*, # parameters after “*” are keyword-only parameters
role: ExperimentUnitRole,
identity: str = '',
) -> asyncio.Future[ExperimentUnit]:
...
@overload
def add(
self,
job: job_blocks.JobType,
args: Optional[Mapping[str, Any]] = ...,
*, # parameters after “*” are keyword-only parameters
role: ExperimentUnitRole,
identity: str = '',
) -> asyncio.Future[ExperimentUnit]:
...
# The ExperimentUnit return type is determined by the role.
def add(self, job, args=None, *, role=WorkUnitRole(), identity: str = ''):
# pyformat: disable
"""Adds a Job / JobGroup to the experiment.
A new Experiment Unit is created to run the job.
Args:
job: A Job or JobGroup to add.
args: Keyword arguments to be passed to the job. For Job and JobGroup args
are recursively expanded. For example,
```
wu.add(
JobGroup(agent=Job(...)),
args={'agent': {'args': {'learning_rate': 0.1}}},
)
```
would update `args` field of a job `agent` in the group.
role: The role of this unit in the experiment structure.
identity: Optional unique experiment unit identifier within the
experiment. If not empty, `add` adopts an 'add if not exists' behavior.
If a unit with the given identity already exists it will be returned as
is, without modifications. JobGenerators would still run to allow them
to recover after preemption.
Returns:
An awaitable that would be fulfilled when the job is launched.
"""
# pyformat: enable
if isinstance(job, AuxiliaryUnitJob):
role = job.role
if self._should_reload_experiment_unit(role):
experiment_unit_future = self._get_experiment_unit(
self.experiment_id, identity, role, args
)
else:
experiment_unit_future = self._create_experiment_unit(
args, role, identity
)
async def launch():
experiment_unit = await experiment_unit_future
try:
await experiment_unit.add(job, args, identity=identity)
except Exception:
try:
experiment_unit.stop(
mark_as_failed=True,
message=f'Work unit creation failed. {traceback.format_exc()}',
)
except Exception as stop_exception: # pylint: disable=broad-except
logging.error("Couldn't stop experiment unit: %s", stop_exception)
raise
return experiment_unit
async def reload():
experiment_unit = await experiment_unit_future
try:
await experiment_unit.add(job, args, identity=identity)
except Exception as update_exception:
logging.error(
'Could not reload the experiment unit: %s',
update_exception,
)
raise
return experiment_unit
return asyncio.wrap_future(
self._create_task(
reload() if self._should_reload_experiment_unit(role) else launch()
),
loop=self._event_loop,
)
@abc.abstractmethod
def _get_experiment_unit(
self,
experiment_id: int,
identity: str,
role: ExperimentUnitRole,
args: Optional[Mapping[str, Any]] = None,
) -> Awaitable[ExperimentUnit]:
"""Returns an existing experiment unit by identity.
Args:
experiment_id: The ID of the experiment to get the Experiment Unit for.
identity: The identity of the Experiment Unit to get.
role: Executable unit role: whether to fetch a work unit or auxiliary
unit.
args: Keyword arguments to be passed to the job.
Returns:
An awaitable which fetches the work unit.
"""
raise NotImplementedError
@abc.abstractmethod
def _create_experiment_unit(
self,
args: Optional[Mapping[str, Any]],
role: ExperimentUnitRole,
identity: str,
) -> Awaitable[ExperimentUnit]:
"""Creates a new experiment unit.
Synchronously starts the experiment unit creation, ensuring that IDs would
be assigned in invocation order. The operation itself may run asynchronously
    in the background.
Args:
      args: Executable unit arguments, to be shown as part of a hyper-parameter
        sweep.
role: Executable unit role: whether to create a work or auxiliary unit.
identity: Optional user-given experiment unit id.
Returns:
An awaitable to the creation result.
"""
raise NotImplementedError
def _create_task(self, task: Awaitable[Any]) -> futures.Future[Any]:
if not self._event_loop.is_running():
raise RuntimeError(
'Event loop is not running. Have you entered Experiment context '
          'manager (e.g. with xm.create_experiment() as experiment:)?'
)
future = asyncio.run_coroutine_threadsafe(task, loop=self._event_loop)
self._running_tasks.put_nowait(future)
return future
@property
def work_unit_count(self) -> int:
"""Returns how many work units the experiment has."""
raise NotImplementedError
@abc.abstractmethod
def work_units(self) -> Mapping[int, WorkUnit]:
"""Returns a mapping from work_unit_id to an instance of the work unit."""
raise NotImplementedError
@property
def context(self) -> metadata_context.MetadataContext:
"""Returns metadata context for the experiment."""
return metadata_context.MetadataContext(
creator=getpass.getuser(),
annotations=metadata_context.ContextAnnotations(),
)
@abc.abstractmethod
def _should_reload_experiment_unit(self, role: ExperimentUnitRole) -> bool:
"""Returns True if the experiment unit should be reloaded based on its role.
    Reloading an experiment depends on the context in which it is running.
Primarily it entails updating, stopping, and restarting the executable
units very quickly without having to wait for scheduling.
Args:
role: Experiment unit role trying to be reloaded.
"""
raise NotImplementedError
@abc.abstractmethod
def create_experiment(experiment_title: Optional[str] = None) -> Experiment:
"""Returns a concrete Experiment instance."""
raise NotImplementedError
@abc.abstractmethod
def get_experiment(experiment_id: int) -> Experiment:
"""Returns an Experiment instance associated with this experiment id.
Args:
experiment_id: An ID of an experiment to get.
Raises:
NotFoundError: If experiment is not found.
"""
raise NotImplementedError
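# Illustrative sketch, not part of the original module: the typical launch flow
# against a hypothetical backend package `xm_foo`. The executable and executor
# are assumed to be an already packaged Executable and a matching Executor.
def _example_launch_flow(xm_foo, executable, executor) -> None:
  with xm_foo.create_experiment(experiment_title='cifar10') as experiment:
    for learning_rate in (0.1, 0.01):
      experiment.add(
          job_blocks.Job(
              executable=executable,
              executor=executor,
              args={'learning_rate': learning_rate},
          )
      )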
|
xmanager-main
|
xmanager/xm/core.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from concurrent import futures
import threading
import unittest
from xmanager import xm_mock
from xmanager.xm import core
from xmanager.xm import job_blocks
from xmanager.xm import utils
class TestError(RuntimeError):
"""Exception which can be used in tests below."""
async def failing_job_generator(work_unit: core.WorkUnit):
raise TestError
class ApplyArgsTest(unittest.TestCase):
def test_wrong_job_args(self):
with self.assertRaises(ValueError):
core._apply_args(
job_blocks.Job(
job_blocks.Executable(name=''), xm_mock.MockExecutor()
),
{'abra': 'kadabra'},
)
def test_wrong_job_group_args(self):
with self.assertRaises(ValueError):
core._apply_args(
job_blocks.JobGroup(
learner=job_blocks.Job(
job_blocks.Executable(name=''), xm_mock.MockExecutor()
)
),
{'eval': {'args': {'batch_size': 32}}},
)
class ExperimentTest(unittest.TestCase):
def test_single_job_launch(self):
experiment = xm_mock.MockExperiment()
with experiment:
job = job_blocks.Job(
xm_mock.MockExecutable(), xm_mock.MockExecutor(), args={}, name='name'
)
experiment.add(job)
self.assertEqual(experiment.launched_jobs, [job])
def test_job_group_launch(self):
experiment = xm_mock.MockExperiment()
with experiment:
foo_job = job_blocks.Job(
xm_mock.MockExecutable(),
xm_mock.MockExecutor(),
args={'foo': 1},
name='1',
)
bar_job = job_blocks.Job(
xm_mock.MockExecutable(),
xm_mock.MockExecutor(),
args={'bar': 2},
name='2',
)
experiment.add(job_blocks.JobGroup(foo=foo_job, bar=bar_job))
self.assertEqual(experiment.launched_jobs, [foo_job, bar_job])
def test_job_generator_launch(self):
experiment = xm_mock.MockExperiment()
with experiment:
job = job_blocks.Job(
xm_mock.MockExecutable(), xm_mock.MockExecutor(), args={}, name='name'
)
async def job_generator(work_unit: core.WorkUnit, use_magic: bool):
self.assertEqual(use_magic, True)
work_unit.add(job)
experiment.add(job_generator, args={'use_magic': True})
self.assertEqual(experiment.launched_jobs, [job])
self.assertEqual(experiment.launched_jobs_args, [{'use_magic': True}])
def test_job_generator_raises(self):
experiment = xm_mock.MockExperiment()
with self.assertRaises(TestError):
with experiment:
experiment.add(failing_job_generator)
def test_non_async_job_generator_raises_user_friendly_exception(self):
with self.assertRaisesRegex(ValueError, '.* generator must be an async .*'):
with xm_mock.MockExperiment() as experiment:
def job_generator(work_unit: core.WorkUnit):
del work_unit
experiment.add(job_generator)
def test_auxiliary_unit_job(self):
experiment = xm_mock.MockExperiment()
with experiment:
job = job_blocks.Job(
xm_mock.MockExecutable(), xm_mock.MockExecutor(), args={}, name='name'
)
experiment.add(core.AuxiliaryUnitJob(job, termination_delay_secs=600))
self.assertEqual(len(experiment.auxiliary_units), 1)
def test_auxiliary_unit_job_generator(self):
experiment = xm_mock.MockExperiment()
with experiment:
async def make_job(aux_unit: core.ExperimentUnit):
aux_unit.add(
job_blocks.Job(
xm_mock.MockExecutable(),
xm_mock.MockExecutor(),
args={},
name='name',
)
)
experiment.add(
core.AuxiliaryUnitJob(make_job, termination_delay_secs=600)
)
self.assertEqual(len(experiment.auxiliary_units), 1)
def test_launch_with_args(self):
experiment = xm_mock.MockExperiment()
with experiment:
experiment.add(
job_blocks.JobGroup(
foo=job_blocks.Job(
xm_mock.MockExecutable(),
xm_mock.MockExecutor(),
args={'x': 1, 'y': 2},
env_vars={'EDITOR': 'vi'},
),
bar=job_blocks.Job(
xm_mock.MockExecutable(),
xm_mock.MockExecutor(),
args=['--bar=1'],
),
),
args={
'foo': {'args': {'x': 3, 'z': 4}, 'env_vars': {'TURBO': 'ON'}},
'bar': {'args': ['--spacebar']},
},
)
self.assertEqual(
experiment.launched_jobs[0].args,
job_blocks.SequentialArgs.from_collection({'x': 3, 'y': 2, 'z': 4}),
)
self.assertEqual(
experiment.launched_jobs[0].env_vars, {'TURBO': 'ON', 'EDITOR': 'vi'}
)
self.assertEqual(
experiment.launched_jobs[1].args,
job_blocks.SequentialArgs.from_collection(['--bar=1', '--spacebar']),
)
def test_launch_with_different_args(self):
experiment = xm_mock.MockExperiment()
with experiment:
job = job_blocks.Job(xm_mock.MockExecutable(), xm_mock.MockExecutor())
for i in range(10):
experiment.add(job, args={'env_vars': {'FOO': i}})
self.assertEqual(experiment.launched_jobs[0].env_vars, {'FOO': 0})
self.assertEqual(experiment.launched_jobs[1].env_vars, {'FOO': 1})
self.assertEqual(experiment.launched_jobs[2].env_vars, {'FOO': 2})
def test_add_runs_asynchronously(self):
generator_called = threading.Event()
with xm_mock.MockExperiment() as experiment:
async def job_generator(work_unit: core.WorkUnit):
del work_unit
generator_called.set()
experiment.add(job_generator)
# Validate that job_generator is executed in a parallel thread.
self.assertTrue(generator_called.wait(timeout=5))
@utils.run_in_asyncio_loop
async def test_loop_is_reused_in_coro_context(self):
loop = asyncio.get_event_loop()
async with xm_mock.MockExperiment() as experiment:
async def job_generator(work_unit: core.WorkUnit):
del work_unit
self.assertEqual(asyncio.get_event_loop(), loop)
experiment.add(job_generator)
@utils.run_in_asyncio_loop
async def test_sync_with_cant_be_used_in_coro_context(self):
# `async with` works.
async with xm_mock.MockExperiment():
pass
with self.assertRaises(RuntimeError):
# But `with` raises an exception.
with xm_mock.MockExperiment():
pass
def test_experiment_works_from_thread_pool(self):
    # There would be no asyncio event loop thread attached if running from a
# worker thread. We ensure that the API still works.
def launch_experiment():
experiment = xm_mock.MockExperiment()
with experiment:
experiment.add(
job_blocks.Job(
xm_mock.MockExecutable(), xm_mock.MockExecutor(), args={}
)
)
with futures.ThreadPoolExecutor() as executor:
executor.submit(launch_experiment).result()
@utils.run_in_asyncio_loop
async def test_work_unit_wait_until_complete(self):
experiment = xm_mock.MockExperiment()
async with experiment:
experiment.add(
job_blocks.Job(
xm_mock.MockExecutable(), xm_mock.MockExecutor(), args={}
)
)
completion_future = experiment.work_units[0].wait_until_complete()
self.assertEqual(completion_future.work_unit.work_unit_id, 1)
await completion_future
@utils.run_in_asyncio_loop
async def test_work_unit_wait_until_complete_exception(self):
experiment = xm_mock.MockExperiment()
with self.assertRaises(TestError):
async with experiment:
experiment.add(failing_job_generator)
with self.assertRaises(core.ExperimentUnitError):
await experiment.work_units[0].wait_until_complete()
@utils.run_in_asyncio_loop
async def test_get_full_job_name(self):
async def generator(work_unit):
self.assertEqual(work_unit.get_full_job_name('name'), '1_1_name')
async with xm_mock.MockExperiment() as experiment:
experiment.add(generator)
class ContextvarsTest(unittest.TestCase):
def test_contextvars_single_job_launch(self):
with xm_mock.MockExperiment() as experiment:
job = job_blocks.Job(xm_mock.MockExecutable(), xm_mock.MockExecutor())
self.assertEqual(core._current_experiment.get(), experiment)
experiment.add(job)
self.assertIsNone(core._current_experiment.get(None))
self.assertIsNone(core._current_experiment_unit.get(None))
def test_contextvars_job_group_launch(self):
with xm_mock.MockExperiment() as experiment:
foo_job = job_blocks.Job(xm_mock.MockExecutable(), xm_mock.MockExecutor())
self.assertEqual(core._current_experiment.get(), experiment)
experiment.add(job_blocks.JobGroup(foo=foo_job))
self.assertIsNone(core._current_experiment.get(None))
self.assertIsNone(core._current_experiment_unit.get(None))
def test_contextvars_job_generator_launch(self):
with xm_mock.MockExperiment() as experiment:
self.assertEqual(core._current_experiment.get(), experiment)
async def job_generator(work_unit: core.WorkUnit):
self.assertEqual(core._current_experiment_unit.get(), work_unit)
self.assertEqual(core._current_experiment.get(), work_unit.experiment)
experiment.add(job_generator)
self.assertIsNone(core._current_experiment.get(None))
self.assertIsNone(core._current_experiment_unit.get(None))
def test_contextvars_async_job_generator_launch(self):
async def make_experiment():
async with xm_mock.MockExperiment() as experiment:
async def job_generator(work_unit: core.WorkUnit):
self.assertEqual(core._current_experiment_unit.get(), work_unit)
self.assertEqual(core._current_experiment.get(), work_unit.experiment)
experiment.add(job_generator)
self.assertEqual(core._current_experiment.get(), experiment)
asyncio.run(make_experiment())
self.assertIsNone(core._current_experiment.get(None))
self.assertIsNone(core._current_experiment_unit.get(None))
def test_contextvars_nested_async_job_generator_launch(self):
async def job_generator(work_unit: core.WorkUnit):
self.assertEqual(core._current_experiment.get(), work_unit.experiment)
self.assertEqual(core._current_experiment_unit.get(), work_unit)
with xm_mock.MockExperiment() as outer_exp:
async def make_inner_exp():
async with xm_mock.MockExperiment() as experiment:
experiment.add(job_generator)
outer_exp.add(job_generator)
self.assertEqual(core._current_experiment.get(), experiment)
asyncio.run(make_inner_exp())
self.assertEqual(core._current_experiment.get(), outer_exp)
self.assertIsNone(core._current_experiment.get(None))
self.assertIsNone(core._current_experiment_unit.get(None))
if __name__ == '__main__':
unittest.main()
|
xmanager-main
|
xmanager/xm/core_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common job operators useful in the framework and 3P-libraries."""
import copy
import itertools
from typing import Callable, List, Sequence, Tuple
import attr
from xmanager.xm import job_blocks
def shallow_copy_job_type(
job_type: job_blocks.JobTypeVar,
) -> job_blocks.JobTypeVar:
"""Creates a shallow copy of the job structure."""
if job_blocks.is_job_generator(job_type):
return job_type
if isinstance(job_type, job_blocks.JobGroup):
job_type = copy.copy(job_type)
job_type.jobs = {
key: shallow_copy_job_type(job) for key, job in job_type.jobs.items()
}
return job_type
return copy.copy(job_type)
def populate_job_names(job_type: job_blocks.JobTypeVar) -> None:
"""Assigns default names to the given jobs."""
def matcher(prefix: Sequence[str], job_type: job_blocks.JobTypeVar) -> None:
match job_type:
case job_blocks.Job() as target:
if target.name is None: # pytype: disable=attribute-error
target.name = '_'.join(prefix) if prefix else target.executable.name
case job_blocks.JobGroup() as target:
for key, job in target.jobs.items(): # pytype: disable=attribute-error
matcher([*prefix, key], job)
case _:
return
matcher([], job_type)
def collect_jobs_by_filter(
job_group: job_blocks.JobGroup,
predicate: Callable[[job_blocks.Job], bool],
) -> List[job_blocks.Job]:
"""Flattens a given job group and filters the result."""
def job_collector(job_type: job_blocks.JobTypeVar) -> List[job_blocks.Job]:
match job_type:
case job_blocks.Job() as job:
return [job] if predicate(job) else [] # pytype: disable=bad-return-type
case job_blocks.JobGroup() as job_group:
return list(
itertools.chain.from_iterable(
[job_collector(job) for job in job_group.jobs.values()]
)
)
case _:
raise TypeError(f'Unsupported job_type: {job_type!r}')
return job_collector(job_group)
@attr.s(auto_attribs=True)
class ConstraintClique:
"""A constraint with the list of jobs it applies to."""
constraint: job_blocks.Constraint
jobs: List[job_blocks.Job]
def aggregate_constraint_cliques(
job_group: job_blocks.JobGroup,
) -> List[ConstraintClique]:
"""Forms constraint cliques.
For each constraint met, collects all jobs it applies to.
Args:
job_group: A job group to aggregate on.
Returns:
A set of cliques.
"""
def matcher(
job_type: job_blocks.JobTypeVar,
) -> Tuple[List[ConstraintClique], List[job_blocks.Job]]:
match job_type:
case job_blocks.Job() as job:
return [], [job] # pytype: disable=bad-return-type
case job_blocks.JobGroup() as job_group:
cliques: List[ConstraintClique] = []
jobs: List[job_blocks.Job] = []
for job in job_group.jobs.values():
subcliques, subjobs = matcher(job) # pylint: disable=unpacking-non-sequence
cliques += subcliques
jobs += subjobs
cliques = [
ConstraintClique(constraint, jobs)
for constraint in job_group.constraints
] + cliques
return cliques, jobs
case _:
raise TypeError(f'Unsupported job_type: {job_type!r}')
result, _ = matcher(job_group) # pylint: disable=unpacking-non-sequence
return result
def flatten_jobs(job_group: job_blocks.JobGroup) -> List[job_blocks.Job]:
return collect_jobs_by_filter(job_group, lambda _: True)
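# Illustrative sketch, not part of the original module: flattening a nested
# JobGroup and aggregating constraint cliques. Mock executables and executors
# are used as placeholders, mirroring the unit tests in this repository.
def _example_operators() -> None:
  from unittest import mock
  group = job_blocks.JobGroup(
      learner=job_blocks.Job(mock.Mock(), mock.Mock(), name='learner'),
      actors=job_blocks.JobGroup(
          actor_1=job_blocks.Job(mock.Mock(), mock.Mock(), name='actor_1'),
          actor_2=job_blocks.Job(mock.Mock(), mock.Mock(), name='actor_2'),
      ),
  )
  # Prints the three leaf job names in depth-first order.
  print([job.name for job in flatten_jobs(group)])
  # No constraints were attached to the groups, so this prints [].
  print(aggregate_constraint_cliques(group))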
|
xmanager-main
|
xmanager/xm/job_operators.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility to predict IDs that would be assigned upon object creation.
This class helps to untangle a chicken-and-egg problem. Sometimes to create an
object (for example a WorkUnit) one may need to know its ID beforehand (for
example to generate a checkpoint path). But the ID would only be assigned by a
backend upon object creation. In an ideal world we would rewrite the backend to
allow ID reservation. This module provides a temporary solution which does the
reservation on the client side. The following assumptions are made:
* IDs are assigned sequentially by the backend, starting from some number.
* Only one process at a time creates the objects. Any races are resolved only
within that process.
Usage:
predictor = Predictor()
# Obtain the ID and asynchronously construct the object.
next_id = predictor.reserve_id()
job = Job(args={'checkpoint_path': f'/tmp/{next_id}'})
  # Wait until all objects with smaller IDs are submitted.
async with predictor.submit_id(next_id):
# And submit it to the backend.
submit(job)
If the submission fails, the sequence is considered broken and following calls
to the predictor would raise an error.
"""
import asyncio
import threading
from typing import AsyncIterable
import async_generator
class BrokenSequenceError(RuntimeError):
"""The ID would never be ready to submit."""
class Predictor:
"""Predicts IDs that would be assigned on object creation.
This class is thread safe and async Python friendly. It must be constructed
from inside asyncio event loop.
"""
def __init__(self, next_id: int) -> None:
"""Initializes the predictor.
Args:
next_id: The first available ID that would be assigned to the next object.
"""
self._next_id = next_id
# We use threading.Lock to allow calling reserve_id from non async context.
# Note that no long operations are done under this lock.
self._next_id_lock = threading.Lock()
self._is_broken = False
self._last_created_id = next_id - 1
self._last_created_id_condition = asyncio.Condition()
def reserve_id(self) -> int:
"""Returns the next ID."""
with self._next_id_lock:
next_id = self._next_id
self._next_id += 1
return next_id
@async_generator.asynccontextmanager
async def submit_id(self, id_to_submit: int) -> AsyncIterable[None]:
"""Waits until the ID can be submitted and marks it as such.
    A context manager which waits for all smaller IDs to be submitted on entry
    and marks this ID as submitted on exit. As a result, all submissions are
    serialized in the correct order and receive the right ID from the backend.
Args:
id_to_submit: The id to be submitted.
Yields:
Yields when it is time to send the request to the backend.
"""
async with self._last_created_id_condition:
await self._last_created_id_condition.wait_for(
lambda: self._is_broken or self._last_created_id == id_to_submit - 1
)
try:
if self._is_broken:
raise BrokenSequenceError(
              f'Id {id_to_submit} would never be ready to submit as'
' submission of the previous one has failed'
)
yield
self._last_created_id = id_to_submit
except:
self._is_broken = True
raise
finally:
self._last_created_id_condition.notify_all()
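# Illustrative sketch, not part of the original module: two objects reserve IDs
# up front and are then submitted concurrently; submit_id() serializes the
# actual backend calls so they happen in ID order. `submit_to_backend` is a
# hypothetical placeholder for a real backend call.
async def _example_ordered_submission() -> None:
  predictor = Predictor(next_id=1)
  async def submit_to_backend(assigned_id: int) -> None:
    print(f'submitting object {assigned_id}')
  async def create_object() -> None:
    next_id = predictor.reserve_id()
    async with predictor.submit_id(next_id):
      await submit_to_backend(next_id)
  await asyncio.gather(create_object(), create_object())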
|
xmanager-main
|
xmanager/xm/id_predictor.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from absl.testing import absltest
from xmanager.xm import job_blocks
class JobBlocksTest(unittest.TestCase):
def test_from_mapping(self):
args = job_blocks.SequentialArgs.from_collection({
'a': 1,
'b': 2,
'c': 3,
})
self.assertEqual(args.to_list(str), ['--a=1', '--b=2', '--c=3'])
def test_from_mapping_multi(self):
args = job_blocks.SequentialArgs.from_collection({
'a': 1,
'c': [3, '4'],
})
self.assertEqual(args.to_list(str), ['--a=1', '--c=3', '--c=4'])
def test_from_mapping_nested_multi(self):
args = job_blocks.SequentialArgs.from_collection({'a': [[1, 2, 3]]})
self.assertEqual(args.to_list(str), ['--a=[[1, 2, 3]]'])
def test_from_sequence(self):
args = job_blocks.SequentialArgs.from_collection([1, 2, 3])
self.assertEqual(args.to_list(str), ['1', '2', '3'])
def test_from_none(self):
args = job_blocks.SequentialArgs.from_collection(None)
self.assertEqual(args.to_list(str), [])
def test_merge_args(self):
args = job_blocks.merge_args(
[1],
{
'a': 'z',
'b': 'y',
},
[2],
{
'b': 'x',
'c': 't',
},
[3],
)
self.assertEqual(
args.to_list(str), ['1', '--a=z', '--b=x', '2', '--c=t', '3']
)
def test_to_dict(self):
args = job_blocks.merge_args(['--knob'], {1: False})
self.assertEqual(args.to_dict(), {'--knob': True, '1': False})
def test_to_list_bool(self):
args = job_blocks.SequentialArgs.from_collection({'yes': True, 'no': False})
self.assertEqual(args.to_list(str), ['--yes', '--nono'])
def test_to_list_none(self):
args = job_blocks.SequentialArgs.from_collection(
{'skip_me': None, 'pass_me': 'None'}
)
self.assertEqual(args.to_list(str), ['--pass_me=None'])
def test_sequential_args_from_string(self):
with self.assertRaisesRegex(
ValueError,
(
"Tried to construct xm.SequentialArgs from string: '--foo'. "
"Wrap it in a list: \\['--foo'\\] to make it a single argument."
),
):
job_blocks.SequentialArgs.from_collection('--foo')
def test_get_args_for_all_jobs(self):
group = job_blocks.JobGroup(
a=job_blocks.Job(mock.Mock(), mock.Mock()),
b=job_blocks.JobGroup(
b1=job_blocks.JobGroup(
b1i=job_blocks.Job(mock.Mock(), mock.Mock()),
b1ii=job_blocks.Job(mock.Mock(), mock.Mock()),
),
b2=job_blocks.Job(mock.Mock(), mock.Mock()),
),
)
logdir = {'logdir': '/logdir/1'}
expected = {
'a': {'args': logdir},
'b': {
'b1': {
'b1i': {'args': logdir},
'b1ii': {'args': logdir},
},
'b2': {'args': logdir},
},
}
args = job_blocks.get_args_for_all_jobs(group, logdir)
self.assertDictEqual(expected, args)
if __name__ == '__main__':
absltest.main()
|
xmanager-main
|
xmanager/xm/job_blocks_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions needed for XManager API implementation.
This module is private and can only be used by the API itself, but not by users.
"""
import abc
import asyncio
import enum
import functools
import os
import shlex
import sys
from typing import Any, Callable, TypeVar
from absl import flags
import attr
FLAGS = flags.FLAGS
flags.DEFINE_string(
'xm_launch_script', None, 'Path to the launch script that is using '
'XManager Launch API')
ReturnT = TypeVar('ReturnT')
class SpecialArg(abc.ABC):
"""A base class for arguments with special handling on serialization."""
@attr.s(auto_attribs=True)
class ShellSafeArg(SpecialArg):
"""Command line argument that shouldn't be escaped.
  Normally all arguments would be passed to the binary as is. To let shell
  substitutions (such as environment variable expansion) happen, the argument
  must be wrapped with this structure.
"""
arg: str
def __str__(self) -> str:
"""Prevents ShellSafeArg from being used in f-strings."""
raise RuntimeError(
f'Converting {self!r} to a string would strip the ShellSafe semantics.'
)
def ARG_ESCAPER(value: Any) -> str: # pylint: disable=invalid-name
match value:
case ShellSafeArg():
return value.arg
case enum.Enum():
return shlex.quote(str(value.name))
case _:
return shlex.quote(str(value))
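# Illustrative sketch, not part of the original module: ARG_ESCAPER shell-quotes
# regular values, while ShellSafeArg values pass through verbatim so that shell
# substitutions (e.g. environment variables) can still take place.
def _example_arg_escaping() -> None:
  print(ARG_ESCAPER('a value with spaces'))       # 'a value with spaces'
  print(ARG_ESCAPER(ShellSafeArg('$HOME/logs')))  # $HOME/logs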
def trivial_kwargs_joiner(key: str, value: str) -> str:
"""Concatenates keyword arguments with = sign."""
return f'{key}={value}'
def run_in_asyncio_loop(f: Callable[..., ReturnT]) -> Callable[..., ReturnT]:
"""A decorator that turns an async function to a synchronous one.
Python asynchronous APIs can't be used directly from synchronous functions.
While wrapping them with an asyncio loop requires little code, in some
contexts it results in too much boilerplate.
Testing async functions:
class MyTest(unittest.TestCase):
@run_in_asyncio_loop
async def test_my_async_function(self):
self.assertEqual(await async_function(), 42)
Running the whole program in an event loop:
@run_in_asyncio_loop
async def main(argv):
print('Hello world')
if __name__ == '__main__':
app.run(main)
It is not advised to use this decorator beyond these two cases.
Args:
f: An async function to run in a loop.
Returns:
A synchronous function with the same arguments.
"""
@functools.wraps(f)
def decorated(*args, **kwargs) -> ReturnT:
loop = asyncio.new_event_loop()
asyncio.get_child_watcher().attach_loop(loop)
return loop.run_until_complete(f(*args, **kwargs))
return decorated
@functools.lru_cache()
def find_launch_script_path() -> str:
"""Finds the launch script path."""
# We can get the launch script if it's provided explicitly, or when it's run
# using a Python interpreter.
launch_script_path = sys.argv[0]
if hasattr(FLAGS, 'xm_launch_script') and FLAGS.xm_launch_script:
launch_script_path = FLAGS.xm_launch_script
if not launch_script_path.endswith('.py'):
# If the launch script is built with subpar we are interested in the name
# of the main module, rather than subpar binary.
main_file_path = getattr(sys.modules['__main__'], '__file__', None)
if main_file_path and os.access(main_file_path, os.R_OK):
launch_script_path = main_file_path
if not launch_script_path:
return ''
# The path may be relative, especially if it comes from sys.argv[0].
return os.path.abspath(launch_script_path)
def resolve_path_relative_to_launcher(path: str) -> str:
"""Get the absolute assuming paths are relative to the launcher script file.
Using this method a launcher script can refer to its own directory or parent
directory via `.` and `..`.
Args:
path: Path that may be relative to the launch script.
Returns:
Absolute path.
Raises:
RuntimeError: If unable to determine the launch script path.
"""
if os.path.isabs(path):
return path
launch_script_path = find_launch_script_path()
if not os.access(launch_script_path, os.R_OK):
raise RuntimeError(
'Unable to determine launch script path. '
f'The script is not present at {launch_script_path!r}. '
'This may happen if launch script changes the '
'working directory.'
)
caller_file_path = os.path.realpath(launch_script_path)
caller_dir = os.path.dirname(caller_file_path)
return os.path.realpath(os.path.join(caller_dir, path))
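# Illustrative sketch, not part of the original module: a launcher script can
# refer to files next to itself regardless of the current working directory.
# The relative path below is a hypothetical example.
def _example_resolve_relative_path() -> None:
  dockerfile = resolve_path_relative_to_launcher('./docker/Dockerfile')
  print(dockerfile)  # An absolute path under the launch script's directory.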
|
xmanager-main
|
xmanager/xm/utils.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of shared executable specifications."""
import abc
import os
import re
from typing import List, NamedTuple, Optional, Union
import attr
from xmanager.xm import job_blocks
from xmanager.xm import utils
def name_from_path(path: str) -> str:
"""Returns a safe to use executable name based on a filesystem path."""
return re.sub('\\W', '_', os.path.basename(path.rstrip(os.sep)))
class ModuleName(NamedTuple):
"""Name of python module to execute when entering this project."""
module_name: str
class CommandList(NamedTuple):
"""List of commands to execute when entering this project."""
commands: List[str]
@attr.s(auto_attribs=True)
class Dockerfile(job_blocks.ExecutableSpec):
"""Dockerfile describes a Dockerfile for generating a docker image.
  This is a lower-level feature; the same result can often be achieved using
  higher-level Executables such as BazelContainer or PythonContainer.
Attributes:
path: Specifies the build's context.
    dockerfile: The file that will be used for build instructions. If not set,
{path}/Dockerfile will be used. Equivalent to `docker build -f`. A
relative path will use a Dockerfile that is relative to the launcher
script.
"""
path: str = attr.ib(
converter=utils.resolve_path_relative_to_launcher, default='.'
)
dockerfile: str = attr.ib(
# This field is always set once the object is initialized, so we use str
# as type annotation. But the default value depends on another property
      # and is set in __attrs_post_init__, so we temporarily convert None to ''.
converter=lambda dockerfile: dockerfile or '',
default=None,
)
def __attrs_post_init__(self):
if not self.dockerfile:
self.dockerfile = os.path.join(self.path, 'Dockerfile')
self.dockerfile = utils.resolve_path_relative_to_launcher(self.dockerfile)
@property
def name(self) -> str:
return name_from_path(self.path)
@attr.s(auto_attribs=True)
class PythonContainer(job_blocks.ExecutableSpec):
"""PythonContainer describes a directory containing Python code.
Attributes:
entrypoint: The Python module or list of shell commands to run when entering
this Python project.
path: Relative or absolute path to the Python project. By default, the
current directory (`'.'`) is used.
base_image: Name of the image to initialize a new Docker build stage using
the instruction `FROM`.
docker_instructions: List of Docker instructions to apply when building the
image. If not specified, the default one will be provided.
When you use `docker_instructions`, you are responsible for copying the
project directory. For example, if you are running with:
path='/path/to/cifar10'
You should include these steps in your `docker_instructions`:
[
'COPY cifar10/ cifar10',
'WORKDIR cifar10',
]
If your source code rarely changes, you can make this your first step.
If you are frequently iterating on the source code, it is best practice
to place these steps as late as possible in the list to maximize Docker
layer-caching.
use_deep_module: Whether the experiment code uses deep module structure
(i.e., 'from <a.prefix> import models') or not (i.e., 'import models').
      If use_deep_module is set to True and docker_instructions are used, it
      is recommended to use a dedicated workdir and copy the whole project
      directory there. The example above should be modified as follows:
[
'RUN mkdir /workdir',
'WORKDIR /workdir',
'COPY cifar10/ /workdir/cifar10',
]
"""
entrypoint: Union[ModuleName, CommandList]
path: str = attr.ib(
converter=utils.resolve_path_relative_to_launcher, default='.'
)
base_image: Optional[str] = None
docker_instructions: Optional[List[str]] = None
use_deep_module: bool = False
@property
def name(self) -> str:
return name_from_path(self.path)
class BinaryDependency(abc.ABC):
"""Additional resource for `Binary` / `BazelBinary`.
Implementations can define backend-specific dependencies.
BinaryDependency and its ancestors must be comparable and hashable and
therefore immutable.
"""
@attr.s(auto_attribs=True)
class Container(job_blocks.ExecutableSpec):
"""A prebuilt Docker image.
The image can be tagged locally or in a remote repository.
Attributes:
image_path: Path to a prebuilt container image.
"""
image_path: str
@property
def name(self) -> str:
return name_from_path(self.image_path)
@attr.s(auto_attribs=True)
class Binary(job_blocks.ExecutableSpec):
"""A prebuilt executable program.
Attributes:
path: Path to a prebuilt binary.
dependencies: A list of data dependencies to be packaged together with the
binary.
"""
path: str
dependencies: List[BinaryDependency] = attr.ib(
converter=list, default=attr.Factory(list)
)
@property
def name(self) -> str:
return name_from_path(self.path)
@attr.s(auto_attribs=True)
class BazelContainer(job_blocks.ExecutableSpec):
"""A Bazel target that produces a .tar image.
Note that for targets based on https://github.com/bazelbuild/rules_docker one
should append '.tar' to the label to specify a self-contained image.
Attributes:
label: The Bazel target to be built.
bazel_args: Bazel command line arguments.
"""
label: str
bazel_args: List[str] = attr.ib(converter=list, default=attr.Factory(list))
@property
def name(self) -> str:
return name_from_path(self.label)
@attr.s(auto_attribs=True)
class BazelBinary(job_blocks.ExecutableSpec):
# pyformat: disable
"""A Bazel target that produces a self-contained binary.
Note that for Python targets based on https://github.com/google/subpar
a self-contained '.par' binary would be built.
Attributes:
label: The Bazel target to be built.
dependencies: A list of data dependencies to be packaged together with the
binary.
bazel_args: Bazel command line arguments.
"""
# pyformat: enable
label: str
dependencies: List[BinaryDependency] = attr.ib(
converter=list, default=attr.Factory(list)
)
bazel_args: List[str] = attr.ib(converter=list, default=attr.Factory(list))
@property
def name(self) -> str:
return name_from_path(self.label)
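# Illustrative construction of the prebuilt-executable specs defined above
# (added as a sketch; the image tag, binary path and Bazel labels are
# hypothetical placeholders).
def _example_prebuilt_specs():
  container = Container(image_path='gcr.io/my-project/my-image:latest')
  binary = Binary(path='/usr/local/bin/my_tool')
  bazel_image = BazelContainer(label='//experiments/cifar10:image.tar')
  bazel_binary = BazelBinary(
      label='//experiments/cifar10:trainer',
      bazel_args=['--compilation_mode=opt'],
  )
  return [container, binary, bazel_image, bazel_binary]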
|
xmanager-main
|
xmanager/xm/executables.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resources specification for use in the API.
Various classes defined to support resources specification for jobs.
"""
import builtins
import enum
import functools
import itertools
import operator
import re
from typing import Any, Dict, Iterable, Iterator, Mapping, MutableMapping, Optional, Tuple, Union, cast
import immutabledict
class _CaseInsensetiveResourceTypeMeta(enum.EnumMeta):
"""Metaclass which allows case-insensetive enum lookup.
Enum keys are upper case, but we allow other cases for the input. For
example existing flags and JobRequirements use lower case for resource names.
"""
def __getitem__(cls, resource_name: str) -> 'ResourceType':
try:
return super().__getitem__(resource_name.upper())
except KeyError:
raise KeyError(f'Unknown {cls.__name__} {resource_name!r}') # pylint: disable=raise-missing-from
class ResourceType(enum.Enum, metaclass=_CaseInsensetiveResourceTypeMeta):
"""Type of a countable resource (e.g., CPU, memory, accelerators etc).
We use a schema in which every particular accelerator has its own type. This
way all countable resources required for a job could be represented by a
simple dictionary.
"""
# Amount of required CPU resources in vCPUs.
CPU = 100002
# Amount of required memory resources in bytes.
MEMORY = 39
RAM = 39
# Amount of required disk resources in bytes.
EPHEMERAL_STORAGE = 100003
DISK = 100003
# GPUs
LOCAL_GPU = 100006
P4 = 21
T4 = 22
P100 = 14
V100 = 17
A100 = 46
A100_80GIB = 66
H100 = 70
# TPUs
TPU_V2 = 3
TPU_V3 = 16
# TODO: do we need V2_DONUT and V3_DONUT?
def __str__(self):
return self._name_
class _CaseInsensetiveServiceTierMeta(enum.EnumMeta):
"""Metaclass which allows case-insensetive enum lookup.
Enum keys are upper case, but we allow other cases for the input. For
example existing flags and JobRequirements use lower case for resource names.
"""
def __getitem__(cls, resource_name: str) -> 'ServiceTier':
try:
return super().__getitem__(resource_name.upper())
except KeyError:
raise KeyError(f'Unknown {cls.__name__} {resource_name!r}') # pylint: disable=raise-missing-from
class ServiceTier(enum.Enum, metaclass=_CaseInsensetiveServiceTierMeta):
"""The job availability guarantees which underlying platfrom should provide.
Most cloud platforms offer a selection of availability/price tradeoff options.
Usually there are at least "Take my money, this workload is important" and
"Buy excess compute for cheap" offerings. This enum provides a classification
of such offerings and allows matching comparable (but not necessary identical)
options from different runtimes.
"""
# Highly available tier. The job is expected to be scheduled fast once sent to
# the cloud. Recommended tier for multi-job work units as lower tiers may lead
# to partially-scheduled work units.
PROD = 200
# A cheaper tier with guaranteed average throughput. Jobs may spend hours
# awaiting scheduling by the cloud and can be preempted.
BATCH = 100
# The cheapest tier. No guarantees, but it often works.
FREEBIE = 25
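# A small sketch (added for illustration) of the case-insensitive lookup that
# the metaclasses above provide: lower-case names used by flags and
# JobRequirements resolve to the canonical upper-case members.
def _example_case_insensitive_lookup() -> None:
  assert ResourceType['v100'] is ResourceType.V100
  assert ResourceType['ram'] is ResourceType.MEMORY  # RAM is an alias.
  assert ServiceTier['batch'] is ServiceTier.BATCH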
def _enum_subset(class_name: str, values: Iterable[ResourceType]) -> type: # pylint: disable=g-bare-generic
"""Returns an enum subset class.
The class is syntactically equivalent to an enum with the given resource
types. But the concrete constants are the same as in the ResourceType enum,
making all equivalence comparisons work correctly. Additionally, the `in`
operator is supported for checking whether a resource belongs to the subset.
Args:
class_name: Class name of the subset enum.
values: A list of resources that belong to the subset.
"""
values = set(values)
class EnumSubsetMetaclass(type): # pylint: disable=g-bare-generic
"""Metaclass which implements enum subset operations."""
def __new__(
cls,
name: str,
bases: Tuple[type], # pylint: disable=g-bare-generic
dct: Dict[str, Any],
) -> type: # pylint: disable=g-bare-generic
# Add constants to the class dict.
for name, member in ResourceType.__members__.items():
if member in values:
dct[name] = member
return super().__new__(cls, class_name, bases, dct)
def __getitem__(cls, item: str) -> ResourceType:
result = ResourceType[item]
if result not in cls: # pylint: disable=unsupported-membership-test
raise AttributeError(
f"type object '{cls.__name__}' has no attribute '{item}'"
)
return result
def __iter__(cls) -> Iterator[ResourceType]:
return iter(values)
def contains(cls, value: ResourceType) -> bool:
return value in values
class EnumSubset(metaclass=EnumSubsetMetaclass):
def __new__(cls, value: int) -> ResourceType:
resource = ResourceType(value)
if resource not in cls:
raise ValueError(f'{value} is not a valid {cls.__name__}')
return resource
return EnumSubset
# TODO: Use centralized resource metadata.
TpuType = _enum_subset(
'TpuType',
[
ResourceType.TPU_V2,
ResourceType.TPU_V3,
],
)
GpuType = _enum_subset(
'GpuType',
[
# LOCAL_GPU is missing as only specific GPU types should be added.
ResourceType.P4,
ResourceType.T4,
ResourceType.P100,
ResourceType.V100,
ResourceType.A100,
ResourceType.A100_80GIB,
ResourceType.H100,
],
)
_AcceleratorType = _enum_subset(
'_AcceleratorType',
[
ResourceType.LOCAL_GPU,
*list(TpuType),
*list(GpuType),
],
)
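# Illustration (added, not in the original file) of how the enum subsets
# behave: members are shared with ResourceType, `in` checks membership, and
# subscripting is case-insensitive but restricted to the subset.
def _example_enum_subsets() -> None:
  assert GpuType['t4'] is ResourceType.T4
  assert ResourceType.V100 in GpuType
  assert ResourceType.TPU_V3 not in GpuType
  assert ResourceType.TPU_V3 in TpuType
  assert set(TpuType) == {ResourceType.TPU_V2, ResourceType.TPU_V3}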
class ResourceDict(MutableMapping):
"""Internal class to represent amount of countable resources.
A mapping from ResourceType to amount of the resource combined with
convenience methods. This class only tracks amounts of the resources, but not
their topologies, locations or constraints.
This class is rather generic and is designed to be used internally as job
requirements as well as in the executors. API users should not use it
explicitly.
Usage:
# Construct (implicitly) from user code using JobRequirements:
requirements = JobRequirements(cpu=0.5 * xm.vCPU, memory=2 * xm.GiB, v100=8)
resources = requirements.task_requirements
# Resources are available by their canonical names.
assert resources[ResourceType.V100] == 8
# Print user-friendly representation:
print(f'The task needs {resources}')
"""
def __init__(self) -> None:
self.__dict: Dict[ResourceType, float] = {}
def __setitem__(self, key: ResourceType, value: float) -> None:
self.__dict.__setitem__(key, value)
def __getitem__(self, key: ResourceType) -> float:
return self.__dict.__getitem__(key)
def __delitem__(self, key: ResourceType) -> None:
self.__dict.__delitem__(key)
def __iter__(self):
return self.__dict.__iter__()
def __len__(self) -> int:
return self.__dict.__len__()
def __str__(self) -> str:
"""Returns user-readable text representation.
Such as "V100: 8, CPU: 1.2, MEMORY: 5.4GiB".
"""
# TODO: We do not aggregate memory yet, update this method to be more
# user-friendly.
return ', '.join(
sorted([f'{key}: {value}' for (key, value) in self.items()])
)
def __add__(self: 'ResourceDict', rhs: 'ResourceDict') -> 'ResourceDict':
"""Returns a sum of two ResourceDicts."""
result = ResourceDict()
for key in [*self.keys(), *rhs.keys()]:
result[key] = self.get(key, 0) + rhs.get(key, 0)
return result
def __mul__(self: 'ResourceDict', rhs: float) -> 'ResourceDict':
"""Returns the multiplication of a ResourceDict with a scalar."""
result = ResourceDict()
for key, value in self.items():
result[key] = value * rhs
return result
def __rmul__(self: 'ResourceDict', rhs: float) -> 'ResourceDict':
"""Returns the multiplication of a ResourceDict with a scalar."""
return self * rhs
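# A short internal-usage sketch (added for illustration): ResourceDict
# supports elementwise addition and scalar multiplication, which is how
# aggregate requirements for several identical tasks can be computed.
def _example_resource_dict_arithmetic() -> None:
  per_task = ResourceDict()
  per_task[ResourceType.CPU] = 2
  per_task[ResourceType.V100] = 1
  four_tasks = per_task * 4
  assert four_tasks[ResourceType.CPU] == 8
  combined = per_task + four_tasks
  assert combined[ResourceType.V100] == 5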
class InvalidTpuTopologyError(Exception):
"""An unrecognized TPU topology has been provided."""
class Topology:
"""Accelerator topology configuration.
Describes accelerator interconnection. For example, it could be a TPU topology
or several GPUs connected with NVLink. Topologies have the form 'NxM_suffix',
where N and M are the numbers of accelerators across each dimension and the
suffix corresponds to a specific interconnect type. The number of dimensions
may vary.
Examples of valid topologies:
'1' - a single device.
'4' - 4 GPUs on one host.
'4x4' - a 4x4 TPU grid.
"""
def __init__(self, name: str) -> None:
if not re.fullmatch('([\\d]+x?)+(_(un)?twisted)?', name):
raise InvalidTpuTopologyError(f'Invalid TPU topology: {name}.')
self._name = name
dimensions_str = name.split('_')[0]
self.dimensions = list(map(int, dimensions_str.split('x')))
@property
def chip_count(self) -> int:
"""Returns the number of chips of the TPU topology."""
return functools.reduce(operator.mul, self.dimensions)
@property
def name(self) -> str:
"""Returns the topology as a string."""
return self._name
def __repr__(self) -> str:
return f'xm.Topology({self.name!r})'
def __eq__(self, other: 'Topology') -> bool:
return self.name == other.name
def __hash__(self) -> int:
return hash(self.name)
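# Illustration (added) of Topology parsing: the chip count is the product of
# the dimensions, and topologies with equal names compare equal.
def _example_topology() -> None:
  assert Topology('4').chip_count == 4
  assert Topology('4x4').chip_count == 16
  assert Topology('2x2x4').dimensions == [2, 2, 4]
  assert Topology('4x4') == Topology('4x4')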
ResourceQuantity = Union[int, float, str, Topology]
def _parse_resource_quantity(
resource_name: str, value: ResourceQuantity
) -> Tuple[float, Optional[Topology]]:
"""Parses a string representation of a resource quantity."""
def parse_string(value: str):
if 'x' in value:
topology = Topology(value)
return topology.chip_count, topology
else:
# TODO: Parse SI suffixes, like GiB.
return float(value), None
try:
match value:
case builtins.str() as str_value:
return parse_string(str_value)
case Topology():
topology = cast(Topology, value) # needed to work around a pytype bug
return topology.chip_count, topology
case _:
return float(value), None
except Exception as e:
raise ValueError(
f"Couldn't parse resource quantity for {resource_name}. "
f'{value!r} was given.'
) from e
class JobRequirements:
# pyformat: disable
"""Describes the resource requirements of a Job.
Attributes:
task_requirements: Amount of resources needed for a single task within a
job.
accelerator: The accelerator the job uses, if there is one. Jobs using
multiple accelerators are not supported because different kinds of
accelerators are usually not installed on the same host.
topology: Accelerator topology, if an accelerator is used.
location: Place where the job should run. For example a cluster name or a
Borg cell.
service_tier: A service tier at which the job should run.
replicas: Number of identical tasks to run within a job.
"""
# pyformat:enable
task_requirements: ResourceDict
accelerator: Optional[ResourceType]
topology: Optional[Topology]
location: Optional[str]
_service_tier: ServiceTier
def __init__(
self,
resources: Mapping[
Union[ResourceType, str], ResourceQuantity
] = immutabledict.immutabledict(),
*,
location: Optional[str] = None,
replicas: Optional[int] = None,
service_tier: Optional[ServiceTier] = None,
**kw_resources: ResourceQuantity,
) -> None:
# pyformat: disable
"""Define a set of resources.
Args:
resources: resource amounts as a dictionary, for example
{xm.ResourceType.V100: 2}.
location: Place where the job should run. For example a cluster name or a
Borg cell.
replicas: Number of identical tasks to run within a job. 1 by default.
service_tier: A service tier at which the job should run.
**kw_resources: resource amounts as a kwargs, for example `v100=2` or
`ram=1 * xm.GiB`. See xm.ResourceType enum for the list of supported
types and aliases.
Raises:
ValueError:
If several accelerator resources are supplied (e.g. both GPU and TPU).
If the same resource is passed both in the `resources` dictionary and as
a keyword argument.
If a topology is supplied for a non-accelerator resource.
"""
# pyformat: enable
self.location = location
self._service_tier = service_tier or ServiceTier.PROD
self.task_requirements = ResourceDict()
self.accelerator = None
self.topology = None
for resource_name, value in itertools.chain(
resources.items(), kw_resources.items()
):
scalar, topology = _parse_resource_quantity(resource_name, value) # pylint: disable=unpacking-non-sequence
match resource_name:
case builtins.str() as r:
resource = ResourceType[r]
case ResourceType():
resource = resource_name
case _:
raise TypeError(f'Unsupported resource: {resource_name!r}')
if resource in _AcceleratorType:
if self.accelerator is not None:
raise ValueError('Accelerator already set.')
self.accelerator = resource
self.topology = topology or Topology(f'{scalar:g}')
elif topology is not None:
raise ValueError(
f'A topology was specified for non-accelerator resource {resource}.'
)
if resource in self.task_requirements:
raise ValueError(f'{resource} has been specified twice.')
self.task_requirements[resource] = scalar
if (
self.accelerator in GpuType
and self.topology
and len(self.topology.dimensions) == 2
):
if replicas is not None and replicas != self.topology.dimensions[1]:
raise ValueError(
f'For multihost GPUs with topology {self.topology}, replicas should '
f'be either None or {self.topology.dimensions[1]}. Found: '
f'{replicas}'
)
replicas = self.topology.dimensions[1]
self.replicas = replicas or 1
self._validate_replicas()
@property
def service_tier(self):
return self._service_tier
@service_tier.setter
def service_tier(self, new_service_tier):
self._service_tier = new_service_tier
def _validate_replicas(self) -> None:
"""Raises ValueError if replication is not supported."""
if self.replicas > 1 and self.accelerator in TpuType:
raise ValueError(
f'Replicated jobs are not supported for {self.accelerator}.'
)
def __repr__(self) -> str:
"""Returns string representation of the requirements."""
args = []
for resource, value in self.task_requirements.items():
if resource in TpuType:
args.append(f'{resource.name.lower()}={self.topology!r}')
else:
args.append(f'{resource.name.lower()}={value!r}')
if self.location:
args.append(f'location={self.location!r}')
if self.service_tier != ServiceTier.PROD:
args.append(f'service_tier=xm.{self.service_tier}')
if self.replicas != 1:
args.append(f'replicas={self.replicas}')
return f'xm.JobRequirements({", ".join(args)})'
|
xmanager-main
|
xmanager/xm/resources.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for executables."""
import os
import unittest
from xmanager.xm import executables
from xmanager.xm import utils
class ExecutablesTest(unittest.TestCase):
def test_python_container_name(self):
executable = executables.PythonContainer(
entrypoint=executables.ModuleName('module'),
path='/home/user/project/',
)
self.assertEqual(executable.name, 'project')
def test_container_name(self):
executable = executables.Container(
image_path='/home/user/project/image.tar'
)
self.assertEqual(executable.name, 'image_tar')
def test_binary_name(self):
executable = executables.Binary(path='./binary')
self.assertEqual(executable.name, 'binary')
def test_bazel_container_name(self):
executable = executables.BazelContainer(label='//container')
self.assertEqual(executable.name, 'container')
def test_bazel_binary_name(self):
executable = executables.BazelBinary(label=':binary')
self.assertEqual(executable.name, '_binary')
def test_dockerfile_defaults(self):
root = utils.resolve_path_relative_to_launcher('.')
spec = executables.Dockerfile()
self.assertEqual(spec.path, root)
self.assertEqual(spec.dockerfile, os.path.join(root, 'Dockerfile'))
if __name__ == '__main__':
unittest.main()
|
xmanager-main
|
xmanager/xm/executables_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from xmanager import xm_mock
from xmanager.xm import job_blocks
from xmanager.xm import job_operators
def construct_job(name=None):
return job_blocks.Job(
name=name,
executable=xm_mock.MockExecutable(),
executor=xm_mock.MockExecutor(),
)
class JobOperatorsTest(unittest.TestCase):
def test_collect_jobs_by_filter_gathers_matches(self):
job_group = job_blocks.JobGroup(
foo=construct_job('foo'),
bar=construct_job('bar'),
baz=construct_job('baz'),
)
self.assertEqual(
job_operators.collect_jobs_by_filter(
job_group,
predicate=lambda job: job.name in ['foo', 'baz'],
),
[job_group.jobs['foo'], job_group.jobs['baz']],
)
def test_flatten_jobs_traverses_nested_groups(self):
baz = construct_job('baz')
foo = construct_job('foo')
job_group = job_blocks.JobGroup(
foo=foo,
bar=job_blocks.JobGroup(baz=baz),
)
self.assertEqual(
job_operators.flatten_jobs(job_group),
[foo, baz],
)
def test_aggregate_constraint_cliques(self):
outer_1 = construct_job('outer_1')
inner_1 = construct_job('inner_1')
inner_2 = construct_job('inner_2')
constraint_a = xm_mock.MockConstraint('A')
constraint_b = xm_mock.MockConstraint('B')
constraint_c = xm_mock.MockConstraint('C')
job_group = job_blocks.JobGroup(
outer_1=outer_1,
outer_2=job_blocks.JobGroup(
inner_1=inner_1,
inner_2=inner_2,
constraints=[constraint_b, constraint_c],
),
constraints=[constraint_a],
)
self.assertEqual(
job_operators.aggregate_constraint_cliques(job_group),
[
job_operators.ConstraintClique(
constraint=constraint_a,
jobs=[outer_1, inner_1, inner_2],
),
job_operators.ConstraintClique(
constraint=constraint_b,
jobs=[inner_1, inner_2],
),
job_operators.ConstraintClique(
constraint=constraint_c,
jobs=[inner_1, inner_2],
),
],
)
if __name__ == '__main__':
unittest.main()
|
xmanager-main
|
xmanager/xm/job_operators_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines convenience constants/functions for converting various units."""
# pylint: disable=invalid-name
vCPU = 1.0 # Virtual CPU
KiB = 2**10 # kibibyte
MiB = 2**20 # mebibyte
GiB = 2**30 # gibibyte
TiB = 2**40 # tebibyte
PiB = 2**50 # pebibyte
KB = 10**3 # kilobyte
MB = 10**6 # megabyte
GB = 10**9 # gigabyte
TB = 10**12 # terabyte
PB = 10**15 # petabyte
# pylint: enable=invalid-name
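# Illustration (added): the constants above are plain numbers, so they compose
# with ordinary arithmetic, e.g. when filling in memory or CPU requirements.
def _example_units() -> None:
  assert 16 * GiB == 16 * 2**30
  assert 1 * GB == 1_000_000_000
  assert 0.5 * vCPU == 0.5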
|
xmanager-main
|
xmanager/xm/compute_units.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for executing job groups using the local backend."""
import abc
import asyncio
import atexit
from concurrent import futures
import os
import subprocess
import threading
from typing import Any, Callable, List, Optional, cast
from absl import logging
import attr
from docker.models import containers
from xmanager import xm
from xmanager.docker import docker_adapter
from xmanager.xm import job_operators
from xmanager.xm import utils
from xmanager.xm_local import executables
from xmanager.xm_local import executors
from xmanager.xm_local import status
_DEFAULT_ENCODING = 'utf-8'
_BRIDGE_NETWORK_NAME = 'xmanager'
def _print_chunk(name: str, line: str) -> None:
print('[{}] {}'.format(name, line.strip()))
class ExecutionHandle(abc.ABC):
"""An interface for operating on executions."""
@abc.abstractmethod
async def wait(self) -> None:
raise NotImplementedError
@abc.abstractmethod
def get_status(self) -> status.LocalWorkUnitStatus:
"""Aggregates the statuses of all jobs in the work unit into one status."""
raise NotImplementedError
class LocalExecutionHandle(ExecutionHandle, abc.ABC):
"""An interface for operating on local executions."""
@abc.abstractmethod
async def monitor(self) -> None:
raise NotImplementedError
@abc.abstractmethod
def terminate(self) -> None:
raise NotImplementedError
async def _throw_on_unknown_executable(
get_full_job_name: Callable[[str], str],
job: xm.Job,
executable: Any,
) -> LocalExecutionHandle:
raise TypeError(f'Unsupported executable for local execution: {executable!r}')
@attr.s(auto_attribs=True)
class ContainerHandle(LocalExecutionHandle):
"""A handle for referring to the launched container."""
name: str
model: Optional[containers.Container]
stream_output: bool
futures_executor: futures.Executor = attr.Factory(futures.ThreadPoolExecutor)
async def wait(self) -> None:
if self.model is None:
return
response = await asyncio.wrap_future(
self.futures_executor.submit(self.model.wait)
)
status_code = response['StatusCode']
if status_code != 0:
raise RuntimeError(
f'Container {self.model!r} returned non-zero status: {status_code}'
)
def get_status(self) -> status.LocalWorkUnitStatus:
raise NotImplementedError
def terminate(self) -> None:
if self.model is None:
return
self.model.stop()
self.futures_executor.shutdown(wait=True)
async def monitor(self) -> None:
if self.model is None:
return
def _stream_chunks() -> None:
for chunk in self.model.logs(stream=True, follow=True):
_print_chunk(self.name, chunk.decode(_DEFAULT_ENCODING))
if self.stream_output:
await asyncio.wrap_future(self.futures_executor.submit(_stream_chunks))
async def _launch_loaded_container_image(
get_full_job_name: Callable[[str], str],
job: xm.Job,
executable: executables.LoadedContainerImage,
) -> LocalExecutionHandle:
"""Launches a preloaded image as a detached container."""
if not isinstance(job.executor, executors.Local):
raise TypeError(f'Expected {job!r} to have the Local executor')
executor = cast(executors.Local, job.executor)
instance = docker_adapter.instance()
if not instance.has_network(_BRIDGE_NETWORK_NAME):
instance.create_network(_BRIDGE_NETWORK_NAME)
gpu_count = int(
executor.requirements.task_requirements.get(xm.ResourceType.LOCAL_GPU, 0)
)
if gpu_count > 0:
try:
subprocess.check_output('nvidia-smi')
except Exception as exception:
raise RuntimeError(
'No NVIDIA devices detected. Only NVIDIA GPUs are currently supported'
) from exception
args = xm.merge_args(executable.args, job.args).to_list(utils.ARG_ESCAPER)
env_vars = {**executable.env_vars, **job.env_vars}
options = executor.docker_options or executors.DockerOptions()
volumes = options.volumes or {}
# Add GCP credentials to Local Executor.
local_gcloud_config_path = os.path.expanduser('~/.config/gcloud')
image_gcloud_config_path = '/root/.config/gcloud'
volumes[local_gcloud_config_path] = image_gcloud_config_path
if options.mount_gcs_path and os.path.isdir(os.path.expanduser('~/gcs')):
local_gcs_path = os.path.expanduser('~/gcs')
image_gcs_path = '/gcs'
if local_gcs_path not in volumes:
volumes[local_gcs_path] = image_gcs_path
else:
logging.warning(
(
'Default GCS path inside container overwritten by '
'`volumes` parameter to %s'
),
volumes[local_gcs_path],
)
container = instance.run_container(
name=get_full_job_name(job.name),
image_id=executable.image_id,
network=_BRIDGE_NETWORK_NAME,
args=args,
env_vars=env_vars,
ports=options.ports or {},
volumes=volumes,
gpu_count=gpu_count,
interactive=options.interactive,
)
return ContainerHandle(
name=job.name,
model=container,
stream_output=executor.experimental_stream_output,
)
@attr.s(auto_attribs=True)
class BinaryHandle(LocalExecutionHandle):
"""A handle referring to the launched binary."""
name: str
process: asyncio.subprocess.Process # pytype: disable=module-attr
stream_output: bool
async def wait(self) -> None:
return_code = await self.process.wait()
if return_code != 0:
raise RuntimeError(
f'Process {self.process!r} returned non-zero code: {return_code}'
)
def get_status(self) -> status.LocalWorkUnitStatus:
raise NotImplementedError
def terminate(self) -> None:
self.process.terminate()
async def monitor(self) -> None:
if self.stream_output:
if not self.process.stdout:
raise ValueError(
'No stdout available from process. Cannot stream output.'
)
while True:
line = await self.process.stdout.readline()
if not line:
break
_print_chunk(self.name, line.decode(_DEFAULT_ENCODING))
async def _launch_local_binary(
get_full_job_name: Callable[[str], str],
job: xm.Job,
executable: executables.LocalBinary,
) -> LocalExecutionHandle:
"""Launches a local binary as a detached process."""
del get_full_job_name # Unused.
if not isinstance(job.executor, executors.Local):
raise TypeError(f'Expected {job!r} to have the Local executor')
args = xm.merge_args(executable.args, job.args).to_list(utils.ARG_ESCAPER)
env_vars = {**executable.env_vars, **job.env_vars}
process = await asyncio.create_subprocess_exec(
executable.path,
*args,
env=env_vars,
start_new_session=True,
stdout=asyncio.subprocess.PIPE
if job.executor.experimental_stream_output
else None,
stderr=asyncio.subprocess.STDOUT
if job.executor.experimental_stream_output
else None,
)
return BinaryHandle(
name=job.name,
process=process,
stream_output=job.executor.experimental_stream_output,
)
async def _local_execution_router(
get_full_job_name: Callable[[str], str],
job: xm.Job,
executable: xm.Executable,
) -> LocalExecutionHandle:
match executable:
case executables.LoadedContainerImage() as container_image:
return await _launch_loaded_container_image(
get_full_job_name,
job,
container_image,
)
case executables.LocalBinary() as local_binary:
return await _launch_local_binary(get_full_job_name, job, local_binary)
case _:
raise TypeError(
f'Unsupported executable for local execution: {executable!r}'
)
# Note that currently handles are never removed from the list. We can consider
# removing them on completion if needed.
_local_jobs: List[LocalExecutionHandle] = []
_local_jobs_lock = threading.Lock()
@atexit.register
def _terminate_local_jobs():
"""Terminates local jobs that were launched during the current session."""
with _local_jobs_lock:
if _local_jobs:
print(
f'Terminating {len(_local_jobs)} local job(s)'
' that may still be running...'
)
for local_job in _local_jobs:
try:
local_job.terminate()
except Exception: # pylint: disable=broad-except
logging.warning('Unable to terminate %s', repr(local_job))
def _local_job_predicate(job: xm.Job) -> bool:
return isinstance(job.executor, executors.Local)
async def launch(
get_full_job_name: Callable[[str], str], job_group: xm.JobGroup
) -> List[LocalExecutionHandle]:
"""Launches jobs with `xm_local.Local` executor."""
# Must act on all jobs with `Local` executor.
local_jobs = job_operators.collect_jobs_by_filter(
job_group, _local_job_predicate
)
handles: List[LocalExecutionHandle] = [
await _local_execution_router(get_full_job_name, job, job.executable)
for job in local_jobs
]
with _local_jobs_lock:
_local_jobs.extend(handles)
return handles
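# A hedged end-to-end sketch (added for illustration) of driving `launch`
# directly with a local binary, mirroring how the tests call it. The binary
# path and the name-mangling callable are hypothetical placeholders; normal
# user code goes through xm_local.create_experiment instead.
async def _example_launch_local_binary() -> None:
  job = xm.Job(
      name='hello',
      executable=executables.LocalBinary(path='/bin/echo'),
      executor=executors.Local(),
  )
  handles = await launch(lambda name: f'example_{name}', xm.JobGroup(hello=job))
  for handle in handles:
    await handle.wait()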
|
xmanager-main
|
xmanager/xm_local/execution.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the XManager Launch API within the local scheduler."""
from xmanager.cloud import auth
from xmanager.cloud import vertex
from xmanager.xm_local import experiment
from xmanager.xm_local.executors import *
create_experiment = experiment.create_experiment
get_experiment = experiment.get_experiment
list_experiments = experiment.list_experiments
|
xmanager-main
|
xmanager/xm_local/__init__.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the local scheduler experiment."""
import asyncio
from concurrent import futures
import time
from typing import Any, Awaitable, Callable, List, Mapping, Optional, Sequence
from absl import logging
import attr
from kubernetes import client as k8s_client
from xmanager import xm
from xmanager.cloud import kubernetes
from xmanager.cloud import vertex
from xmanager.xm import async_packager
from xmanager.xm import id_predictor
from xmanager.xm import job_operators
from xmanager.xm_local import execution as local_execution
from xmanager.xm_local import executors as local_executors
from xmanager.xm_local import status as local_status
from xmanager.xm_local.packaging import router as packaging_router
from xmanager.xm_local.storage import database
def _validate_job_group(job_group: xm.JobGroup) -> None:
all_jobs = job_operators.flatten_jobs(job_group)
for job in all_jobs:
match job.executor:
case local_executors.Local() | local_executors.Vertex() | local_executors.Kubernetes():
pass
case _:
raise TypeError(f'Unsupported executor: {job.executor!r}. Job: {job!r}')
@attr.s(auto_attribs=True)
class _LaunchResult:
vertex_handles: List[vertex.VertexHandle]
k8s_handles: List[kubernetes.KubernetesHandle]
local_handles: List[local_execution.LocalExecutionHandle]
class LocalExperimentUnit(xm.ExperimentUnit):
"""Experiment unit operated by the local backend."""
def __init__(
self,
experiment: 'LocalExperiment',
experiment_title: str,
create_task: Callable[[Awaitable[Any]], futures.Future[Any]],
args: Optional[Mapping[str, Any]],
role: xm.ExperimentUnitRole,
) -> None:
super().__init__(experiment, create_task, args, role)
self._experiment_title = experiment_title
self._local_execution_handles: List[
local_execution.LocalExecutionHandle
] = []
self._non_local_execution_handles: List[local_execution.ExecutionHandle] = (
[]
)
async def _submit_jobs_for_execution(
self, job_group: xm.JobGroup
) -> _LaunchResult:
# We delegate the traversal of the job group to the per-backend modules
# (vertex, kubernetes, local execution). That improves modularity, but
# sacrifices the ability to make cross-executor decisions.
vertex_handles = vertex.launch(
self._experiment_title, self.experiment_unit_name, job_group
)
k8s_handles = kubernetes.launch(self.get_full_job_name, job_group)
local_handles = await local_execution.launch(
self.get_full_job_name, job_group
)
return _LaunchResult(
vertex_handles=vertex_handles,
k8s_handles=k8s_handles,
local_handles=local_handles,
)
def _ingest_execution_handles(self, launch_result: _LaunchResult) -> None:
self._non_local_execution_handles.extend(
launch_result.vertex_handles + launch_result.k8s_handles
)
self._local_execution_handles.extend(launch_result.local_handles)
def _monitor_local_jobs(
self,
local_execution_handles: Sequence[local_execution.LocalExecutionHandle],
) -> None:
for handle in local_execution_handles:
self._create_task(handle.monitor())
async def _wait_until_complete(self) -> None:
try:
await asyncio.gather(
*[
handle.wait()
for handle in self._local_execution_handles
+ self._non_local_execution_handles
]
)
except RuntimeError as error:
raise xm.ExperimentUnitFailedError(
error, work_unit=self if isinstance(self, LocalWorkUnit) else None
)
async def wait_for_local_jobs(self, is_exit_abrupt: bool):
if not is_exit_abrupt:
await asyncio.gather(
*[handle.wait() for handle in self._local_execution_handles]
)
def stop(
self,
*,
mark_as_failed: bool = False,
mark_as_completed: bool = False,
message: Optional[str] = None,
) -> None:
"""Initiate the process to stop the work unit from running.
This method will synchronously make a request for the work unit to stop.
However, the method does not actually wait for the work unit to be in a
terminal state.
Use self.wait_until_complete() after self.stop() to guarantee the work unit
is stopped.
Args:
mark_as_failed: Mark this unit as failed rather than stopped.
mark_as_completed: Mark this unit as completed rather than stopped.
message: Optional user-defined status message.
"""
del mark_as_failed # Not implemented in xm_local.
del mark_as_completed # Not implemented in xm_local.
del message # Not implemented in xm_local.
handles = self._non_local_execution_handles + self._local_execution_handles
for handle in handles:
match handle:
case vertex.VertexHandle() as vertex_handle:
vertex_handle.stop()
case _:
raise TypeError(f'Unsupported handle: {handle!r}')
def get_status(self) -> local_status.LocalWorkUnitStatus:
"""Gets the current status of the work unit."""
handles = self._non_local_execution_handles + self._local_execution_handles
if len(handles) == 1:
return handles[0].get_status()
raise NotImplementedError(
'Status aggregation for work units with multiple jobs is not '
'implemented yet.'
)
class LocalWorkUnit(LocalExperimentUnit):
"""A work unit operated by the local backend."""
def __init__(
self,
experiment: 'LocalExperiment',
experiment_title: str,
create_task: Callable[[Awaitable[Any]], futures.Future[Any]],
args: Mapping[str, Any],
role: xm.ExperimentUnitRole,
work_unit_id_predictor: id_predictor.Predictor,
) -> None:
super().__init__(experiment, experiment_title, create_task, args, role)
self._work_unit_id_predictor = work_unit_id_predictor
self._work_unit_id = self._work_unit_id_predictor.reserve_id()
def _save_handles_to_storage(
self, handles: Sequence[local_execution.ExecutionHandle]
) -> None:
"""Saves jobs present in the handlers."""
for handle in handles:
match handle:
case vertex.VertexHandle() as vertex_handle:
database.database().insert_vertex_job(
self.experiment_id, self.work_unit_id, vertex_handle.job_name
)
case kubernetes.KubernetesHandle() as k8s_handle:
for job in k8s_handle.jobs:
namespace = job.metadata.namespace or 'default'
name = job.metadata.name
database.database().insert_kubernetes_job(
self.experiment_id, self.work_unit_id, namespace, name
)
case _:
raise TypeError(f'Unsupported handle: {handle!r}')
async def _launch_job_group(
self,
job_group: xm.JobGroup,
args_view: Mapping[str, Any],
identity: str,
) -> None:
del args_view # Unused.
_validate_job_group(job_group)
if identity:
raise ValueError(
'LocalExperiment does not support idempotent experiment '
'unit creation.'
)
async with self._work_unit_id_predictor.submit_id(self.work_unit_id):
launch_result = await self._submit_jobs_for_execution(job_group)
self._ingest_execution_handles(launch_result)
# TODO: Save the local jobs to the database as well.
self._save_handles_to_storage(
launch_result.vertex_handles + launch_result.k8s_handles
)
self._monitor_local_jobs(launch_result.local_handles)
@property
def experiment_unit_name(self) -> str:
return f'{self.experiment_id}_{self._work_unit_id}'
@property
def work_unit_id(self) -> int:
return self._work_unit_id
class LocalAuxiliaryUnit(LocalExperimentUnit):
"""An auxiliary unit operated by the local backend."""
async def _launch_job_group(
self,
job_group: xm.JobGroup,
args_view: Mapping[str, Any],
identity: str,
) -> None:
del args_view # Unused.
_validate_job_group(job_group)
if identity:
raise ValueError(
'LocalExperiment does not support idempotent experiment '
'unit creation.'
)
launch_result = await self._submit_jobs_for_execution(job_group)
self._ingest_execution_handles(launch_result)
self._monitor_local_jobs(launch_result.local_handles)
@property
def experiment_unit_name(self) -> str:
return f'{self.experiment_id}_auxiliary'
class LocalExperiment(xm.Experiment):
"""Experiment contains a family of jobs that run with the local scheduler."""
_id: int
_experiment_title: str
_experiment_units: List[LocalExperimentUnit]
_work_unit_count: int
_async_packager = async_packager.AsyncPackager(packaging_router.package)
def __init__(self, experiment_title: str) -> None:
super().__init__()
# To distinguish local job names until we use a local database generator.
self._id = int(time.time() * 10**3)
self._experiment_title = experiment_title
self._experiment_units = []
self._work_unit_count = 0
def _create_experiment_unit(
self,
args: Optional[Mapping[str, Any]],
role: xm.ExperimentUnitRole,
identity: str,
) -> Awaitable[xm.ExperimentUnit]:
"""Creates a new WorkUnit instance for the experiment."""
if identity:
raise ValueError(
'LocalExperiment does not support idempotent experiment '
'unit creation.'
)
def create_work_unit(role: xm.WorkUnitRole) -> Awaitable[xm.ExperimentUnit]:
work_unit = LocalWorkUnit(
self,
self._experiment_title,
self._create_task,
args,
role,
self._work_unit_id_predictor,
)
self._experiment_units.append(work_unit)
self._work_unit_count += 1
database.database().insert_work_unit(
self.experiment_id,
work_unit.work_unit_id,
)
future = asyncio.Future()
future.set_result(work_unit)
return future
# TODO: Support `role.termination_delay_secs`.
def create_auxiliary_unit(
role: xm.AuxiliaryUnitRole,
) -> Awaitable[xm.ExperimentUnit]:
auxiliary_unit = LocalAuxiliaryUnit(
self,
self._experiment_title,
self._create_task,
args,
role,
)
self._experiment_units.append(auxiliary_unit)
future = asyncio.Future()
future.set_result(auxiliary_unit)
return future
match role:
case xm.WorkUnitRole() as role:
return create_work_unit(role)
case xm.AuxiliaryUnitRole() as role:
return create_auxiliary_unit(role)
case _:
raise TypeError(f'Unsupported role: {role!r}')
def _wait_for_local_jobs(self, is_exit_abrupt: bool):
if self._experiment_units:
print(
'Waiting for local jobs to complete. '
'Press Ctrl+C to terminate them and exit'
)
for unit in self._experiment_units:
self._create_task(unit.wait_for_local_jobs(is_exit_abrupt))
def __exit__(self, exc_type, exc_value, traceback):
# Flush `.add` calls.
self._wait_for_tasks()
self._wait_for_local_jobs(exc_value is not None)
return super().__exit__(exc_type, exc_value, traceback)
async def __aexit__(self, exc_type, exc_value, traceback):
# Flush `.add` calls.
await self._await_for_tasks()
self._wait_for_local_jobs(exc_value is not None)
return await super().__aexit__(exc_type, exc_value, traceback)
@property
def experiment_id(self) -> int:
return self._id
@property
def work_unit_count(self) -> int:
return self._work_unit_count
@property
def work_units(self) -> Mapping[int, LocalExperimentUnit]:
"""Gets work units created via self.add()."""
raise NotImplementedError
def _get_experiment_unit(
self,
experiment_id: int,
identity: str,
role: xm.ExperimentUnitRole,
args: Optional[Mapping[str, Any]] = None,
) -> Awaitable[xm.ExperimentUnit]:
"""Returns an existing experiment unit by identity.
Args:
experiment_id: The ID of the experiment to get the Experiment Unit for.
identity: The identity of the Experiment Unit to get.
role: Executable unit role: whether to fetch a work unit or auxiliary
unit.
args: Keyword arguments to be passed to the job.
Returns:
An awaitable which fetches the work unit.
"""
raise NotImplementedError
def _should_reload_experiment_unit(self, role: xm.ExperimentUnitRole) -> bool:
"""Returns True if the Work Unit should be reloaded based on its role.
Args:
role: Executable unit role trying to be reloaded.
"""
# Since reloading isn't supported locally, we always return False.
return False
def create_experiment(experiment_title: str) -> xm.Experiment:
"""Create Experiment."""
experiment = LocalExperiment(experiment_title)
database.database().insert_experiment(
experiment.experiment_id, experiment._experiment_title # pylint: disable=protected-access
)
return experiment
def get_experiment(experiment_id: int) -> xm.Experiment:
"""Returns an Experiment instance associated with this experiment id."""
# pylint: disable=protected-access
experiment_result = database.database().get_experiment(experiment_id)
experiment = LocalExperiment(experiment_result.experiment_title)
experiment._id = experiment_id
experiment._work_unit_id_predictor = id_predictor.Predictor(1)
for work_unit_result in experiment_result.work_units:
work_unit = LocalWorkUnit(
experiment,
experiment_result.experiment_title,
lambda _: None,
{},
xm.WorkUnitRole(),
experiment._work_unit_id_predictor,
)
work_unit._work_unit_id = work_unit_result.work_unit_id
non_local_handles = []
kubernetes_jobs = []
for _, data in work_unit_result.jobs.items():
if data.HasField('local'):
logging.warning(
(
'[Experiment id: %s, work unit id: %s] Loading local experiment'
' units from storage is not implemented.'
),
experiment_id,
work_unit_result.work_unit_id,
)
# "caip" is the legacy field name of vertex inside the proto.
elif data.HasField('caip'):
non_local_handles = [vertex.VertexHandle(data.caip.resource_name)]
elif data.HasField('kubernetes'):
job = k8s_client.V1Job()
job.metadata = k8s_client.V1ObjectMeta(
namespace=data.kubernetes.namespace, name=data.kubernetes.job_name
)
kubernetes_jobs.append(job)
non_local_handles = [kubernetes.KubernetesHandle(kubernetes_jobs)]
work_unit._non_local_execution_handles = non_local_handles
experiment._experiment_units.append(work_unit)
experiment._work_unit_count += 1
return experiment
# pylint: enable=protected-access
def list_experiments() -> Sequence[xm.Experiment]:
"""Yields a list of Experiment instances that have been created thus far."""
experiment_ids = database.database().list_experiment_ids()
return [get_experiment(experiment_id) for experiment_id in experiment_ids]
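# A hedged launcher sketch (added for illustration) showing how the functions
# above are typically used together with the XManager Launch API. The title,
# project path, entrypoint and args are hypothetical placeholders.
def _example_launch_flow() -> None:
  with create_experiment('cifar10') as experiment:
    [executable] = experiment.package([
        xm.Packageable(
            executable_spec=xm.PythonContainer(
                path='/path/to/cifar10',
                entrypoint=xm.ModuleName('cifar10.main'),
            ),
            executor_spec=local_executors.Local.Spec(),
        ),
    ])
    experiment.add(
        xm.Job(
            executable=executable,
            executor=local_executors.Local(),
            args={'batch_size': 64},
        )
    )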
|
xmanager-main
|
xmanager/xm_local/experiment.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Local backend executors."""
from typing import Dict, Optional
import attr
from xmanager import xm
from xmanager.docker import docker_adapter
GOOGLE_KUBERNETES_ENGINE_CLOUD_PROVIDER = 'GOOGLE_KUBERNETES_ENGINE'
@attr.s(auto_attribs=True)
class LocalSpec(xm.ExecutorSpec):
"""Current machine executor's specification."""
@attr.s(auto_attribs=True)
class DockerOptions:
"""Options of the container to be run.
Attributes:
ports: In the simplest form -- a dictionary from `int` to `int`, where the
keys represent the ports inside the container and the values represent
the ports of the host to bind. See the specification at
https://docker-py.readthedocs.io/en/stable/containers.html.
volumes: A dictionary from `str` to `str`, where the keys represent paths
inside the host to mount and the values represent paths in the
container.
mount_gcs_path: If True, checks for the `~/gcs` directory on the host and
mounts it (if found) at `/gcs` in the container. Defaults to True.
interactive: If True, requests a run with interactive shell.
"""
ports: Optional[docker_adapter.Ports] = None
volumes: Optional[Dict[str, str]] = None
mount_gcs_path: bool = True
interactive: bool = False
@attr.s(auto_attribs=True)
class Local(xm.Executor):
"""Current machine executor.
Attributes:
requirements: Resources to be requested from the host.
Note: Currently, only the `local_gpu` resource is supported (and only with
a container-based executable). Any other resource requirement will be
ignored.
docker_options: Options applied if the job is a container-based executable.
experimental_stream_output: Whether to pipe the job's stdout and stderr to
the terminal. Might be removed once we decide on the logging design.
"""
requirements: xm.JobRequirements = attr.Factory(xm.JobRequirements)
docker_options: Optional[DockerOptions] = None
experimental_stream_output: bool = True
Spec = LocalSpec # pylint: disable=invalid-name
@attr.s(auto_attribs=True)
class TpuCapability:
"""TPU capability configures the TPU software requested by an executor."""
# Read about TPU versions:
# https://cloud.google.com/tpu/docs/version-switching
tpu_runtime_version: str
@attr.s(auto_attribs=True)
class TensorboardCapability:
"""Tensorboard capability integrates a Vertex AI Job with Tensorboard."""
# The name of the tensorboard to use.
name: str
# The "gs://$GCS_BUCKET/dir_name" to save output.
# Tensorboard will read the logs from $BASE_OUTPUT_DIRECTORY/logs/
# If None, then the root of the default bucket will be used.
base_output_directory: Optional[str] = None
@attr.s(auto_attribs=True)
class VertexSpec(xm.ExecutorSpec):
"""Vertex AI spec describes the Google Cloud Platform (GCP) location."""
# An image registry name tag to push.
# The image tag should be in the form 'myregistryhost/name:tag'
push_image_tag: Optional[str] = None
@attr.s(auto_attribs=True)
class Vertex(xm.Executor):
"""Vertex AI Executor describes the runtime environment of GCP."""
requirements: xm.JobRequirements = attr.Factory(xm.JobRequirements)
tensorboard: Optional[TensorboardCapability] = None
Spec = VertexSpec # pylint: disable=invalid-name
# Declaring variable aliases for legacy compatibility.
# New code should not use these aliases.
Caip = Vertex
CaipSpec = VertexSpec
@attr.s(auto_attribs=True)
class KubernetesSpec(xm.ExecutorSpec):
"""K8s spec describes the K8s location."""
# An image registry name tag to push.
# The image tag should be in the form 'myregistryhost/name:tag'
push_image_tag: Optional[str] = None
@attr.s(auto_attribs=True)
class Kubernetes(xm.Executor):
"""K8s Executor describes the runtime environment of Kubernetes."""
requirements: xm.JobRequirements = attr.Factory(xm.JobRequirements)
cloud_provider: str = GOOGLE_KUBERNETES_ENGINE_CLOUD_PROVIDER
tpu_capability: Optional[TpuCapability] = None
Spec = KubernetesSpec # pylint: disable=invalid-name
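# A hedged construction sketch (added for illustration) of the executors
# defined above. The ports, volume paths and resource amounts are hypothetical
# placeholders.
def _example_executors():
  local = Local(
      requirements=xm.JobRequirements(local_gpu=1),
      docker_options=DockerOptions(
          ports={8888: 8888},
          volumes={'/tmp/data': '/data'},
      ),
  )
  k8s = Kubernetes(requirements=xm.JobRequirements(cpu=4, ram=16 * xm.GiB, t4=1))
  vertex = Vertex(requirements=xm.JobRequirements(cpu=8, ram=32 * xm.GiB, v100=2))
  return [local, k8s, vertex]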
|
xmanager-main
|
xmanager/xm_local/executors.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xmanager.xm_local.executors."""
import os
import subprocess
import sys
import unittest
from unittest import mock
from absl import flags
from absl.testing import parameterized
import docker
from xmanager import xm
from xmanager.docker import docker_adapter
from xmanager.xm_local import executables as local_executables
from xmanager.xm_local import execution
from xmanager.xm_local import executors as local_executors
def create_test_job(
gpu_count: int, interactive: bool = False, mount_gcs_path: bool = True
) -> xm.Job:
return xm.Job(
name='test-job',
executable=local_executables.LoadedContainerImage(
name='test',
image_id='test-image',
args=xm.SequentialArgs.from_collection({'a': 1}),
env_vars={'c': '0'},
),
executor=local_executors.Local(
requirements=xm.JobRequirements(local_gpu=gpu_count),
docker_options=local_executors.DockerOptions(
ports={8080: 8080},
volumes={'a': 'b'},
interactive=interactive,
mount_gcs_path=mount_gcs_path,
),
),
)
class ExecutionTest(unittest.IsolatedAsyncioTestCase, parameterized.TestCase):
"""Tests for xm_local.execution (currently only for container launches)."""
async def asyncSetUp(self):
# Force flag initialization to avoid errors
flags.FLAGS(sys.argv)
@parameterized.product(
interactive=[True, False],
mount_gcs_path=[True, False],
gcs_dir_exists=[True, False],
gpu_count=[0, 1, 4],
)
@mock.patch.object(
docker_adapter.DockerAdapter, 'run_container', return_value=None
)
async def test_container_launch_dispatcher(
self,
mock_run_container,
interactive,
mount_gcs_path,
gcs_dir_exists,
gpu_count,
):
"""Tests if the container launch dispatcher is called correctly when using `xm_local.execution.launch`."""
mock_docker_client = mock.Mock()
mock_docker_client.has_network.return_value = True
job = create_test_job(
interactive=interactive,
mount_gcs_path=mount_gcs_path,
gpu_count=gpu_count,
)
mock_gcs_dir_existence = lambda path: ( # pylint:disable=g-long-lambda
gcs_dir_exists and path.endswith('/gcs')
)
with mock.patch.object(
docker, 'from_env', return_value=mock_docker_client
), mock.patch.object(
os.path, 'isdir', side_effect=mock_gcs_dir_existence
), mock.patch.object(
subprocess, 'check_output', return_value=True
):
await execution.launch(lambda x: x, job_group=xm.JobGroup(test_job=job))
expected_gcs_volume = {os.path.expanduser('~/gcs'): '/gcs'}
mock_run_container.assert_called_once_with(
name='test-job',
image_id='test-image',
network='xmanager',
args=['--a=1'],
env_vars={'c': '0'},
ports={8080: 8080},
volumes=(
{
'a': 'b',
os.path.expanduser('~/.config/gcloud'): '/root/.config/gcloud',
}
| (expected_gcs_volume if mount_gcs_path and gcs_dir_exists else {})
),
gpu_count=gpu_count,
interactive=interactive,
)
@parameterized.product(
mount_gcs_path=[True, False],
gcs_dir_exists=[True, False],
gpu_count=[0, 1, 4],
)
@mock.patch.object(
docker.models.containers.ContainerCollection, 'run', return_value=None
)
async def test_container_launch_by_client(
self, mock_client_run, mount_gcs_path, gcs_dir_exists, gpu_count
):
"""Tests if the Docker Python client launches containers correctly when using `xm_local.execution.launch`."""
mock_docker_client = mock.Mock()
mock_docker_client.has_network.return_value = True
mock_docker_client.containers = (
docker.models.containers.ContainerCollection(None)
)
job = create_test_job(
interactive=False, mount_gcs_path=mount_gcs_path, gpu_count=gpu_count
)
mock_gcs_dir_existence = lambda path: ( # pylint:disable=g-long-lambda
gcs_dir_exists and path.endswith('/gcs')
)
with mock.patch.object(
docker, 'from_env', return_value=mock_docker_client
), mock.patch.object(
os.path, 'isdir', side_effect=mock_gcs_dir_existence
), mock.patch.object(
subprocess, 'check_output', return_value=True
):
await execution.launch(lambda x: x, job_group=xm.JobGroup(test_job=job))
expected_gcs_volume = {
os.path.expanduser('~/gcs'): {'bind': '/gcs', 'mode': 'rw'}
}
mock_client_run.assert_called_once_with(
'test-image',
name='test-job',
hostname='test-job',
network='xmanager',
detach=True,
remove=True,
command=['--a=1'],
environment={'c': '0'},
ports={8080: 8080},
volumes=(
{
'a': {'bind': 'b', 'mode': 'rw'},
os.path.expanduser('~/.config/gcloud'): {
'bind': '/root/.config/gcloud',
'mode': 'rw',
},
}
| (expected_gcs_volume if mount_gcs_path and gcs_dir_exists else {})
),
runtime='nvidia' if gpu_count > 0 else None,
device_requests=[ # pylint:disable=g-long-ternary
docker.types.DeviceRequest(count=gpu_count, capabilities=[['gpu']])
]
if gpu_count > 0
else None,
)
@parameterized.product(
mount_gcs_path=[True, False],
gcs_dir_exists=[True, False],
gpu_count=[0, 1, 4],
)
@mock.patch.object(subprocess, 'run', return_value=None)
async def test_container_launch_by_subprocess(
self,
mock_container_launch_by_subprocess,
mount_gcs_path,
gcs_dir_exists,
gpu_count,
):
"""Tests if the Docker subprocesses are created correctly when using `xm_local.execution.launch."""
mock_docker_client = mock.Mock()
mock_docker_client.has_network.return_value = True
job = create_test_job(
interactive=True, mount_gcs_path=mount_gcs_path, gpu_count=gpu_count
)
mock_gcs_dir_existence = lambda path: ( # pylint:disable=g-long-lambda
gcs_dir_exists and path.endswith('/gcs')
)
with mock.patch.object(
docker, 'from_env', return_value=mock_docker_client
), mock.patch.object(
os.path, 'isdir', side_effect=mock_gcs_dir_existence
), mock.patch.object(
subprocess, 'check_output', return_value=True
):
await execution.launch(lambda x: x, job_group=xm.JobGroup(test_job=job))
expected_gcs_path_args = []
if mount_gcs_path and gcs_dir_exists:
expected_gcs_path_args = ['-v', '%s:/gcs' % os.path.expanduser('~/gcs')]
expected_gpu_args = []
if gpu_count > 0:
expected_gpu_args = ['--gpus', str(gpu_count), '--runtime', 'nvidia']
mock_container_launch_by_subprocess.assert_called_once_with(
args=[
'docker',
'run',
'--network',
'xmanager',
'-p',
'8080:8080',
'-e',
'c=0',
'-v',
'a:b',
'-v',
'%s:/root/.config/gcloud' % os.path.expanduser('~/.config/gcloud'),
]
+ expected_gcs_path_args
+ expected_gpu_args
+ ['-it', '--entrypoint', 'bash', 'test-image'],
check=True,
)
@parameterized.product(interactive=[True, False], gpu_count=[0, 1, 4])
@mock.patch.object(
docker_adapter.DockerAdapter, 'run_container', return_value=None
)
async def test_no_nvidia_smi_launch(
self, mock_run_container, interactive, gpu_count
):
mock_docker_client = mock.Mock()
mock_docker_client.has_network.return_value = True
job = create_test_job(
interactive=interactive, mount_gcs_path=True, gpu_count=gpu_count
)
if gpu_count > 0:
with mock.patch.object(
subprocess, 'check_output', side_effect=Exception()
), self.assertRaises(RuntimeError):
await execution.launch(lambda x: x, job_group=xm.JobGroup(test_job=job))
mock_run_container.assert_not_called()
else:
await execution.launch(lambda x: x, job_group=xm.JobGroup(test_job=job))
mock_run_container.assert_called_once_with(
name='test-job',
image_id='test-image',
network='xmanager',
args=['--a=1'],
env_vars={'c': '0'},
ports={8080: 8080},
volumes={
'a': 'b',
os.path.expanduser('~/.config/gcloud'): '/root/.config/gcloud',
},
gpu_count=gpu_count,
interactive=interactive,
)
if __name__ == '__main__':
unittest.main()
|
xmanager-main
|
xmanager/xm_local/execution_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Local backend executables."""
from typing import Dict
import attr
from xmanager import xm
@attr.s(auto_attribs=True)
class LoadedContainerImage(xm.Executable):
"""A locally loaded container image."""
image_id: str
args: xm.SequentialArgs = attr.Factory(xm.SequentialArgs)
env_vars: Dict[str, str] = attr.Factory(dict)
@attr.s(auto_attribs=True)
class LocalBinary(xm.Executable):
"""A locally located binary."""
path: str
args: xm.SequentialArgs = attr.Factory(xm.SequentialArgs)
env_vars: Dict[str, str] = attr.Factory(dict)
@attr.s(auto_attribs=True)
class GoogleContainerRegistryImage(xm.Executable):
"""An image inside Google Container Registry."""
image_path: str
args: xm.SequentialArgs = attr.Factory(xm.SequentialArgs)
env_vars: Dict[str, str] = attr.Factory(dict)
|
xmanager-main
|
xmanager/xm_local/executables.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of local work unit statuses."""
import enum
from xmanager import xm
class LocalWorkUnitStatusEnum(enum.Enum):
"""Status of a local experiment job."""
# Work unit was created, but has not terminated yet.
RUNNING = 1
# Work unit terminated and was successful.
COMPLETED = 2
  # Work unit terminated and was not successful.
FAILED = 3
# Work unit terminated because it was cancelled by the user.
CANCELLED = 4
class LocalWorkUnitStatus(xm.ExperimentUnitStatus):
"""Status of a local experiment job."""
def __init__(
self, status: LocalWorkUnitStatusEnum, message: str = ''
) -> None:
super().__init__()
self._status = status
self._message = message
@property
def is_active(self) -> bool:
return self._status == LocalWorkUnitStatusEnum.RUNNING
@property
def is_completed(self) -> bool:
return self._status == LocalWorkUnitStatusEnum.COMPLETED
@property
def is_failed(self) -> bool:
return self._status == LocalWorkUnitStatusEnum.FAILED
@property
def message(self) -> str:
return self._message
|
xmanager-main
|
xmanager/xm_local/status.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Packaging for execution on Cloud."""
from typing import Optional
from xmanager import xm
from xmanager.bazel import client as bazel_client
from xmanager.cloud import auth
from xmanager.cloud import build_image
from xmanager.cloud import docker_lib
from xmanager.docker import docker_adapter
from xmanager.xm_local import executables as local_executables
from xmanager.xm_local import executors as local_executors
from xmanager.xm_local.packaging import bazel_tools
def _get_push_image_tag(executor_spec: xm.ExecutorSpec) -> Optional[str]:
"""Get the push_image_tag from executor or None."""
match executor_spec:
case local_executors.CaipSpec() as caip_spec:
return caip_spec.push_image_tag
case local_executors.KubernetesSpec() as kubernetes_spec:
return kubernetes_spec.push_image_tag
case _:
raise TypeError(
          f'Unsupported executor specification: {executor_spec!r}'
)
def _package_container(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
container: xm.Container,
) -> xm.Executable:
"""Matcher method for packaging `xm.Container`.
If the user builds an image with the same repository name as an image in a
remote Cloud registry, the user should push the image before running XManager,
because XManager will not push a local image in the packaging step.
  Unless the container's image path already matches the GCR project prefix, we
  always pull the container and push it to the push_image_tag location. This
  avoids a potential permissions error where the user can read an image but the
  Cloud service agent cannot. If container.image_path already points to the GCR
  project, we skip pushing because the image should already be in the
  destination location.
Args:
bazel_outputs: TargetOutputs mapping from Bazel targets to outputs.
packageable: Packageable containing Executor and Executable.
container: Container specifying image path.
Returns:
GoogleContainerRegistryImage Executable.
"""
del bazel_outputs
gcr_project_prefix = 'gcr.io/' + auth.get_project_name()
if (
container.image_path.startswith(gcr_project_prefix)
or not docker_lib.is_docker_installed()
):
return local_executables.GoogleContainerRegistryImage(
name=packageable.executable_spec.name,
image_path=container.image_path,
args=packageable.args,
env_vars=packageable.env_vars,
)
instance = docker_adapter.instance()
client = instance.get_client()
print(f'Pulling {container.image_path}...')
repository, tag = instance.split_tag(container.image_path)
image_id = instance.pull_image(container.image_path)
image = client.images.get(image_id)
push_image_tag = _get_push_image_tag(packageable.executor_spec)
if not push_image_tag:
if repository.startswith(gcr_project_prefix):
# If the image path already points to the project's GCR, reuse it.
push_image_tag = f'{repository}:{tag}'
else:
# Otherwise, create a new image repository inside the project's GCR.
push_image_tag = f'{gcr_project_prefix}/{repository}:{tag}'
image.tag(push_image_tag)
print(f'Pushing {push_image_tag}...')
client.images.push(push_image_tag)
return local_executables.GoogleContainerRegistryImage(
name=packageable.executable_spec.name,
image_path=push_image_tag,
args=packageable.args,
env_vars=packageable.env_vars,
)
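# Editor-added sketch (not part of the library): assuming the GCP project is
# named 'my-project' and no push_image_tag is set on the executor spec,
# packaging a container with image_path 'example-registry.io/team/image:latest'
# would pull that image, retag it as
# 'gcr.io/my-project/example-registry.io/team/image:latest', and push it there;
# an image_path that already starts with 'gcr.io/my-project' is returned as-is.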
def _package_dockerfile(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
dockerfile: xm.Dockerfile,
):
"""Matcher method for packaging `xm.Dockerfile`."""
del bazel_outputs
push_image_tag = _get_push_image_tag(packageable.executor_spec)
if not push_image_tag:
gcr_project_prefix = 'gcr.io/' + auth.get_project_name()
tag = docker_lib.create_tag()
push_image_tag = f'{gcr_project_prefix}/{dockerfile.name}:{tag}'
image = build_image.build_by_dockerfile(
dockerfile.path,
dockerfile.dockerfile,
push_image_tag,
pull_image=docker_lib.is_docker_installed(),
)
if docker_lib.is_docker_installed():
build_image.push(image)
return local_executables.GoogleContainerRegistryImage(
name=packageable.executable_spec.name,
image_path=push_image_tag,
args=packageable.args,
env_vars=packageable.env_vars,
)
def _package_python_container(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
python_container: xm.PythonContainer,
) -> xm.Executable:
"""Matcher method for packaging `xm.PythonContainer`."""
del bazel_outputs
push_image_tag = _get_push_image_tag(packageable.executor_spec)
image = build_image.build(
python_container,
packageable.args,
packageable.env_vars,
push_image_tag,
pull_image=docker_lib.is_docker_installed(),
)
if docker_lib.is_docker_installed():
build_image.push(image)
return local_executables.GoogleContainerRegistryImage(
name=packageable.executable_spec.name,
image_path=image,
)
def _package_bazel_container(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
bazel_container: xm.BazelContainer,
):
"""Matcher method for packaging `xm.BazelContainer`."""
paths = bazel_outputs[
bazel_client.BazelTarget(
label=bazel_container.label,
bazel_args=bazel_container.bazel_args,
)
]
instance = docker_adapter.instance()
client = instance.get_client()
push_image_tag = _get_push_image_tag(packageable.executor_spec)
print(f'Loading {bazel_container.label}...')
loaded_image_id = instance.load_image(paths[0])
image = client.images.get(loaded_image_id)
image.tag(push_image_tag)
print(f'Pushing {push_image_tag}...')
client.images.push(push_image_tag)
return local_executables.GoogleContainerRegistryImage(
name=packageable.executable_spec.name,
image_path=push_image_tag,
)
def package_cloud_executable(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
executable_spec: xm.ExecutableSpec,
) -> xm.Executable:
match executable_spec:
case xm.Container() as container:
return _package_container(
bazel_outputs,
packageable,
container,
)
case xm.Dockerfile() as dockerfile:
return _package_dockerfile(
bazel_outputs,
packageable,
dockerfile,
)
case xm.PythonContainer() as python_container:
return _package_python_container(
bazel_outputs,
packageable,
python_container,
)
case xm.BazelContainer() as bazel_container:
return _package_bazel_container(
bazel_outputs,
packageable,
bazel_container,
)
case _:
raise TypeError(
'Unsupported executable specification '
f'for Cloud packaging: {executable_spec!r}'
)
|
xmanager-main
|
xmanager/xm_local/packaging/cloud.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Packaging for local executions."""
import os
from xmanager import xm
from xmanager.bazel import client as bazel_client
from xmanager.cloud import build_image
from xmanager.cloud import docker_lib
from xmanager.docker import docker_adapter
from xmanager.xm import executables
from xmanager.xm_local import executables as local_executables
from xmanager.xm_local.packaging import bazel_tools
def _package_container(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
container: executables.Container,
) -> xm.Executable:
"""Packages a container for local execution."""
del bazel_outputs
instance = docker_adapter.instance()
image_id = None
if os.path.exists(container.image_path):
image_id = instance.load_image(container.image_path)
elif instance.is_registry_label(container.image_path):
image_id = instance.pull_image(container.image_path)
if image_id is not None:
return local_executables.LoadedContainerImage(
name=packageable.executable_spec.name,
image_id=image_id,
args=packageable.args,
env_vars=packageable.env_vars,
)
else:
raise ValueError(
        f'{container.image_path} cannot be found either locally or remotely'
)
def _package_binary(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
binary: executables.Binary,
):
del bazel_outputs
if not os.path.exists(binary.path):
raise ValueError(f'{binary.path} does not exist on this machine')
return local_executables.LocalBinary(
name=packageable.executable_spec.name,
path=binary.path,
args=packageable.args,
env_vars=packageable.env_vars,
)
def _package_dockerfile(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
dockerfile: executables.Dockerfile,
):
del bazel_outputs
image_id = docker_lib.build_docker_image(
dockerfile.name, dockerfile.path, dockerfile.dockerfile
)
return local_executables.LoadedContainerImage(
name=packageable.executable_spec.name,
image_id=image_id,
args=packageable.args,
env_vars=packageable.env_vars,
)
def _package_python_container(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
py_executable: executables.PythonContainer,
):
del bazel_outputs
# Use the directory as the image name.
image_name = os.path.basename(py_executable.path)
image_id = build_image.build(
py_executable, packageable.args, packageable.env_vars, image_name
)
return local_executables.LoadedContainerImage(
name=packageable.executable_spec.name, image_id=image_id
)
def _package_bazel_container(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
container: executables.BazelContainer,
) -> xm.Executable:
"""Matcher to package BazelContainer."""
paths = bazel_outputs[
bazel_client.BazelTarget(
label=container.label,
bazel_args=container.bazel_args,
)
]
assert len(paths) == 1
image_id = docker_adapter.instance().load_image(paths[0])
return local_executables.LoadedContainerImage(
name=packageable.executable_spec.name,
image_id=image_id,
args=packageable.args,
env_vars=packageable.env_vars,
)
def _package_bazel_binary(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
binary: executables.BazelBinary,
) -> xm.Executable:
"""Matcher to package BazelBinary."""
paths = bazel_outputs[
bazel_client.BazelTarget(
label=binary.label,
bazel_args=binary.bazel_args,
)
]
assert len(paths) == 1
return local_executables.LocalBinary(
name=packageable.executable_spec.name,
path=paths[0],
args=packageable.args,
env_vars=packageable.env_vars,
)
def package_for_local_executor(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
executable_spec: xm.ExecutableSpec,
):
match executable_spec:
case executables.BazelBinary() as bazel_binary:
return _package_bazel_binary(bazel_outputs, packageable, bazel_binary)
case executables.BazelContainer() as bazel_container:
return _package_bazel_container(
bazel_outputs, packageable, bazel_container
)
case executables.Binary() as binary:
return _package_binary(bazel_outputs, packageable, binary)
case executables.Container() as container:
return _package_container(bazel_outputs, packageable, container)
case executables.Dockerfile() as dockerfile:
return _package_dockerfile(bazel_outputs, packageable, dockerfile)
case executables.PythonContainer() as python_container:
return _package_python_container(
bazel_outputs, packageable, python_container
)
case _:
raise TypeError(
'Unsupported executable specification '
f'for local packaging: {executable_spec!r}'
)
|
xmanager-main
|
xmanager/xm_local/packaging/local.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from xmanager.xm_local.packaging import bazel_tools
class BazelToolsTest(unittest.TestCase):
def test_lex_full_label(self):
self.assertEqual(
bazel_tools._lex_label('//project/directory:target'),
(['project', 'directory'], 'target'),
)
def test_lex_short_label(self):
self.assertEqual(
bazel_tools._lex_label('//project/package'),
(['project', 'package'], 'package'),
)
def test_lex_root_target(self):
self.assertEqual(bazel_tools._lex_label('//:label'), ([], 'label'))
def test_lex_empty_label(self):
with self.assertRaises(ValueError):
bazel_tools._lex_label('//')
def test_lex_relative_label(self):
with self.assertRaises(ValueError):
bazel_tools._lex_label('a/b:c')
def test_assemble_label(self):
self.assertEqual(bazel_tools._assemble_label((['a', 'b'], 'c')), '//a/b:c')
def test_label_kind_lines_to_dict(self):
self.assertEqual(
bazel_tools._label_kind_lines_to_dict([
'py_binary rule //:py_target',
'cc_binary rule //:cc_target',
]),
{'//:py_target': 'py_binary rule', '//:cc_target': 'cc_binary rule'},
)
def test_absolute_label_with_extension_dot(self):
self.assertEqual(
bazel_tools._lex_label('//project/directory:image.tar'),
(['project', 'directory'], 'image.tar'),
)
def test_label_with_three_dots(self):
with self.assertRaisesRegex(ValueError, 'is not an absolute Bazel label'):
bazel_tools._lex_label('//project/directory/...')
def test_label_with_star_target(self):
with self.assertRaisesRegex(ValueError, 'is not an absolute Bazel label'):
bazel_tools._lex_label('//project/directory:*')
def test_label_with_all_target(self):
with self.assertRaisesRegex(ValueError, '`:all` is not a valid target'):
bazel_tools._lex_label('//project/directory:all')
if __name__ == '__main__':
unittest.main()
|
xmanager-main
|
xmanager/xm_local/packaging/bazel_tools_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bazel tools for local packaging."""
import functools
import itertools
import os
import re
import subprocess
from typing import Dict, List, Optional, Sequence, Tuple
from absl import flags
from xmanager import xm
from xmanager.bazel import client
from xmanager.bazel import file_utils
from google.protobuf.internal.decoder import _DecodeVarint32
from xmanager.generated import build_event_stream_pb2 as bes_pb2
_BAZEL_COMMAND = flags.DEFINE_string(
'xm_bazel_command', 'bazel', 'A command that runs Bazel.'
)
def _get_important_outputs(
events: Sequence[bes_pb2.BuildEvent], labels: Sequence[str]
) -> List[List[bes_pb2.File]]:
label_to_output: Dict[str, List[bes_pb2.File]] = {}
for event in events:
if event.id.HasField('target_completed'):
# Note that we ignore `event.id.target_completed.aspect`.
label_to_output[event.id.target_completed.label] = list(
event.completed.important_output
)
return [label_to_output[label] for label in labels]
def _get_normalized_labels(
events: Sequence[bes_pb2.BuildEvent], labels: Sequence[str]
) -> List[str]:
label_to_expansion: Dict[str, str] = {}
for event in events:
if event.id.HasField('pattern'):
for index, pattern in enumerate(event.id.pattern.pattern):
# Note that we ignore `event.children.target_configured.aspect`.
label_to_expansion[pattern] = event.children[
index
].target_configured.label
return [label_to_expansion[label] for label in labels]
def _get_workspace_directory(events: Sequence[bes_pb2.BuildEvent]) -> str:
for event in events:
if event.id.HasField('started'):
return event.started.workspace_directory
raise ValueError('Missing start event in Bazel logs')
def _read_build_events(path: str) -> List[bes_pb2.BuildEvent]:
"""Parses build events from a file referenced by a given `path`.
  The file should contain serialized, length-delimited `bes_pb2.BuildEvent`
messages. See
https://docs.bazel.build/versions/master/build-event-protocol.html#consume-in-binary-format
for details.
Args:
path: Path to a file with the protocol.
Returns:
A list of build events.
"""
with open(path, 'rb') as bep_file:
buffer = bep_file.read()
events = []
position = 0
while position < len(buffer):
# Reimplementation of Java's `AbstractParser.parseDelimitedFrom` for
# protobufs, which is not available in Python.
size, start = _DecodeVarint32(buffer, position)
event = bes_pb2.BuildEvent()
event.ParseFromString(buffer[start : start + size])
events.append(event)
position = start + size
return events
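# Editor-added note: each record in the BEP file read above is laid out as
# [varint-encoded message length][serialized BuildEvent bytes], so the loop
# decodes a varint to find the size of the next message and then advances
# `position` past the message it just parsed.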
def _root_absolute_path() -> str:
# If the launch script is run with Bazel, use `BUILD_WORKSPACE_DIRECTORY` to
# get the root of the workspace where the build was initiated. If the launch
# script is run with the CLI, query Bazel to find out.
return (
os.getenv('BUILD_WORKSPACE_DIRECTORY')
or subprocess.run(
[_BAZEL_COMMAND.value, 'info', 'workspace'],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
).stdout.strip()
)
def _build_multiple_targets(
labels: Sequence[str], bazel_args: Sequence[str] = ()
) -> List[List[str]]:
"""Builds the targets and returns paths to their important outputs.
The definition of 'important artifacts in an output group' can be found at
https://github.com/bazelbuild/bazel/blob/8346ea4cfdd9fbd170d51a528fee26f912dad2d5/src/main/java/com/google/devtools/build/lib/analysis/TopLevelArtifactHelper.java#L223-L224.
Args:
labels: Labels of the targets to build.
bazel_args: Arguments to append to the Bazel command.
Returns:
    For each label, a list of paths to its important output files.
"""
with file_utils.TemporaryFilePath() as bep_path:
subprocess.run(
[
_BAZEL_COMMAND.value,
'build',
f'--build_event_binary_file={bep_path}',
# Forces a GC at the end of the build and publishes value to BEP.
'--memory_profile=/dev/null',
*labels,
*bazel_args,
],
check=True,
cwd=_root_absolute_path(),
)
events = _read_build_events(bep_path)
normalized_labels = _get_normalized_labels(events, labels)
output_lists = _get_important_outputs(events, normalized_labels)
workspace = _get_workspace_directory(events)
results: List[List[str]] = []
for files in output_lists:
results.append(
[
os.path.join(workspace, *file.path_prefix, file.name)
for file in files
]
)
return results
# Expansions (`...`, `*`) are not allowed.
_NAME_RE = r'(?:[^.*:/]|\.(?!\.\.))+'
_LABEL_LEXER = re.compile(
f'^//(?P<packages>{_NAME_RE}(/{_NAME_RE})*)?(?P<target>:{_NAME_RE})?$'
)
_LexedLabel = Tuple[List[str], str]
def _lex_label(label: str) -> _LexedLabel:
"""Splits the label into packages and target."""
match = _LABEL_LEXER.match(label)
if match is None:
raise ValueError(f'{label} is not an absolute Bazel label')
groups = match.groupdict()
packages: Optional[str] = groups['packages']
target: Optional[str] = groups['target']
if not packages and not target:
raise ValueError(f'{label} cannot be empty')
if target == ':all':
raise ValueError('`:all` is not a valid target')
init = packages.split('/') if packages else []
last = target[1:] if target else init[-1]
return init, last
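# Editor-added usage sketch, mirroring the unit tests in bazel_tools_test.py:
#   _lex_label('//project/directory:target') -> (['project', 'directory'], 'target')
#   _lex_label('//project/package')          -> (['project', 'package'], 'package')
#   _lex_label('//:label')                   -> ([], 'label')
#   _lex_label('a/b:c')                      -> raises ValueError (not absolute)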
def _assemble_label(parts: _LexedLabel) -> str:
init, last = parts
return f"//{'/'.join(init)}:{last}"
def _label_kind_lines_to_dict(lines: Sequence[str]) -> Dict[str, str]:
kind_label_tuples = [line.rsplit(' ', 1) for line in lines]
return {label: kind for kind, label in kind_label_tuples}
class LocalBazelService(client.BazelService):
"""Local implementation of `BazelService`."""
def fetch_kinds(self, labels: Sequence[str]) -> List[str]:
"""Retrieves kind for each given target in the current workspace."""
labels = [_assemble_label(_lex_label(label)) for label in labels]
# For each matching target `bazel query` produces a line formatted as
# `<rule name> rule <target name>`, for example, `py_library rule
# //third_party/py/xmanager/xm:__init__`. See
# https://docs.bazel.build/versions/main/query.html#output-label_kind.
stdout = subprocess.run(
[
_BAZEL_COMMAND.value,
'query',
f"'{' union '.join(labels)}'",
'--output',
'label_kind',
],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=_root_absolute_path(),
).stdout.decode('utf-8')
label_kinds = _label_kind_lines_to_dict(stdout.strip().split(os.linesep))
return [label_kinds[label] for label in labels]
def build_targets(
self, labels: Sequence[str], bazel_args: Sequence[str]
) -> List[List[str]]:
return _build_multiple_targets(labels, bazel_args)
@functools.lru_cache()
def local_bazel_service() -> LocalBazelService:
"""Returns a singleton instance of `LocalBazelService`."""
return LocalBazelService()
def _collect_executables(
executable: xm.ExecutableSpec,
) -> List[client.BazelTarget]:
match executable:
case xm.BazelBinary() as bazel_binary:
return [
client.BazelTarget(
label=bazel_binary.label,
bazel_args=bazel_binary.bazel_args,
),
]
case xm.BazelContainer() as bazel_container:
return [
client.BazelTarget(
label=bazel_container.label,
bazel_args=bazel_container.bazel_args,
),
]
case _:
return []
def collect_bazel_targets(
packageables: Sequence[xm.Packageable],
) -> List[client.BazelTarget]:
"""Extracts Bazel targets to package from a sequence of `Packageable`s."""
return list(
itertools.chain(
*[
_collect_executables(packageable.executable_spec)
for packageable in packageables
]
)
)
TargetOutputs = Dict[client.BazelTarget, List[str]]
|
xmanager-main
|
xmanager/xm_local/packaging/bazel_tools.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for routing packageables to appropriate packagers."""
import collections
from typing import Dict, List, Sequence, Tuple
from xmanager import xm
from xmanager.bazel import client as bazel_client
from xmanager.xm_local import executors
from xmanager.xm_local.packaging import bazel_tools
from xmanager.xm_local.packaging import cloud as cloud_packaging
from xmanager.xm_local.packaging import local as local_packaging
def _packaging_router(
built_targets: bazel_tools.TargetOutputs, packageable: xm.Packageable
) -> xm.Executable:
match packageable.executor_spec:
case executors.VertexSpec():
return cloud_packaging.package_cloud_executable(
built_targets,
packageable,
packageable.executable_spec,
)
case executors.LocalSpec():
return local_packaging.package_for_local_executor(
built_targets,
packageable,
packageable.executable_spec,
)
case executors.KubernetesSpec():
return cloud_packaging.package_cloud_executable(
built_targets,
packageable,
packageable.executable_spec,
)
case _:
raise TypeError(
f'Unsupported executor specification: {packageable.executor_spec!r}. '
f'Packageable: {packageable!r}'
)
def _normalize_label(label: str, kind: str) -> str:
"""Attempts to correct the label if it does not point to the right target.
In certain cases people might specify labels that do not correspond to the
desired output. For example, for a `py_binary(name='foo', ...)` target the
self-contained executable is actually called 'foo.par'.
Args:
label: The target's name.
kind: The target's kind.
Returns:
Either the same or a corrected label.
"""
if kind == 'py_binary rule' and not label.endswith('.par'):
return f'{label}.par'
return label
_ArgsToTargets = Dict[Tuple[str, ...], List[bazel_client.BazelTarget]]
def package(packageables: Sequence[xm.Packageable]) -> List[xm.Executable]:
"""Routes a packageable to an appropriate packaging mechanism."""
built_targets: bazel_tools.TargetOutputs = {}
bazel_targets = bazel_tools.collect_bazel_targets(packageables)
if bazel_targets:
bazel_service = bazel_tools.local_bazel_service()
bazel_labels = [target.label for target in bazel_targets]
bazel_kinds = bazel_service.fetch_kinds(bazel_labels)
label_to_kind = dict(zip(bazel_labels, bazel_kinds))
args_to_targets: _ArgsToTargets = collections.defaultdict(list)
for target in bazel_targets:
args_to_targets[target.bazel_args].append(target)
for args, targets in args_to_targets.items():
outputs = bazel_service.build_targets(
labels=tuple(
_normalize_label(target.label, label_to_kind[target.label])
for target in targets
),
bazel_args=args,
)
for target, output in zip(targets, outputs):
built_targets[target] = output
return [
_packaging_router(built_targets, packageable)
for packageable in packageables
]
|
xmanager-main
|
xmanager/xm_local/packaging/router.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database connector module."""
import abc
import functools
import os
from typing import Dict, List, Optional, Type, TypeVar, Any
from absl import flags
import alembic
from alembic.config import Config
import attr
import sqlalchemy
from xmanager import xm
from xmanager.generated import data_pb2
import yaml
from google.protobuf import text_format
from google.cloud.sql.connector import Connector, IPTypes
_DB_YAML_CONFIG_PATH = flags.DEFINE_string(
'xm_db_yaml_config_path',
None,
"""
Path of YAML config file containing DB connection details.
A valid config file contains two main entries:
`sql_connector`: must be one of [`sqlite`, `generic`, `cloudsql`]
`sql_connection_settings`: contains details about the connection URL.
These match the interface of `SqlConnectionSettings` and their
combination must form a valid `sqlalchemy` connection URL. Possible
fields are:
- backend, e.g. 'mysql', 'postgresql'
- db_name
- driver, e.g. 'pymysql', 'pg8000'
- username
- password
- host (instance connection name when using CloudSql)
- port
""",
)
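# Editor-added illustration of the YAML layout described above; the values are
# placeholders, not defaults shipped with XManager:
#
#   sql_connector: generic
#   sql_connection_settings:
#     backend: postgresql
#     driver: pg8000
#     db_name: xmanager
#     username: xm_user
#     password: xm_password
#     host: localhost
#     port: 5432
#
# Passing the file via `--xm_db_yaml_config_path=<path>` selects the connector
# and builds the corresponding `SqlConnectionSettings`.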
_UPGRADE_DB = flags.DEFINE_boolean(
'xm_upgrade_db',
False,
"""
Specifies if XManager should update the database to the latest version.
    It's recommended to take a backup of the database before updating, since
    migrations can fail or have errors. This is especially true
    for non-transactional DDLs, where partial migrations can occur on
    failure, leaving the database in an ill-defined state.
""",
)
@attr.s(auto_attribs=True)
class WorkUnitResult:
"""Result of a WorkUnit database query."""
work_unit_id: int
jobs: Dict[str, data_pb2.Job]
@attr.s(auto_attribs=True)
class ExperimentResult:
"""Result of an Experiment database query."""
experiment_id: int
experiment_title: str
work_units: List[WorkUnitResult]
Engine = sqlalchemy.engine.Engine
text = sqlalchemy.sql.text
@attr.s(auto_attribs=True)
class SqlConnectionSettings:
"""Settings for a generic SQL connection."""
backend: str
db_name: str
driver: Optional[str] = None
username: Optional[str] = None
password: Optional[str] = None
host: Optional[str] = None
port: Optional[int] = None
class SqlConnector(abc.ABC):
"""Provides a way of connecting to a SQL DB from some settings."""
@staticmethod
@abc.abstractmethod
def create_engine(settings: SqlConnectionSettings) -> Engine:
raise NotImplementedError
TSqlConnector = TypeVar('TSqlConnector', bound=SqlConnector)
class GenericSqlConnector(SqlConnector):
"""Generic way of connecting to SQL databases using an URL."""
@staticmethod
def create_engine(settings: SqlConnectionSettings) -> Engine:
driver_name = settings.backend + (
f'+{settings.driver}' if settings.driver else ''
)
url = sqlalchemy.engine.url.URL(
drivername=driver_name,
username=settings.username,
password=settings.password,
host=settings.host,
port=settings.port,
database=settings.db_name,
)
return sqlalchemy.engine.create_engine(url)
class SqliteConnector(SqlConnector):
"""Provides way of connecting to a SQLite database.
The database used at the file path pointed to by the `database` field
in the settings used. The database is created if it doesn't exist.
"""
@staticmethod
def create_engine(settings: SqlConnectionSettings) -> Engine:
if settings.backend and settings.backend != 'sqlite':
raise RuntimeError(
"Can't use SqliteConnector with a backendother than `sqlite`"
)
if not os.path.isdir(os.path.dirname(settings.db_name)):
os.makedirs(os.path.dirname(settings.db_name))
return GenericSqlConnector.create_engine(settings)
class CloudSqlConnector(SqlConnector):
"""Provides way of connecting to a CloudSQL database."""
# Each CloudSql backend supports one driver.
BACKEND_DRIVERS = {
'mysql': 'pymysql',
'postgresql': 'pg8000',
'mssql': 'pytds'
}
@staticmethod
def create_engine(settings: SqlConnectionSettings) -> Engine:
ip_type = IPTypes.PRIVATE if os.environ.get(
'PRIVATE_IP') else IPTypes.PUBLIC
connector = Connector(ip_type)
if settings.backend not in CloudSqlConnector.BACKEND_DRIVERS:
raise RuntimeError(f'CloudSql doesn\'t support the '
f'`{settings.backend}` backend.')
driver = CloudSqlConnector.BACKEND_DRIVERS[settings.backend]
if settings.driver and settings.driver != driver:
raise RuntimeError(f'CloudSql backend `{settings.backend}` does not '
f'support the `{settings.driver}` driver')
def get_connection():
return connector.connect(
settings.host,
driver,
user=settings.username,
password=settings.password,
db=settings.db_name)
url = sqlalchemy.engine.url.URL(drivername=f'{settings.backend}+{driver}',
host='localhost')
return sqlalchemy.create_engine(url, creator=get_connection)
class Database:
"""Database object with interacting with experiment metadata storage."""
def __init__(
self, connector: Type[TSqlConnector], settings: SqlConnectionSettings
):
self.settings = settings
self.engine: Engine = connector.create_engine(settings)
# https://github.com/sqlalchemy/sqlalchemy/issues/5645
# TODO: Remove this line after using sqlalchemy>=1.14.
self.engine.dialect.description_encoding = None
storage_dir = os.path.dirname(__file__)
self.alembic_cfg = Config(os.path.join(storage_dir, 'alembic.ini'))
self.alembic_cfg.set_main_option('sqlalchemy.url', str(self.engine.url))
self.alembic_cfg.set_main_option(
'script_location', os.path.join(storage_dir, 'alembic')
)
self.maybe_migrate_database_version()
def upgrade_database(self, revision: str = 'head') -> None:
"""Upgrades database to given revision."""
with self.engine.begin() as connection:
# https://alembic.sqlalchemy.org/en/latest/cookbook.html#sharing-a-connection-across-one-or-more-programmatic-migration-commands
# Allows sharing connection across multiple commands.
self.alembic_cfg.attributes['connection'] = connection
try:
alembic.command.upgrade(self.alembic_cfg, revision)
except Exception as e:
raise RuntimeError(
'Database upgrade failed. The DB may be in an undefined state or '
'data may have been lost. Revert to the previous state using your '
'backup or proceed with caution.'
) from e
finally:
self.alembic_cfg.attributes['connection'] = None
def database_version(self) -> str:
with self.engine.begin() as connection:
context = alembic.migration.MigrationContext.configure(connection)
return context.get_current_revision()
def latest_version_available(self) -> str:
script_directory = alembic.script.ScriptDirectory.from_config(
self.alembic_cfg
)
return script_directory.get_current_head()
def maybe_migrate_database_version(self):
"""Enforces the latest version of the database to be used."""
db_version = self.database_version()
with self.engine.connect() as connection:
legacy_sqlite_db = self.engine.dialect.has_table(
connection, 'VersionHistory'
)
need_to_update = (
db_version != self.latest_version_available() and db_version
) or legacy_sqlite_db
if need_to_update and not _UPGRADE_DB.value:
raise RuntimeError(
f'Database is not up to date: current={self.database_version()}, '
f'latest={self.latest_version_available()}. Take a backup of the '
'database and then launch using the `--xm_upgrade_db` flag '
          'to update to the latest version.'
)
self.upgrade_database()
def insert_experiment(
self, experiment_id: int, experiment_title: str
) -> None:
query = text(
'INSERT INTO experiment (experiment_id, experiment_title) '
'VALUES (:experiment_id, :experiment_title)'
)
self.engine.execute(
query, experiment_id=experiment_id, experiment_title=experiment_title
)
def insert_work_unit(self, experiment_id: int, work_unit_id: int) -> None:
query = text(
'INSERT INTO work_unit (experiment_id, work_unit_id) '
'VALUES (:experiment_id, :work_unit_id)'
)
self.engine.execute(
query, experiment_id=experiment_id, work_unit_id=work_unit_id
)
def insert_vertex_job(
self, experiment_id: int, work_unit_id: int, vertex_job_id: str
) -> None:
job = data_pb2.Job(caip=data_pb2.AIPlatformJob(resource_name=vertex_job_id))
data = text_format.MessageToBytes(job)
query = text(
'INSERT INTO '
'job (experiment_id, work_unit_id, job_name, job_data) '
'VALUES (:experiment_id, :work_unit_id, :job_name, :job_data)'
)
self.engine.execute(
query,
experiment_id=experiment_id,
work_unit_id=work_unit_id,
job_name=vertex_job_id,
job_data=data,
)
def insert_kubernetes_job(
self, experiment_id: int, work_unit_id: int, namespace: str, job_name: str
) -> None:
"""Insert a Kubernetes job into the database."""
job = data_pb2.Job(
kubernetes=data_pb2.KubernetesJob(
namespace=namespace, job_name=job_name
)
)
data = text_format.MessageToString(job)
query = text(
'INSERT INTO '
'job (experiment_id, work_unit_id, job_name, job_data) '
'VALUES (:experiment_id, :work_unit_id, :job_name, :job_data)'
)
self.engine.execute(
query,
experiment_id=experiment_id,
work_unit_id=work_unit_id,
job_name=job_name,
job_data=data,
)
def list_experiment_ids(self) -> List[int]:
"""Lists all the experiment ids from local database."""
query = text('SELECT experiment_id FROM experiment')
rows = self.engine.execute(query)
return [r['experiment_id'] for r in rows]
def get_experiment(self, experiment_id: int) -> ExperimentResult:
"""Gets an experiment from local database."""
query = text(
'SELECT experiment_title FROM experiment '
'WHERE experiment_id=:experiment_id'
)
rows = self.engine.execute(query, experiment_id=experiment_id)
title = None
for r in rows:
title = r['experiment_title']
break
if title is None:
raise ValueError(f"Experiment Id {experiment_id} doesn't exist.")
return ExperimentResult(
experiment_id, title, self.list_work_units(experiment_id)
)
def list_work_units(self, experiment_id: int) -> List[WorkUnitResult]:
"""Lists an experiment's work unit ids from local database."""
query = text(
'SELECT work_unit_id FROM work_unit WHERE experiment_id=:experiment_id'
)
rows = self.engine.execute(query, experiment_id=experiment_id)
return [self.get_work_unit(experiment_id, r['work_unit_id']) for r in rows]
def get_work_unit(
self, experiment_id: int, work_unit_id: int
) -> WorkUnitResult:
"""Gets a work unit from local database."""
query = text(
'SELECT job_name, job_data FROM job '
'WHERE experiment_id=:experiment_id '
'AND work_unit_id=:work_unit_id'
)
rows = self.engine.execute(
query, experiment_id=experiment_id, work_unit_id=work_unit_id
)
jobs = {}
for r in rows:
job = data_pb2.Job()
jobs[r['job_name']] = text_format.Parse(r['job_data'], job)
return WorkUnitResult(work_unit_id, jobs)
def sqlite_settings(
db_file='~/.xmanager/experiments.sqlite3',
) -> SqlConnectionSettings:
return SqlConnectionSettings(
backend='sqlite', db_name=os.path.expanduser(db_file)
)
_SUPPORTED_CONNECTORS = ['sqlite', 'generic', 'cloudsql']
def _validate_db_config(config: Dict[str, Any]) -> None:
if 'sql_connector' not in config:
raise RuntimeError('DB YAML config must contain `sql_connector` entry')
if config['sql_connector'] not in _SUPPORTED_CONNECTORS:
raise RuntimeError(
f'`sql_connector` must be one of: {_SUPPORTED_CONNECTORS}'
)
if 'sql_connection_settings' not in config:
raise RuntimeError(
'DB YAML config must contain `sql_connection_settings` entry'
)
@functools.lru_cache()
def _db_config() -> Dict[str, Any]:
"""Parses and validates YAML DB config file to a dict."""
if _DB_YAML_CONFIG_PATH.value is not None:
db_config_file = xm.utils.resolve_path_relative_to_launcher(
_DB_YAML_CONFIG_PATH.value
)
with open(db_config_file, 'r') as f:
config = yaml.safe_load(f)
_validate_db_config(config)
return config
return {}
@functools.lru_cache()
def db_connector() -> Type[TSqlConnector]:
"""Returns connector based on DB configuration."""
sql_connector = _db_config().get('sql_connector')
if sql_connector is None or sql_connector == 'sqlite':
return SqliteConnector
if sql_connector == 'cloudsql':
return CloudSqlConnector
return GenericSqlConnector
@functools.lru_cache()
def db_settings() -> SqlConnectionSettings:
"""Returns connection settings created based on DB configuration."""
if _db_config():
return SqlConnectionSettings(**_db_config()['sql_connection_settings'])
return sqlite_settings()
@functools.lru_cache()
def database() -> Database:
"""Returns database based on DB configuration."""
return Database(db_connector(), db_settings())
|
xmanager-main
|
xmanager/xm_local/storage/database.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Alembic env.py."""
from alembic import context
from sqlalchemy import engine_from_config
from sqlalchemy import pool
config = context.config
target_metadata = None
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
"""
url = config.get_main_option('sqlalchemy.url')
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={'paramstyle': 'named'},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online() -> None:
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = config.attributes.get('connection', None)
if connectable is None:
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
else:
context.configure(connection=connectable, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
xmanager-main
|
xmanager/xm_local/storage/alembic/env.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated script names don't follow the module naming convention.
# pylint: disable=invalid-name
"""Migrate from legacy SQLite DB or create new tables.
There are two cases in which a DB doesn't already have a current revision:
1. It's an old SQLite DB using the `VersionHistory` table
   => Migrate to the new schema.
2. It's a new database with no tables.
   => Create the tables.
Revision ID: f45829405692
Revises:
Create Date: 2022-09-16 10:50:41.096403
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = 'f45829405692'
down_revision = None
branch_labels = None
depends_on = None
def using_legacy_sqlite_db() -> bool:
connection = op.get_bind()
return 'VersionHistory' in Inspector.from_engine(connection).get_table_names()
def update_columns() -> None:
"""Migrates legacy SQLite DB to portable column names and types.
`batch_alter_table` is required when using SQLite because of its limited
support for the `ALTER` statement (see
https://alembic.sqlalchemy.org/en/latest/batch.html)
  SQLite table names are case-insensitive, but the table names still appear
  in upper case when inspecting the database with `sqlite`. When a change in
  the case of a table name is required, one has to rename through an
  intermediate name first.
"""
with op.batch_alter_table('Experiment') as batch_op:
batch_op.alter_column(
column_name='Id',
new_column_name='experiment_id',
existing_type=sa.Integer(),
type_=sa.BigInteger(),
)
batch_op.alter_column(
column_name='Title',
new_column_name='experiment_title',
existing_type=sa.TEXT,
type_=sa.String(255),
)
op.rename_table('Experiment', 'tmp_experiment')
op.rename_table('tmp_experiment', 'experiment')
with op.batch_alter_table('WorkUnit') as batch_op:
batch_op.alter_column(
column_name='ExperimentId',
new_column_name='experiment_id',
existing_type=sa.Integer(),
type_=sa.BigInteger(),
)
batch_op.alter_column(
column_name='WorkUnitId',
new_column_name='work_unit_id',
)
op.rename_table('WorkUnit', 'work_unit')
with op.batch_alter_table('Job') as batch_op:
batch_op.alter_column(
column_name='ExperimentId',
new_column_name='experiment_id',
existing_type=sa.Integer(),
type_=sa.BigInteger(),
)
batch_op.alter_column(
column_name='WorkUnitId',
new_column_name='work_unit_id',
)
batch_op.alter_column(
column_name='Name',
new_column_name='job_name',
existing_type=sa.TEXT,
type_=sa.String(255),
)
batch_op.alter_column(
column_name='Data',
new_column_name='job_data',
existing_type=sa.TEXT,
type_=sa.String(255),
)
op.rename_table('Job', 'tmp_job')
op.rename_table('tmp_job', 'job')
def create_new_tables() -> None:
"""Creates tables for new database.
`autoincrement` field is required for MSSql (check
https://docs.sqlalchemy.org/en/13/dialects/mssql.html#auto-increment-behavior-identity-columns)
"""
op.create_table(
'experiment',
sa.Column(
'experiment_id',
sa.BigInteger(),
primary_key=True,
autoincrement=False,
),
sa.Column('experiment_title', sa.String(255)),
)
op.create_table(
'work_unit',
sa.Column(
'experiment_id',
sa.BigInteger(),
primary_key=True,
autoincrement=False,
),
sa.Column('work_unit_id', sa.Integer(), primary_key=True),
)
op.create_table(
'job',
sa.Column(
'experiment_id',
sa.BigInteger(),
primary_key=True,
autoincrement=False,
),
sa.Column('work_unit_id', sa.Integer(), primary_key=True),
sa.Column('job_name', sa.String(255), primary_key=True),
sa.Column('job_data', sa.String(255)),
)
def upgrade() -> None:
"""Upgrades DB."""
if using_legacy_sqlite_db():
op.drop_table('VersionHistory')
update_columns()
else:
create_new_tables()
def downgrade() -> None:
"""Downgrades DB."""
raise RuntimeError(
'Downgrade operation is not supported: would downgrade '
      'to the legacy SQLite schema using `VersionHistory` or to an '
      'empty database.'
)
|
xmanager-main
|
xmanager/xm_local/storage/alembic/versions/f45829405692_migrate_or_create.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module setuptools script."""
from setuptools import setup
description = (
"GraphCast: Learning skillful medium-range global weather forecasting"
)
setup(
name="graphcast",
version="0.1",
description=description,
long_description=description,
author="DeepMind",
license="Apache License, Version 2.0",
keywords="GraphCast Weather Prediction",
url="https://github.com/deepmind/graphcast",
packages=["graphcast"],
install_requires=[
"cartopy",
"chex",
"colabtools",
"dask",
"dm-haiku",
"jax",
"jraph",
"matplotlib",
"numpy",
"pandas",
"rtree",
"scipy",
"tree",
"trimesh",
"typing_extensions",
"xarray",
],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Atmospheric Science",
"Topic :: Scientific/Engineering :: Physics",
],
)
|
graphcast-main
|
setup.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for building models."""
from typing import Mapping, Optional, Tuple
import numpy as np
from scipy.spatial import transform
import xarray
def get_graph_spatial_features(
*, node_lat: np.ndarray, node_lon: np.ndarray,
senders: np.ndarray, receivers: np.ndarray,
add_node_positions: bool,
add_node_latitude: bool,
add_node_longitude: bool,
add_relative_positions: bool,
relative_longitude_local_coordinates: bool,
relative_latitude_local_coordinates: bool,
sine_cosine_encoding: bool = False,
encoding_num_freqs: int = 10,
encoding_multiplicative_factor: float = 1.2,
) -> Tuple[np.ndarray, np.ndarray]:
"""Computes spatial features for the nodes.
Args:
node_lat: Latitudes in the [-90, 90] interval of shape [num_nodes]
node_lon: Longitudes in the [0, 360] interval of shape [num_nodes]
senders: Sender indices of shape [num_edges]
receivers: Receiver indices of shape [num_edges]
add_node_positions: Add unit norm absolute positions.
add_node_latitude: Add a feature for latitude (cos(90 - lat))
      Note that even if this is set to False, the model may be able to infer
      the latitude from relative features, unless
      `relative_latitude_local_coordinates` is also True, or if there is any
      bias on the relative edge sizes for different latitudes.
add_node_longitude: Add features for longitude (cos(lon), sin(lon)).
      Note that even if this is set to False, the model may be able to infer the
longitude from relative features, unless
`relative_longitude_local_coordinates` is also True, or if there is any
bias on the relative edge sizes for different longitudes.
    add_relative_positions: Whether to add relative positions in R3 to the edges.
relative_longitude_local_coordinates: If True, relative positions are
computed in a local space where the receiver is at 0 longitude.
relative_latitude_local_coordinates: If True, relative positions are
computed in a local space where the receiver is at 0 latitude.
sine_cosine_encoding: If True, we will transform the node/edge features
      with sine and cosine functions, similar to NeRF.
    encoding_num_freqs: Number of frequencies used in the sine/cosine encoding.
    encoding_multiplicative_factor: Multiplicative factor between successive
      frequencies.
Returns:
    Arrays of shape [num_nodes, num_features] and [num_edges, num_features]
    with node and edge features.
"""
num_nodes = node_lat.shape[0]
num_edges = senders.shape[0]
dtype = node_lat.dtype
node_phi, node_theta = lat_lon_deg_to_spherical(node_lat, node_lon)
# Computing some node features.
node_features = []
if add_node_positions:
# Already in [-1, 1.] range.
node_features.extend(spherical_to_cartesian(node_phi, node_theta))
if add_node_latitude:
# Using the cos of theta.
# From 1. (north pole) to -1 (south pole).
node_features.append(np.cos(node_theta))
if add_node_longitude:
# Using the cos and sin, which is already normalized.
node_features.append(np.cos(node_phi))
node_features.append(np.sin(node_phi))
if not node_features:
node_features = np.zeros([num_nodes, 0], dtype=dtype)
else:
node_features = np.stack(node_features, axis=-1)
# Computing some edge features.
edge_features = []
if add_relative_positions:
relative_position = get_relative_position_in_receiver_local_coordinates(
node_phi=node_phi,
node_theta=node_theta,
senders=senders,
receivers=receivers,
latitude_local_coordinates=relative_latitude_local_coordinates,
longitude_local_coordinates=relative_longitude_local_coordinates
)
# Note this is L2 distance in 3d space, rather than geodesic distance.
relative_edge_distances = np.linalg.norm(
relative_position, axis=-1, keepdims=True)
# Normalize to the maximum edge distance. Note that we expect to always
# have an edge that goes in the opposite direction of any given edge
# so the distribution of relative positions should be symmetric around
# zero. So by scaling by the maximum length, we expect all relative
# positions to fall in the [-1., 1.] interval, and all relative distances
# to fall in the [0., 1.] interval.
max_edge_distance = relative_edge_distances.max()
edge_features.append(relative_edge_distances / max_edge_distance)
edge_features.append(relative_position / max_edge_distance)
if not edge_features:
edge_features = np.zeros([num_edges, 0], dtype=dtype)
else:
edge_features = np.concatenate(edge_features, axis=-1)
if sine_cosine_encoding:
def sine_cosine_transform(x: np.ndarray) -> np.ndarray:
freqs = encoding_multiplicative_factor**np.arange(encoding_num_freqs)
phases = freqs * x[..., None]
x_sin = np.sin(phases)
x_cos = np.cos(phases)
x_cat = np.concatenate([x_sin, x_cos], axis=-1)
return x_cat.reshape([x.shape[0], -1])
node_features = sine_cosine_transform(node_features)
edge_features = sine_cosine_transform(edge_features)
return node_features, edge_features
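# Editor-added note on the sine/cosine encoding above (a sketch, not original
# code): with `encoding_num_freqs = K`, every raw feature value x is mapped to
# [sin(f_1 x), ..., sin(f_K x), cos(f_1 x), ..., cos(f_K x)] with
# f_i = encoding_multiplicative_factor ** (i - 1), so a [num_nodes, F] feature
# array becomes [num_nodes, F * 2 * K].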
def lat_lon_to_leading_axes(
grid_xarray: xarray.DataArray) -> xarray.DataArray:
"""Reorders xarray so lat/lon axes come first."""
# leading + ["lat", "lon"] + trailing
# to
# ["lat", "lon"] + leading + trailing
return grid_xarray.transpose("lat", "lon", ...)
def restore_leading_axes(grid_xarray: xarray.DataArray) -> xarray.DataArray:
"""Reorders xarray so batch/time/level axes come first (if present)."""
# ["lat", "lon"] + [(batch,) (time,) (level,)] + trailing
# to
# [(batch,) (time,) (level,)] + ["lat", "lon"] + trailing
input_dims = list(grid_xarray.dims)
output_dims = list(input_dims)
for leading_key in ["level", "time", "batch"]: # reverse order for insert
if leading_key in input_dims:
output_dims.remove(leading_key)
output_dims.insert(0, leading_key)
return grid_xarray.transpose(*output_dims)
def lat_lon_deg_to_spherical(node_lat: np.ndarray,
node_lon: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
phi = np.deg2rad(node_lon)
theta = np.deg2rad(90 - node_lat)
return phi, theta
def spherical_to_lat_lon(phi: np.ndarray,
theta: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
lon = np.mod(np.rad2deg(phi), 360)
lat = 90 - np.rad2deg(theta)
return lat, lon
def cartesian_to_spherical(x: np.ndarray,
y: np.ndarray,
z: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
phi = np.arctan2(y, x)
with np.errstate(invalid="ignore"): # circumventing b/253179568
theta = np.arccos(z) # Assuming unit radius.
return phi, theta
def spherical_to_cartesian(
phi: np.ndarray, theta: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
# Assuming unit radius.
return (np.cos(phi)*np.sin(theta),
np.sin(phi)*np.sin(theta),
np.cos(theta))
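# Editor-added worked example of the conventions above: for lat = 0, lon = 0,
# `lat_lon_deg_to_spherical` gives phi = 0 and theta = pi / 2, and
# `spherical_to_cartesian` maps that to the unit vector (1.0, 0.0, 0.0); the
# north pole (lat = 90) maps to (0.0, 0.0, 1.0).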
def get_relative_position_in_receiver_local_coordinates(
node_phi: np.ndarray,
node_theta: np.ndarray,
senders: np.ndarray,
receivers: np.ndarray,
latitude_local_coordinates: bool,
longitude_local_coordinates: bool
) -> np.ndarray:
"""Returns relative position features for the edges.
The relative positions will be computed in a rotated space for a local
coordinate system as defined by the receiver. The relative positions are
  simply obtained as the sender position minus the receiver position in
  that local coordinate system, after the rotation in R^3.
Args:
node_phi: [num_nodes] with azimuthal angles (longitude in radians).
node_theta: [num_nodes] with polar angles (colatitude in radians).
senders: [num_edges] with indices.
receivers: [num_edges] with indices.
latitude_local_coordinates: Whether to rotate edges such that the
positions are computed in a local frame where the receiver is always at
latitude 0.
longitude_local_coordinates: Whether to rotate edges such that the
positions are computed in a local frame where the receiver is always at
longitude 0.
Returns:
Array of relative positions in R3 [num_edges, 3]
"""
node_pos = np.stack(spherical_to_cartesian(node_phi, node_theta), axis=-1)
# No rotation in this case.
if not (latitude_local_coordinates or longitude_local_coordinates):
return node_pos[senders] - node_pos[receivers]
# Get rotation matrices for the local space for every node.
rotation_matrices = get_rotation_matrices_to_local_coordinates(
reference_phi=node_phi,
reference_theta=node_theta,
rotate_latitude=latitude_local_coordinates,
rotate_longitude=longitude_local_coordinates)
# Each edge will be rotated according to the rotation matrix of its receiver
# node.
edge_rotation_matrices = rotation_matrices[receivers]
# Rotate all nodes to the rotated space of the corresponding edge.
# Note for receivers we can also do the matmul first and the gather second:
# ```
# receiver_pos_in_rotated_space = rotate_with_matrices(
# rotation_matrices, node_pos)[receivers]
# ```
# which is more efficient, however, we do gather first to keep it more
# symmetric with the sender computation.
receiver_pos_in_rotated_space = rotate_with_matrices(
edge_rotation_matrices, node_pos[receivers])
sender_pos_in_rotated_space = rotate_with_matrices(
edge_rotation_matrices, node_pos[senders])
# Note, here, that because the rotated space is chosen according to the
# receiver, if:
# * latitude_local_coordinates = True: latitude for the receivers will be
# 0, that is the z coordinate will always be 0.
# * longitude_local_coordinates = True: longitude for the receivers will be
# 0, that is the y coordinate will be 0.
# Now we can just subtract.
# Note we are rotating to a local coordinate system, where the y-z axes are
# parallel to a tangent plane to the sphere, but still remain in a 3d space.
# Note that if both `latitude_local_coordinates` and
# `longitude_local_coordinates` are True, and edges are short,
# then the difference in x coordinate between sender and receiver
# should be small, so we could consider dropping the new x coordinate if
# we wanted to work strictly in the tangent plane, however in doing so
# we would lose information about the curvature of the mesh, which may be
# important for very coarse meshes.
return sender_pos_in_rotated_space - receiver_pos_in_rotated_space
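# A minimal usage sketch for the function above: relative edge positions for
# a hypothetical 3-node, 3-edge graph, expressed in each receiver's local
# coordinate frame (receiver at latitude 0 and longitude 0).
def _example_relative_positions() -> np.ndarray:
  node_phi, node_theta = lat_lon_deg_to_spherical(
      np.array([0.0, 0.0, 45.0]), np.array([0.0, 90.0, 180.0]))
  return get_relative_position_in_receiver_local_coordinates(
      node_phi=node_phi,
      node_theta=node_theta,
      senders=np.array([0, 1, 2]),
      receivers=np.array([1, 2, 0]),
      latitude_local_coordinates=True,
      longitude_local_coordinates=True)  # Shape [num_edges=3, 3].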
def get_rotation_matrices_to_local_coordinates(
reference_phi: np.ndarray,
reference_theta: np.ndarray,
rotate_latitude: bool,
rotate_longitude: bool) -> np.ndarray:
"""Returns a rotation matrix to rotate to a point based on a reference vector.
The rotation matrix is built such that a vector in the same coordinate
system at the reference point that points towards the pole before the
rotation continues to point towards the pole after the rotation.
Args:
reference_phi: [leading_axis] Azimuthal angles (longitude in radians) of the
reference.
reference_theta: [leading_axis] Polar angles (colatitude in radians) of the
reference.
rotate_latitude: Whether to produce a rotation matrix that would rotate
R^3 vectors to zero latitude.
rotate_longitude: Whether to produce a rotation matrix that would rotate
R^3 vectors to zero longitude.
Returns:
Matrices of shape [leading_axis, 3, 3] such that when applied to the reference
position with `rotate_with_matrices(rotation_matrices, reference_pos)`
* phi goes to 0. if "rotate_longitude" is True.
* theta goes to np.pi / 2 if "rotate_latitude" is True.
The rotation consists of:
* rotate_latitude = False, rotate_longitude = True:
Latitude preserving rotation.
* rotate_latitude = True, rotate_longitude = True:
Latitude preserving rotation, followed by longitude preserving
rotation.
* rotate_latitude = True, rotate_longitude = False:
Latitude preserving rotation, followed by longitude preserving
rotation, and the inverse of the latitude preserving rotation. Note
this is computationally different from applying the longitude preserving
rotation only. We do it like this so that the polar geodesic curve
continues to be aligned with one of the axes after the rotation.
"""
if rotate_longitude and rotate_latitude:
# We first rotate around the z axis "minus the azimuthal angle", to get the
# point with zero longitude
azimuthal_rotation = - reference_phi
# Then we do a polar rotation (which can be done about the y axis now
# that we are at longitude 0), "minus the polar angle plus pi/2", to get
# the point with zero latitude.
polar_rotation = - reference_theta + np.pi/2
return transform.Rotation.from_euler(
"zy", np.stack([azimuthal_rotation, polar_rotation],
axis=1)).as_matrix()
elif rotate_longitude:
# Just like the previous case, but applying only the azimuthal rotation.
azimuthal_rotation = - reference_phi
return transform.Rotation.from_euler("z", -reference_phi).as_matrix()
elif rotate_latitude:
# Just like the first case, but after doing the polar rotation, undoing
# the azimuthal rotation.
azimuthal_rotation = - reference_phi
polar_rotation = - reference_theta + np.pi/2
return transform.Rotation.from_euler(
"zyz", np.stack(
[azimuthal_rotation, polar_rotation, -azimuthal_rotation]
, axis=1)).as_matrix()
else:
raise ValueError(
"At least one of longitude and latitude should be rotated.")
def rotate_with_matrices(rotation_matrices: np.ndarray, positions: np.ndarray
) -> np.ndarray:
return np.einsum("bji,bi->bj", rotation_matrices, positions)
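# A minimal sketch checking the documented property of the rotation matrices:
# applying them to the reference points themselves sends every point to
# longitude 0 and latitude 0 (phi = 0, theta = pi / 2).
def _example_rotation_to_local_coordinates():
  reference_phi = np.deg2rad(np.array([0.0, 90.0, 210.0]))
  reference_theta = np.deg2rad(np.array([90.0, 45.0, 120.0]))
  rotation_matrices = get_rotation_matrices_to_local_coordinates(
      reference_phi=reference_phi,
      reference_theta=reference_theta,
      rotate_latitude=True,
      rotate_longitude=True)
  reference_pos = np.stack(
      spherical_to_cartesian(reference_phi, reference_theta), axis=-1)
  rotated_phi, rotated_theta = cartesian_to_spherical(
      *rotate_with_matrices(rotation_matrices, reference_pos).T)
  np.testing.assert_allclose(rotated_phi, 0.0, atol=1e-9)
  np.testing.assert_allclose(rotated_theta, np.pi / 2, atol=1e-9)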
def get_bipartite_graph_spatial_features(
*,
senders_node_lat: np.ndarray,
senders_node_lon: np.ndarray,
senders: np.ndarray,
receivers_node_lat: np.ndarray,
receivers_node_lon: np.ndarray,
receivers: np.ndarray,
add_node_positions: bool,
add_node_latitude: bool,
add_node_longitude: bool,
add_relative_positions: bool,
edge_normalization_factor: Optional[float] = None,
relative_longitude_local_coordinates: bool,
relative_latitude_local_coordinates: bool,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Computes spatial features for the nodes.
This function is almost identical to `get_graph_spatial_features`. The only
difference is that sender nodes and receiver nodes can be in different arrays.
This is necessary to enable its use with bipartite typed graphs.
Args:
senders_node_lat: Latitudes in the [-90, 90] interval of shape
[num_sender_nodes]
senders_node_lon: Longitudes in the [0, 360] interval of shape
[num_sender_nodes]
senders: Sender indices of shape [num_edges], indices in [0,
num_sender_nodes)
receivers_node_lat: Latitudes in the [-90, 90] interval of shape
[num_receiver_nodes]
receivers_node_lon: Longitudes in the [0, 360] interval of shape
[num_receiver_nodes]
receivers: Receiver indices of shape [num_edges], indices in [0,
num_receiver_nodes)
add_node_positions: Add unit norm absolute positions.
add_node_latitude: Add a feature for latitude (cos(90 - lat)). Note even if
this is set to False, the model may be able to infer the latitude from
relative features, unless `relative_latitude_local_coordinates` is also
True, or if there is any bias on the relative edge sizes for different
latitudes.
add_node_longitude: Add features for longitude (cos(lon), sin(lon)). Note
even if this is set to False, the model may be able to infer the longitude
from relative features, unless `relative_longitude_local_coordinates` is
also True, or if there is any bias on the relative edge sizes for
different longitudes.
add_relative_positions: Whether to add relative positions in R3 to the edges.
edge_normalization_factor: Allows explicitly controlling edge normalization.
If None, defaults to max edge length. This supports using pre-trained
model weights with a different graph structure to what it was trained on.
relative_longitude_local_coordinates: If True, relative positions are
computed in a local space where the receiver is at 0 longitude.
relative_latitude_local_coordinates: If True, relative positions are
computed in a local space where the receiver is at 0 latitude.
Returns:
Arrays of shape [num_sender_nodes, num_features],
[num_receiver_nodes, num_features] and [num_edges, num_features]
with sender node, receiver node and edge features respectively.
"""
num_senders = senders_node_lat.shape[0]
num_receivers = receivers_node_lat.shape[0]
num_edges = senders.shape[0]
dtype = senders_node_lat.dtype
assert receivers_node_lat.dtype == dtype
senders_node_phi, senders_node_theta = lat_lon_deg_to_spherical(
senders_node_lat, senders_node_lon)
receivers_node_phi, receivers_node_theta = lat_lon_deg_to_spherical(
receivers_node_lat, receivers_node_lon)
# Computing some node features.
senders_node_features = []
receivers_node_features = []
if add_node_positions:
# Already in [-1, 1.] range.
senders_node_features.extend(
spherical_to_cartesian(senders_node_phi, senders_node_theta))
receivers_node_features.extend(
spherical_to_cartesian(receivers_node_phi, receivers_node_theta))
if add_node_latitude:
# Using the cos of theta.
# From 1. (north pole) to -1 (south pole).
senders_node_features.append(np.cos(senders_node_theta))
receivers_node_features.append(np.cos(receivers_node_theta))
if add_node_longitude:
# Using the cos and sin, which is already normalized.
senders_node_features.append(np.cos(senders_node_phi))
senders_node_features.append(np.sin(senders_node_phi))
receivers_node_features.append(np.cos(receivers_node_phi))
receivers_node_features.append(np.sin(receivers_node_phi))
if not senders_node_features:
senders_node_features = np.zeros([num_senders, 0], dtype=dtype)
receivers_node_features = np.zeros([num_receivers, 0], dtype=dtype)
else:
senders_node_features = np.stack(senders_node_features, axis=-1)
receivers_node_features = np.stack(receivers_node_features, axis=-1)
# Computing some edge features.
edge_features = []
if add_relative_positions:
relative_position = get_bipartite_relative_position_in_receiver_local_coordinates( # pylint: disable=line-too-long
senders_node_phi=senders_node_phi,
senders_node_theta=senders_node_theta,
receivers_node_phi=receivers_node_phi,
receivers_node_theta=receivers_node_theta,
senders=senders,
receivers=receivers,
latitude_local_coordinates=relative_latitude_local_coordinates,
longitude_local_coordinates=relative_longitude_local_coordinates)
# Note this is L2 distance in 3d space, rather than geodesic distance.
relative_edge_distances = np.linalg.norm(
relative_position, axis=-1, keepdims=True)
if edge_normalization_factor is None:
# Normalize to the maximum edge distance. Note that we expect to always
# have an edge that goes in the opposite direction of any given edge
# so the distribution of relative positions should be symmetric around
# zero. So by scaling by the maximum length, we expect all relative
# positions to fall in the [-1., 1.] interval, and all relative distances
# to fall in the [0., 1.] interval.
edge_normalization_factor = relative_edge_distances.max()
edge_features.append(relative_edge_distances / edge_normalization_factor)
edge_features.append(relative_position / edge_normalization_factor)
if not edge_features:
edge_features = np.zeros([num_edges, 0], dtype=dtype)
else:
edge_features = np.concatenate(edge_features, axis=-1)
return senders_node_features, receivers_node_features, edge_features
def get_bipartite_relative_position_in_receiver_local_coordinates(
senders_node_phi: np.ndarray,
senders_node_theta: np.ndarray,
senders: np.ndarray,
receivers_node_phi: np.ndarray,
receivers_node_theta: np.ndarray,
receivers: np.ndarray,
latitude_local_coordinates: bool,
longitude_local_coordinates: bool) -> np.ndarray:
"""Returns relative position features for the edges.
This function is equivalent to
`get_relative_position_in_receiver_local_coordinates`, but adapted to work
with bipartite typed graphs.
The relative positions will be computed in a rotated space for a local
coordinate system as defined by the receiver. The relative positions are
simply obtained by subtracting the receiver position from the sender position in
that local coordinate system after the rotation in R^3.
Args:
senders_node_phi: [num_sender_nodes] with azimuthal angles.
senders_node_theta: [num_sender_nodes] with polar angles.
senders: [num_edges] with indices into sender nodes.
receivers_node_phi: [num_receiver_nodes] with azimuthal angles.
receivers_node_theta: [num_receiver_nodes] with polar angles.
receivers: [num_edges] with indices into receiver nodes.
latitude_local_coordinates: Whether to rotate edges such that the
positions are computed in a local frame where the receiver is always at
latitude 0.
longitude_local_coordinates: Whether to rotate edges such that the
positions are computed in a local frame where the receiver is always at
longitude 0.
Returns:
Array of relative positions in R3 [num_edges, 3]
"""
senders_node_pos = np.stack(
spherical_to_cartesian(senders_node_phi, senders_node_theta), axis=-1)
receivers_node_pos = np.stack(
spherical_to_cartesian(receivers_node_phi, receivers_node_theta), axis=-1)
# No rotation in this case.
if not (latitude_local_coordinates or longitude_local_coordinates):
return senders_node_pos[senders] - receivers_node_pos[receivers]
# Get rotation matrices for the local space for every receiver node.
receiver_rotation_matrices = get_rotation_matrices_to_local_coordinates(
reference_phi=receivers_node_phi,
reference_theta=receivers_node_theta,
rotate_latitude=latitude_local_coordinates,
rotate_longitude=longitude_local_coordinates)
# Each edge will be rotated according to the rotation matrix of its receiver
# node.
edge_rotation_matrices = receiver_rotation_matrices[receivers]
# Rotate all nodes to the rotated space of the corresponding edge.
# Note for receivers we can also do the matmul first and the gather second:
# ```
# receiver_pos_in_rotated_space = rotate_with_matrices(
# rotation_matrices, node_pos)[receivers]
# ```
# which is more efficient, however, we do gather first to keep it more
# symmetric with the sender computation.
receiver_pos_in_rotated_space = rotate_with_matrices(
edge_rotation_matrices, receivers_node_pos[receivers])
sender_pos_in_rotated_space = rotate_with_matrices(
edge_rotation_matrices, senders_node_pos[senders])
# Note, here, that because the rotated space is chosen according to the
# receiver, if:
# * latitude_local_coordinates = True: latitude for the receivers will be
# 0, that is the z coordinate will always be 0.
# * longitude_local_coordinates = True: longitude for the receivers will be
# 0, that is the y coordinate will be 0.
# Now we can just subtract.
# Note we are rotating to a local coordinate system, where the y-z axes are
# parallel to a tangent plane to the sphere, but still remain in a 3d space.
# Note that if both `latitude_local_coordinates` and
# `longitude_local_coordinates` are True, and edges are short,
# then the difference in x coordinate between sender and receiver
# should be small, so we could consider dropping the new x coordinate if
# we wanted to work strictly in the tangent plane, however in doing so
# we would lose information about the curvature of the mesh, which may be
# important for very coarse meshes.
return sender_pos_in_rotated_space - receiver_pos_in_rotated_space
def variable_to_stacked(
variable: xarray.Variable,
sizes: Mapping[str, int],
preserved_dims: Tuple[str, ...] = ("batch", "lat", "lon"),
) -> xarray.Variable:
"""Converts an xarray.Variable to preserved_dims + ("channels",).
Any dimensions other than those included in preserved_dims get stacked into a
final "channels" dimension. If any of the preserved_dims are missing then they
are added, with the data broadcast/tiled to match the sizes specified in
`sizes`.
Args:
variable: An xarray.Variable.
sizes: Mapping including sizes for any dimensions which are not present in
`variable` but are needed for the output. This may be needed for example
for a static variable with only ("lat", "lon") dims, or if you want to
encode just the latitude coordinates (a variable with dims ("lat",)).
preserved_dims: dimensions of variable to not be folded in channels.
Returns:
An xarray.Variable with dimensions preserved_dims + ("channels",).
"""
stack_to_channels_dims = [
d for d in variable.dims if d not in preserved_dims]
if stack_to_channels_dims:
variable = variable.stack(channels=stack_to_channels_dims)
dims = {dim: variable.sizes.get(dim) or sizes[dim] for dim in preserved_dims}
dims["channels"] = variable.sizes.get("channels", 1)
return variable.set_dims(dims)
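# A minimal sketch of `variable_to_stacked`, assuming a hypothetical static
# ("lat", "lon", "level") variable: "level" is folded into "channels" and the
# missing "batch" dimension is broadcast from `sizes`.
def _example_variable_to_stacked():
  variable = xarray.Variable(("lat", "lon", "level"), np.zeros([3, 4, 2]))
  stacked = variable_to_stacked(variable, sizes={"batch": 5})
  # dims: ("batch", "lat", "lon", "channels"), shape: (5, 3, 4, 2).
  return stacked.dims, stacked.shape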
def dataset_to_stacked(
dataset: xarray.Dataset,
sizes: Optional[Mapping[str, int]] = None,
preserved_dims: Tuple[str, ...] = ("batch", "lat", "lon"),
) -> xarray.DataArray:
"""Converts an xarray.Dataset to a single stacked array.
This takes each constituent data_var, converts it into BHWC layout
using `variable_to_stacked`, then concats them all along the channels axis.
Args:
dataset: An xarray.Dataset.
sizes: Mapping including sizes for any dimensions which are not present in
the `dataset` but are needed for the output. See variable_to_stacked.
preserved_dims: dimensions from the dataset that should not be folded into
the "channels" dimension.
Returns:
An xarray.DataArray with dimensions preserved_dims + ("channels",).
Existing coordinates for preserved_dims axes will be preserved, however
there will be no coordinates for "channels".
"""
data_vars = [
variable_to_stacked(dataset.variables[name], sizes or dataset.sizes,
preserved_dims)
for name in sorted(dataset.data_vars.keys())
]
coords = {
dim: coord
for dim, coord in dataset.coords.items()
if dim in preserved_dims
}
return xarray.DataArray(
data=xarray.Variable.concat(data_vars, dim="channels"), coords=coords)
def stacked_to_dataset(
stacked_array: xarray.Variable,
template_dataset: xarray.Dataset,
preserved_dims: Tuple[str, ...] = ("batch", "lat", "lon"),
) -> xarray.Dataset:
"""The inverse of dataset_to_stacked.
Requires a template dataset to demonstrate the variables/shapes/coordinates
required.
All variables must have preserved_dims dimensions.
Args:
stacked_array: Data in BHWC layout, encoded the same as dataset_to_stacked
would if it was asked to encode `template_dataset`.
template_dataset: A template Dataset (or other mapping of DataArrays)
demonstrating the shape of output required (variables, shapes,
coordinates etc).
preserved_dims: dimensions from the template_dataset that were not folded
into the "channels" dimension. The preserved_dims need to be a subset of
the dims of all the variables of template_dataset.
Returns:
An xarray.Dataset (or other mapping of DataArrays) with the same shape and
type as template_dataset.
"""
unstack_from_channels_sizes = {}
var_names = sorted(template_dataset.keys())
for name in var_names:
template_var = template_dataset[name]
if not all(dim in template_var.dims for dim in preserved_dims):
raise ValueError(
f"stacked_to_dataset requires all Variables to have {preserved_dims} "
f"dimensions, but found only {template_var.dims}.")
unstack_from_channels_sizes[name] = {
dim: size for dim, size in template_var.sizes.items()
if dim not in preserved_dims}
channels = {name: np.prod(list(unstack_sizes.values()), dtype=np.int64)
for name, unstack_sizes in unstack_from_channels_sizes.items()}
total_expected_channels = sum(channels.values())
found_channels = stacked_array.sizes["channels"]
if total_expected_channels != found_channels:
raise ValueError(
f"Expected {total_expected_channels} channels but found "
f"{found_channels}, when trying to convert a stacked array of shape "
f"{stacked_array.sizes} to a dataset of shape {template_dataset}.")
data_vars = {}
index = 0
for name in var_names:
template_var = template_dataset[name]
var = stacked_array.isel({"channels": slice(index, index + channels[name])})
index += channels[name]
var = var.unstack({"channels": unstack_from_channels_sizes[name]})
var = var.transpose(*template_var.dims)
data_vars[name] = xarray.DataArray(
data=var,
coords=template_var.coords,
# This might not always be the same as the name it's keyed under; it
# will refer to the original variable name, whereas the key might be
# some alias e.g. temperature_850 under which it should be logged:
name=template_var.name,
)
return type(template_dataset)(data_vars) # pytype:disable=not-callable,wrong-arg-count
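# A minimal round-trip sketch through `dataset_to_stacked` and
# `stacked_to_dataset`, assuming a hypothetical two-variable dataset; the
# channels axis holds 1 channel for "pressure" plus 2 for "temperature".
def _example_stacking_round_trip() -> xarray.Dataset:
  dataset = xarray.Dataset({
      "pressure": (("batch", "lat", "lon"), np.zeros([1, 3, 4])),
      "temperature": (("batch", "lat", "lon", "level"),
                      np.zeros([1, 3, 4, 2])),
  })
  stacked = dataset_to_stacked(dataset)  # ("batch", "lat", "lon", "channels")
  return stacked_to_dataset(stacked.variable, dataset)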
|
graphcast-main
|
graphcast/model_utils.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Predictor wrapping a one-step Predictor to make autoregressive predictions.
"""
from typing import Optional, cast
from absl import logging
from graphcast import predictor_base
from graphcast import xarray_jax
from graphcast import xarray_tree
import haiku as hk
import jax
import xarray
def _unflatten_and_expand_time(flat_variables, tree_def, time_coords):
variables = jax.tree_util.tree_unflatten(tree_def, flat_variables)
return variables.expand_dims(time=time_coords, axis=0)
def _get_flat_arrays_and_single_timestep_treedef(variables):
flat_arrays = jax.tree_util.tree_leaves(variables.transpose('time', ...))
_, treedef = jax.tree_util.tree_flatten(variables.isel(time=0, drop=True))
return flat_arrays, treedef
class Predictor(predictor_base.Predictor):
"""Wraps a one-step Predictor to make multi-step predictions autoregressively.
The wrapped Predictor will be used to predict a single timestep conditional
on the inputs passed to the outer Predictor. Its predictions are then
passed back in as inputs at the next timestep, for as many timesteps as are
requested in the targets_template. (When multiple timesteps of input are
used, a rolling window of inputs is maintained with new predictions
concatenated onto the end).
You may ask for additional variables to be predicted as targets which aren't
used as inputs. These will be predicted as output variables only and not fed
back in autoregressively. All target variables must be time-dependent however.
You may also specify static (non-time-dependent) inputs which will be passed
in at each timestep but are not predicted.
At present, any time-dependent inputs must also be present as targets or
forcings, so that their values can be supplied at every step.
The loss of the wrapped one-step Predictor is averaged over all timesteps to
give a loss for the autoregressive Predictor.
"""
def __init__(
self,
predictor: predictor_base.Predictor,
noise_level: Optional[float] = None,
gradient_checkpointing: bool = False,
):
"""Initializes an autoregressive predictor wrapper.
Args:
predictor: A predictor to wrap in an auto-regressive way.
noise_level: Optional value that multiplies the standard normal noise
added to the time-dependent variables of the predictor inputs. In
particular, no noise is added to the predictions that are fed back
auto-regressively. Defaults to not adding noise.
gradient_checkpointing: If True, gradient checkpointing will be
used at each step of the computation to save on memory. Roughly this
should make the backwards pass two times more expensive, and the time per
step, counting the forward pass, should only increase by about 50%.
Note this parameter will be ignored with a warning if the scan sequence
length is 1.
"""
self._predictor = predictor
self._noise_level = noise_level
self._gradient_checkpointing = gradient_checkpointing
def _get_and_validate_constant_inputs(self, inputs, targets, forcings):
constant_inputs = inputs.drop_vars(targets.keys(), errors='ignore')
constant_inputs = constant_inputs.drop_vars(
forcings.keys(), errors='ignore')
for name, var in constant_inputs.items():
if 'time' in var.dims:
raise ValueError(
f'Time-dependent input variable {name} must either be a forcing '
'variable, or a target variable to allow for auto-regressive '
'feedback.')
return constant_inputs
def _validate_targets_and_forcings(self, targets, forcings):
for name, var in targets.items():
if 'time' not in var.dims:
raise ValueError(f'Target variable {name} must be time-dependent.')
for name, var in forcings.items():
if 'time' not in var.dims:
raise ValueError(f'Forcing variable {name} must be time-dependent.')
overlap = forcings.keys() & targets.keys()
if overlap:
raise ValueError('The following were specified as both targets and '
f'forcings, which isn\'t allowed: {overlap}')
def _update_inputs(self, inputs, next_frame):
num_inputs = inputs.dims['time']
predicted_or_forced_inputs = next_frame[list(inputs.keys())]
# Combining datasets with inputs and target time stamps aligns them.
# Only keep the num_inputs trailing frames for use as next inputs.
return (xarray.concat([inputs, predicted_or_forced_inputs], dim='time')
.tail(time=num_inputs)
# Update the time coordinate to reset the lead times for
# next AR iteration.
.assign_coords(time=inputs.coords['time']))
def __call__(self,
inputs: xarray.Dataset,
targets_template: xarray.Dataset,
forcings: xarray.Dataset,
**kwargs) -> xarray.Dataset:
"""Calls the Predictor.
Args:
inputs: input variables used to make predictions. Inputs can include both
time-dependent and time-independent variables. Any time-dependent
input variables must also be present in the targets_template or the
forcings.
targets_template: A target template containing information about which
variables should be predicted and the time alignment of the predictions.
All target variables must be time-dependent.
The number of time frames is used to set the number of unroll steps of the
AR predictor (e.g. multiple unrolls of the inner predictor for a single
target time step are not supported yet).
forcings: Variables that will be fed to the model. The variables
should not overlap with the target ones. The time coordinates of the
forcing variables should match the target ones.
Forcing variables which are also present in the inputs will be used to
supply ground-truth values for those inputs when they are passed to the
underlying predictor at timesteps beyond the first timestep.
**kwargs: Additional arguments passed along to the inner Predictor.
Returns:
predictions: the model predictions matching the target template.
Raises:
ValueError: if the time coordinates of the inputs and targets are not
separated by a constant time step.
"""
constant_inputs = self._get_and_validate_constant_inputs(
inputs, targets_template, forcings)
self._validate_targets_and_forcings(targets_template, forcings)
# After the above checks, the remaining inputs must be time-dependent:
inputs = inputs.drop_vars(constant_inputs.keys())
# A predictions template only including the next time to predict.
target_template = targets_template.isel(time=[0])
flat_forcings, forcings_treedef = (
_get_flat_arrays_and_single_timestep_treedef(forcings))
scan_variables = flat_forcings
def one_step_prediction(inputs, scan_variables):
flat_forcings = scan_variables
forcings = _unflatten_and_expand_time(flat_forcings, forcings_treedef,
target_template.coords['time'])
# Add constant inputs:
all_inputs = xarray.merge([constant_inputs, inputs])
predictions: xarray.Dataset = self._predictor(
all_inputs, target_template,
forcings=forcings,
**kwargs)
next_frame = xarray.merge([predictions, forcings])
next_inputs = self._update_inputs(inputs, next_frame)
# Drop the length-1 time dimension, since scan will concat all the outputs
# for different times along a new leading time dimension:
predictions = predictions.squeeze('time', drop=True)
# We return the prediction flattened into plain jax arrays, because the
# extra leading dimension added by scan prevents the tree_util
# registrations in xarray_jax from unflattening them back into an
# xarray.Dataset automatically:
flat_pred = jax.tree_util.tree_leaves(predictions)
return next_inputs, flat_pred
if self._gradient_checkpointing:
scan_length = targets_template.dims['time']
if scan_length <= 1:
logging.warning(
'Skipping gradient checkpointing for sequence length of 1')
else:
# Just in case we take gradients (e.g. for control), although
# in most cases this will just be for a forward pass.
one_step_prediction = hk.remat(one_step_prediction)
# Loop (without unroll) with hk states in cell (jax.lax.scan won't do).
_, flat_preds = hk.scan(one_step_prediction, inputs, scan_variables)
# The result of scan will have an extra leading axis on all arrays,
# corresponding to the target times in this case. We need to be prepared for
# it when unflattening the arrays back into a Dataset:
scan_result_template = (
target_template.squeeze('time', drop=True)
.expand_dims(time=targets_template.coords['time'], axis=0))
_, scan_result_treedef = jax.tree_util.tree_flatten(scan_result_template)
predictions = jax.tree_util.tree_unflatten(scan_result_treedef, flat_preds)
return predictions
def loss(self,
inputs: xarray.Dataset,
targets: xarray.Dataset,
forcings: xarray.Dataset,
**kwargs
) -> predictor_base.LossAndDiagnostics:
"""The mean of the per-timestep losses of the underlying predictor."""
if targets.sizes['time'] == 1:
# If there is only a single target timestep then we don't need any
# autoregressive feedback and can delegate the loss directly to the
# underlying single-step predictor. This means the underlying predictor
# doesn't need to implement .loss_and_predictions.
return self._predictor.loss(inputs, targets, forcings, **kwargs)
constant_inputs = self._get_and_validate_constant_inputs(
inputs, targets, forcings)
self._validate_targets_and_forcings(targets, forcings)
# After the above checks, the remaining inputs must be time-dependent:
inputs = inputs.drop_vars(constant_inputs.keys())
if self._noise_level:
def add_noise(x):
return x + self._noise_level * jax.random.normal(
hk.next_rng_key(), shape=x.shape)
# Add noise to time-dependent variables of the inputs.
inputs = jax.tree_map(add_noise, inputs)
# The per-timestep targets passed by scan to one_step_loss below will have
# no leading time axis. We need a treedef without the time axis to use
# inside one_step_loss to unflatten it back into a dataset:
flat_targets, target_treedef = _get_flat_arrays_and_single_timestep_treedef(
targets)
scan_variables = flat_targets
flat_forcings, forcings_treedef = (
_get_flat_arrays_and_single_timestep_treedef(forcings))
scan_variables = (flat_targets, flat_forcings)
def one_step_loss(inputs, scan_variables):
flat_target, flat_forcings = scan_variables
forcings = _unflatten_and_expand_time(flat_forcings, forcings_treedef,
targets.coords['time'][:1])
target = _unflatten_and_expand_time(flat_target, target_treedef,
targets.coords['time'][:1])
# Add constant inputs:
all_inputs = xarray.merge([constant_inputs, inputs])
(loss, diagnostics), predictions = self._predictor.loss_and_predictions(
all_inputs,
target,
forcings=forcings,
**kwargs)
# Unwrap to jax arrays shape (batch,):
loss, diagnostics = xarray_tree.map_structure(
xarray_jax.unwrap_data, (loss, diagnostics))
predictions = cast(xarray.Dataset, predictions) # Keeps pytype happy.
next_frame = xarray.merge([predictions, forcings])
next_inputs = self._update_inputs(inputs, next_frame)
return next_inputs, (loss, diagnostics)
if self._gradient_checkpointing:
scan_length = targets.dims['time']
if scan_length <= 1:
logging.warning(
'Skipping gradient checkpointing for sequence length of 1')
else:
one_step_loss = hk.remat(one_step_loss)
# We can pass inputs (the initial state of the loop) in directly as a
# Dataset because the shape we pass in to scan is the same as the shape scan
# passes to the inner function. But, for scan_variables, we must flatten the
# targets (and unflatten them inside the inner function) because they are
# passed to the inner function per-timestep without the original time axis.
# The same applies to the optional forcings.
_, (per_timestep_losses, per_timestep_diagnostics) = hk.scan(
one_step_loss, inputs, scan_variables)
# Re-wrap loss and diagnostics as DataArray and average them over time:
(loss, diagnostics) = jax.tree_util.tree_map(
lambda x: xarray_jax.DataArray(x, dims=('time', 'batch')).mean( # pylint: disable=g-long-lambda
'time', skipna=False),
(per_timestep_losses, per_timestep_diagnostics))
return loss, diagnostics
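# A minimal usage sketch, assuming a hypothetical `OneStepModel` implementing
# predictor_base.Predictor and xarray Datasets `inputs`, `targets_template`
# and `forcings` laid out as described in `Predictor.__call__` above.
def _example_autoregressive_rollout(inputs, targets_template, forcings):
  def forward(inputs, targets_template, forcings):
    one_step = OneStepModel()  # Hypothetical single-step predictor.
    model = Predictor(one_step, gradient_checkpointing=True)
    return model(inputs, targets_template=targets_template, forcings=forcings)
  init, apply = hk.transform(forward)
  rng = jax.random.PRNGKey(0)
  params = init(rng, inputs, targets_template, forcings)
  return apply(params, rng, inputs, targets_template, forcings)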
|
graphcast-main
|
graphcast/autoregressive.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX implementation of Graph Networks Simulator.
Generalization to TypedGraphs of the deep Graph Neural Network from:
@inproceedings{pfaff2021learning,
title={Learning Mesh-Based Simulation with Graph Networks},
author={Pfaff, Tobias and Fortunato, Meire and Sanchez-Gonzalez, Alvaro and
Battaglia, Peter},
booktitle={International Conference on Learning Representations},
year={2021}
}
@inproceedings{sanchez2020learning,
title={Learning to simulate complex physics with graph networks},
author={Sanchez-Gonzalez, Alvaro and Godwin, Jonathan and Pfaff, Tobias and
Ying, Rex and Leskovec, Jure and Battaglia, Peter},
booktitle={International conference on machine learning},
pages={8459--8468},
year={2020},
organization={PMLR}
}
"""
from typing import Mapping, Optional
from graphcast import typed_graph
from graphcast import typed_graph_net
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
class DeepTypedGraphNet(hk.Module):
"""Deep Graph Neural Network.
It works with TypedGraphs with typed nodes and edges. It runs message
passing on all of the node sets and all of the edge sets in the graph. For
each message passing step a `typed_graph_net.InteractionNetwork` is used to
update the full TypedGraph by using different MLPs for each of the node sets
and each of the edge sets.
If embed_{nodes,edges} is specified the node/edge features will be embedded
into a fixed dimensionality before running the first step of message passing.
If {node,edge}_output_size is specified, the final node/edge features will be
embedded into the specified output size.
This class may be used for shared or unshared message passing:
* num_message_passing_steps = N, num_processor_repetitions = 1, gives
N layers of message passing with fully unshared weights:
[W_1, W_2, ... , W_N] (default)
* num_message_passing_steps = 1, num_processor_repetitions = M, gives
M layers of message passing with fully shared weights:
[W_1] * M
* num_message_passing_steps = N, num_processor_repetitions = M, gives
M*N layers of message passing with both shared and unshared message passing
such that the weights used at each iteration are:
[W_1, W_2, ... , W_N] * M
"""
def __init__(self,
*,
node_latent_size: Mapping[str, int],
edge_latent_size: Mapping[str, int],
mlp_hidden_size: int,
mlp_num_hidden_layers: int,
num_message_passing_steps: int,
num_processor_repetitions: int = 1,
embed_nodes: bool = True,
embed_edges: bool = True,
node_output_size: Optional[Mapping[str, int]] = None,
edge_output_size: Optional[Mapping[str, int]] = None,
include_sent_messages_in_node_update: bool = False,
use_layer_norm: bool = True,
activation: str = "relu",
f32_aggregation: bool = False,
aggregate_edges_for_nodes_fn: str = "segment_sum",
aggregate_normalization: Optional[float] = None,
name: str = "DeepTypedGraphNet"):
"""Inits the model.
Args:
node_latent_size: Size of the node latent representations.
edge_latent_size: Size of the edge latent representations.
mlp_hidden_size: Hidden layer size for all MLPs.
mlp_num_hidden_layers: Number of hidden layers in all MLPs.
num_message_passing_steps: Number of unshared message passing steps
in the processor steps.
num_processor_repetitions: Number of times that the same processor is
applied sequentially.
embed_nodes: If False, the node embedder will be omitted.
embed_edges: If False, the edge embedder will be omitted.
node_output_size: Size of the output node representations for
each node type. For node types not specified here, the latent node
representation from the output of the processor will be returned.
edge_output_size: Size of the output edge representations for
each edge type. For edge types not specified here, the latent edge
representation from the output of the processor will be returned.
include_sent_messages_in_node_update: Whether to include pooled sent
messages from each node in the node update.
use_layer_norm: Whether it uses layer norm or not.
activation: name of activation function.
f32_aggregation: Use float32 in the edge aggregation.
aggregate_edges_for_nodes_fn: function used to aggregate messages to each
node.
aggregate_normalization: An optional constant that normalizes the output
of aggregate_edges_for_nodes_fn. For context, this can be used to
reduce the shock the model undergoes when switching resolution, which
increases the number of edges connected to a node. In particular, this is
useful when using segment_sum, but should not be combined with
segment_mean.
name: Name of the model.
"""
super().__init__(name=name)
self._node_latent_size = node_latent_size
self._edge_latent_size = edge_latent_size
self._mlp_hidden_size = mlp_hidden_size
self._mlp_num_hidden_layers = mlp_num_hidden_layers
self._num_message_passing_steps = num_message_passing_steps
self._num_processor_repetitions = num_processor_repetitions
self._embed_nodes = embed_nodes
self._embed_edges = embed_edges
self._node_output_size = node_output_size
self._edge_output_size = edge_output_size
self._include_sent_messages_in_node_update = (
include_sent_messages_in_node_update)
self._use_layer_norm = use_layer_norm
self._activation = _get_activation_fn(activation)
self._initialized = False
self._f32_aggregation = f32_aggregation
self._aggregate_edges_for_nodes_fn = _get_aggregate_edges_for_nodes_fn(
aggregate_edges_for_nodes_fn)
self._aggregate_normalization = aggregate_normalization
if aggregate_normalization:
# using aggregate_normalization only makes sense with segment_sum.
assert aggregate_edges_for_nodes_fn == "segment_sum"
def __call__(self,
input_graph: typed_graph.TypedGraph) -> typed_graph.TypedGraph:
"""Forward pass of the learnable dynamics model."""
self._networks_builder(input_graph)
# Embed input features (if applicable).
latent_graph_0 = self._embed(input_graph)
# Do `m` message passing steps in the latent graphs.
latent_graph_m = self._process(latent_graph_0)
# Compute outputs from the last latent graph (if applicable).
return self._output(latent_graph_m)
def _networks_builder(self, graph_template):
if self._initialized:
return
self._initialized = True
def build_mlp(name, output_size):
mlp = hk.nets.MLP(
output_sizes=[self._mlp_hidden_size] * self._mlp_num_hidden_layers + [
output_size], name=name + "_mlp", activation=self._activation)
return jraph.concatenated_args(mlp)
def build_mlp_with_maybe_layer_norm(name, output_size):
network = build_mlp(name, output_size)
if self._use_layer_norm:
layer_norm = hk.LayerNorm(
axis=-1, create_scale=True, create_offset=True,
name=name + "_layer_norm")
network = hk.Sequential([network, layer_norm])
return jraph.concatenated_args(network)
# The embedder graph network independently embeds edge and node features.
if self._embed_edges:
embed_edge_fn = _build_update_fns_for_edge_types(
build_mlp_with_maybe_layer_norm,
graph_template,
"encoder_edges_",
output_sizes=self._edge_latent_size)
else:
embed_edge_fn = None
if self._embed_nodes:
embed_node_fn = _build_update_fns_for_node_types(
build_mlp_with_maybe_layer_norm,
graph_template,
"encoder_nodes_",
output_sizes=self._node_latent_size)
else:
embed_node_fn = None
embedder_kwargs = dict(
embed_edge_fn=embed_edge_fn,
embed_node_fn=embed_node_fn,
)
self._embedder_network = typed_graph_net.GraphMapFeatures(
**embedder_kwargs)
if self._f32_aggregation:
def aggregate_fn(data, *args, **kwargs):
dtype = data.dtype
data = data.astype(jnp.float32)
output = self._aggregate_edges_for_nodes_fn(data, *args, **kwargs)
if self._aggregate_normalization:
output = output / self._aggregate_normalization
output = output.astype(dtype)
return output
else:
def aggregate_fn(data, *args, **kwargs):
output = self._aggregate_edges_for_nodes_fn(data, *args, **kwargs)
if self._aggregate_normalization:
output = output / self._aggregate_normalization
return output
# Create `num_message_passing_steps` graph networks with unshared parameters
# that update the node and edge latent features.
# Note that we can use `typed_graph_net.InteractionNetwork` because
# it also outputs the messages as updated edge latent features.
self._processor_networks = []
for step_i in range(self._num_message_passing_steps):
self._processor_networks.append(
typed_graph_net.InteractionNetwork(
update_edge_fn=_build_update_fns_for_edge_types(
build_mlp_with_maybe_layer_norm,
graph_template,
f"processor_edges_{step_i}_",
output_sizes=self._edge_latent_size),
update_node_fn=_build_update_fns_for_node_types(
build_mlp_with_maybe_layer_norm,
graph_template,
f"processor_nodes_{step_i}_",
output_sizes=self._node_latent_size),
aggregate_edges_for_nodes_fn=aggregate_fn,
include_sent_messages_in_node_update=(
self._include_sent_messages_in_node_update),
))
# The output MLPs convert edge/node latent features into the output sizes.
output_kwargs = dict(
embed_edge_fn=_build_update_fns_for_edge_types(
build_mlp, graph_template, "decoder_edges_", self._edge_output_size)
if self._edge_output_size else None,
embed_node_fn=_build_update_fns_for_node_types(
build_mlp, graph_template, "decoder_nodes_", self._node_output_size)
if self._node_output_size else None,)
self._output_network = typed_graph_net.GraphMapFeatures(
**output_kwargs)
def _embed(
self, input_graph: typed_graph.TypedGraph) -> typed_graph.TypedGraph:
"""Embeds the input graph features into a latent graph."""
# Copy the context to all of the node types, if applicable.
context_features = input_graph.context.features
if jax.tree_util.tree_leaves(context_features):
# This code assumes a single input feature array for the context and for
# each node type.
assert len(jax.tree_util.tree_leaves(context_features)) == 1
new_nodes = {}
for node_set_name, node_set in input_graph.nodes.items():
node_features = node_set.features
broadcasted_context = jnp.repeat(
context_features, node_set.n_node, axis=0,
total_repeat_length=node_features.shape[0])
new_nodes[node_set_name] = node_set._replace(
features=jnp.concatenate(
[node_features, broadcasted_context], axis=-1))
input_graph = input_graph._replace(
nodes=new_nodes,
context=input_graph.context._replace(features=()))
# Embeds the node and edge features.
latent_graph_0 = self._embedder_network(input_graph)
return latent_graph_0
def _process(
self, latent_graph_0: typed_graph.TypedGraph) -> typed_graph.TypedGraph:
"""Processes the latent graph with several steps of message passing."""
# Do `num_message_passing_steps` with each of the `self._processor_networks`
# with unshared weights, and repeat that `self._num_processor_repetitions`
# times.
latent_graph = latent_graph_0
for unused_repetition_i in range(self._num_processor_repetitions):
for processor_network in self._processor_networks:
latent_graph = self._process_step(processor_network, latent_graph)
return latent_graph
def _process_step(
self, processor_network_k,
latent_graph_prev_k: typed_graph.TypedGraph) -> typed_graph.TypedGraph:
"""Single step of message passing with node/edge residual connections."""
# One step of message passing.
latent_graph_k = processor_network_k(latent_graph_prev_k)
# Add residuals.
nodes_with_residuals = {}
for k, prev_set in latent_graph_prev_k.nodes.items():
nodes_with_residuals[k] = prev_set._replace(
features=prev_set.features + latent_graph_k.nodes[k].features)
edges_with_residuals = {}
for k, prev_set in latent_graph_prev_k.edges.items():
edges_with_residuals[k] = prev_set._replace(
features=prev_set.features + latent_graph_k.edges[k].features)
latent_graph_k = latent_graph_k._replace(
nodes=nodes_with_residuals, edges=edges_with_residuals)
return latent_graph_k
def _output(self,
latent_graph: typed_graph.TypedGraph) -> typed_graph.TypedGraph:
"""Produces the output from the latent graph."""
return self._output_network(latent_graph)
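# A minimal construction sketch, assuming a hypothetical TypedGraph with
# "mesh_nodes"/"grid_nodes" node sets and a "mesh2grid" edge set; like any
# hk.Module, the network must be instantiated inside an hk.transform.
def _example_build_network() -> DeepTypedGraphNet:
  return DeepTypedGraphNet(
      node_latent_size={"mesh_nodes": 128, "grid_nodes": 128},
      edge_latent_size={"mesh2grid": 128},
      mlp_hidden_size=128,
      mlp_num_hidden_layers=1,
      num_message_passing_steps=4,
      node_output_size={"grid_nodes": 8},
      use_layer_norm=True,
      activation="swish",
      aggregate_edges_for_nodes_fn="segment_sum",
      name="example_deep_typed_graph_net")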
def _build_update_fns_for_node_types(
builder_fn, graph_template, prefix, output_sizes=None):
"""Builds an update function for all node types or a subset of them."""
output_fns = {}
for node_set_name in graph_template.nodes.keys():
if output_sizes is None:
# Use the default output size for all types.
output_size = None
else:
# Otherwise, ignore any type that does not have an explicit output size.
if node_set_name in output_sizes:
output_size = output_sizes[node_set_name]
else:
continue
output_fns[node_set_name] = builder_fn(
f"{prefix}{node_set_name}", output_size)
return output_fns
def _build_update_fns_for_edge_types(
builder_fn, graph_template, prefix, output_sizes=None):
"""Builds an edge function for all node types or a subset of them."""
output_fns = {}
for edge_set_key in graph_template.edges.keys():
edge_set_name = edge_set_key.name
if output_sizes is None:
# Use the default output size for all types.
output_size = None
else:
# Otherwise, ignore any type that does not have an explicit output size.
if edge_set_name in output_sizes:
output_size = output_sizes[edge_set_name]
else:
continue
output_fns[edge_set_name] = builder_fn(
f"{prefix}{edge_set_name}", output_size)
return output_fns
def _get_activation_fn(name):
"""Return activation function corresponding to function_name."""
if name == "identity":
return lambda x: x
if hasattr(jax.nn, name):
return getattr(jax.nn, name)
if hasattr(jnp, name):
return getattr(jnp, name)
raise ValueError(f"Unknown activation function {name} specified.")
def _get_aggregate_edges_for_nodes_fn(name):
"""Return aggregate_edges_for_nodes_fn corresponding to function_name."""
if hasattr(jraph, name):
return getattr(jraph, name)
raise ValueError(
f"Unknown aggregate_edges_for_nodes_fn function {name} specified.")
|
graphcast-main
|
graphcast/deep_typed_graph_net.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data-structure for storing graphs with typed edges and nodes."""
from typing import NamedTuple, Any, Union, Tuple, Mapping, TypeVar
ArrayLike = Union[Any] # np.ndarray, jnp.ndarray, tf.tensor
ArrayLikeTree = Union[Any, ArrayLike] # Nest of ArrayLike
_T = TypeVar('_T')
# All tensors have a "flat_batch_axis", which is similar to the leading
# axes of graph_tuples:
# * In the case of nodes this is simply a shared node and flat batch axis, with
# size corresponding to the total number of nodes in the flattened batch.
# * In the case of edges this is simply a shared edge and flat batch axis, with
# size corresponding to the total number of edges in the flattened batch.
# * In the case of globals this is simply the number of graphs in the flattened
# batch.
# All shapes may also have any additional leading shape "batch_shape".
# Options for building batches are:
# * Use a provided "flatten" method that takes a leading `batch_shape` and
# folds it into the flat_batch_axis (this will be useful when using `tf.Dataset`
# which supports batching into RaggedTensors, with leading batch shape even
# if graphs have different numbers of nodes and edges), so the RaggedBatches
# can then be converted into something without ragged dimensions that jax can
# use.
# * Directly build a "flat batch" using a provided function for batching a list
# of graphs (how it is done in `jraph`).
class NodeSet(NamedTuple):
"""Represents a set of nodes."""
n_node: ArrayLike # [num_flat_graphs]
features: ArrayLikeTree # Prev. `nodes`: [num_flat_nodes] + feature_shape
class EdgesIndices(NamedTuple):
"""Represents indices to nodes adjacent to the edges."""
senders: ArrayLike # [num_flat_edges]
receivers: ArrayLike # [num_flat_edges]
class EdgeSet(NamedTuple):
"""Represents a set of edges."""
n_edge: ArrayLike # [num_flat_graphs]
indices: EdgesIndices
features: ArrayLikeTree # Prev. `edges`: [num_flat_edges] + feature_shape
class Context(NamedTuple):
# `n_graph` always contains ones but it is useful to query the leading shape
# in case of graphs without any node or edge sets.
n_graph: ArrayLike # [num_flat_graphs]
features: ArrayLikeTree # Prev. `globals`: [num_flat_graphs] + feature_shape
class EdgeSetKey(NamedTuple):
name: str # Name of the EdgeSet.
# Sender node set name and receiver node set name connected by the edge set.
node_sets: Tuple[str, str]
class TypedGraph(NamedTuple):
"""A graph with typed nodes and edges.
A typed graph is made of a context, multiple sets of nodes and multiple
sets of edges connecting those nodes (as indicated by the EdgeSetKey).
"""
context: Context
nodes: Mapping[str, NodeSet]
edges: Mapping[EdgeSetKey, EdgeSet]
def edge_key_by_name(self, name: str) -> EdgeSetKey:
found_key = [k for k in self.edges.keys() if k.name == name]
if len(found_key) != 1:
raise KeyError("invalid edge key '{}'. Available edges: [{}]".format(
name, ', '.join(x.name for x in self.edges.keys())))
return found_key[0]
def edge_by_name(self, name: str) -> EdgeSet:
return self.edges[self.edge_key_by_name(name)]
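# A minimal construction sketch: a flat batch of one graph with two "mesh"
# nodes and a single typed edge set connecting them.
def _example_typed_graph() -> TypedGraph:
  import numpy as np  # Only needed for this sketch.
  nodes = {"mesh": NodeSet(n_node=np.array([2]), features=np.zeros([2, 3]))}
  edge_key = EdgeSetKey(name="mesh2mesh", node_sets=("mesh", "mesh"))
  edges = {
      edge_key: EdgeSet(
          n_edge=np.array([1]),
          indices=EdgesIndices(senders=np.array([0]),
                               receivers=np.array([1])),
          features=np.zeros([1, 4]))}
  context = Context(n_graph=np.array([1]), features=())
  graph = TypedGraph(context=context, nodes=nodes, edges=edges)
  assert graph.edge_by_name("mesh2mesh") is edges[edge_key]
  return graph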
|
graphcast-main
|
graphcast/typed_graph.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Serialize and deserialize trees."""
import dataclasses
import io
import types
from typing import Any, BinaryIO, Optional, TypeVar
import numpy as np
_T = TypeVar("_T")
def dump(dest: BinaryIO, value: Any) -> None:
"""Dump a tree of dicts/dataclasses to a file object.
Args:
dest: a file object to write to.
value: A tree of dicts, lists, tuples and dataclasses of numpy arrays and
other basic types. Unions are not supported, other than Optional/None
which is only supported in dataclasses, not in dicts, lists or tuples.
All leaves must be coercible to a numpy array, and recoverable as a single
arg to a type.
"""
buffer = io.BytesIO() # In case the destination doesn't support seeking.
np.savez(buffer, **_flatten(value))
dest.write(buffer.getvalue())
def load(source: BinaryIO, typ: type[_T]) -> _T:
"""Load from a file object and convert it to the specified type.
Args:
source: a file object to read from.
typ: a type object that acts as a schema for deserialization. It must match
what was serialized. If a type is Any, it will be returned however numpy
serialized it, which is what you want for a tree of numpy arrays.
Returns:
the deserialized value as the specified type.
"""
return _convert_types(typ, _unflatten(np.load(source)))
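# A minimal round-trip sketch, assuming a hypothetical dataclass holding a
# numpy array and an int, and an in-memory buffer standing in for a real
# checkpoint file.
@dataclasses.dataclass
class _ExampleCheckpoint:
  params: np.ndarray
  step: int

def _example_round_trip() -> _ExampleCheckpoint:
  original = _ExampleCheckpoint(params=np.arange(4.0), step=7)
  buffer = io.BytesIO()
  dump(buffer, original)
  buffer.seek(0)
  restored = load(buffer, _ExampleCheckpoint)
  assert restored.step == original.step
  return restored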
_SEP = ":"
def _flatten(tree: Any) -> dict[str, Any]:
"""Flatten a tree of dicts/dataclasses/lists/tuples to a single dict."""
if dataclasses.is_dataclass(tree):
# Don't use dataclasses.asdict as it is recursive so skips dropping None.
tree = {f.name: v for f in dataclasses.fields(tree)
if (v := getattr(tree, f.name)) is not None}
elif isinstance(tree, (list, tuple)):
tree = dict(enumerate(tree))
assert isinstance(tree, dict)
flat = {}
for k, v in tree.items():
k = str(k)
assert _SEP not in k
if dataclasses.is_dataclass(v) or isinstance(v, (dict, list, tuple)):
for a, b in _flatten(v).items():
flat[f"{k}{_SEP}{a}"] = b
else:
assert v is not None
flat[k] = v
return flat
def _unflatten(flat: dict[str, Any]) -> dict[str, Any]:
"""Unflatten a dict to a tree of dicts."""
tree = {}
for flat_key, v in flat.items():
node = tree
keys = flat_key.split(_SEP)
for k in keys[:-1]:
if k not in node:
node[k] = {}
node = node[k]
node[keys[-1]] = v
return tree
def _convert_types(typ: type[_T], value: Any) -> _T:
"""Convert some structure into the given type. The structures must match."""
if typ in (Any, ...):
return value
if typ in (int, float, str, bool):
return typ(value)
if typ is np.ndarray:
assert isinstance(value, np.ndarray)
return value
if dataclasses.is_dataclass(typ):
kwargs = {}
for f in dataclasses.fields(typ):
# Only support Optional for dataclasses, as numpy can't serialize it
# directly (without pickle), and dataclasses are the only case where we
# can know the full set of values and types and therefore know the
# non-existence must mean None.
if isinstance(f.type, (types.UnionType, type(Optional[int]))):
constructors = [t for t in f.type.__args__ if t is not types.NoneType]
if len(constructors) != 1:
raise TypeError(
"Optional works, Union with anything except None doesn't")
if f.name not in value:
kwargs[f.name] = None
continue
constructor = constructors[0]
else:
constructor = f.type
if f.name in value:
kwargs[f.name] = _convert_types(constructor, value[f.name])
else:
raise ValueError(f"Missing value: {f.name}")
return typ(**kwargs)
base_type = getattr(typ, "__origin__", None)
if base_type is dict:
assert len(typ.__args__) == 2
key_type, value_type = typ.__args__
return {_convert_types(key_type, k): _convert_types(value_type, v)
for k, v in value.items()}
if base_type is list:
assert len(typ.__args__) == 1
value_type = typ.__args__[0]
return [_convert_types(value_type, v)
for _, v in sorted(value.items(), key=lambda x: int(x[0]))]
if base_type is tuple:
if len(typ.__args__) == 2 and typ.__args__[1] == ...:
# An arbitrary length tuple of a single type, eg: tuple[int, ...]
value_type = typ.__args__[0]
return tuple(_convert_types(value_type, v)
for _, v in sorted(value.items(), key=lambda x: int(x[0])))
else:
# A fixed length tuple of arbitrary types, eg: tuple[int, str, float]
assert len(typ.__args__) == len(value)
return tuple(
_convert_types(t, v)
for t, (_, v) in zip(
typ.__args__, sorted(value.items(), key=lambda x: int(x[0]))))
# This is probably unreachable with reasonable serializable inputs.
try:
return typ(value)
except TypeError as e:
raise TypeError(
"_convert_types expects the type argument to be a dataclass defined "
"with types that are valid constructors (eg tuple is fine, Tuple "
"isn't), and accept a numpy array as the sole argument.") from e
|
graphcast-main
|
graphcast/checkpoint.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `data_utils.py`."""
import datetime
from absl.testing import absltest
from absl.testing import parameterized
from graphcast import data_utils
import numpy as np
import xarray
class DataUtilsTest(parameterized.TestCase):
def setUp(self):
super().setUp()
# Fix the seed for reproducibility.
np.random.seed(0)
def test_year_progress_is_zero_at_year_start_or_end(self):
year_progress = data_utils.get_year_progress(
np.array([
0,
data_utils.AVG_SEC_PER_YEAR,
data_utils.AVG_SEC_PER_YEAR * 42, # 42 years.
])
)
np.testing.assert_array_equal(year_progress, np.zeros(year_progress.shape))
def test_year_progress_is_almost_one_before_year_ends(self):
year_progress = data_utils.get_year_progress(
np.array([
data_utils.AVG_SEC_PER_YEAR - 1,
(data_utils.AVG_SEC_PER_YEAR - 1) * 42, # ~42 years
])
)
with self.subTest("Year progress values are close to 1"):
self.assertTrue(np.all(year_progress > 0.999))
with self.subTest("Year progress values != 1"):
self.assertTrue(np.all(year_progress < 1.0))
def test_day_progress_computes_for_all_times_and_longitudes(self):
times = np.random.randint(low=0, high=1e10, size=10)
longitudes = np.arange(0, 360.0, 1.0)
day_progress = data_utils.get_day_progress(times, longitudes)
with self.subTest("Day progress is computed for all times and longinutes"):
self.assertSequenceEqual(
day_progress.shape, (len(times), len(longitudes))
)
@parameterized.named_parameters(
dict(
testcase_name="random_date_1",
year=1988,
month=11,
day=7,
hour=2,
minute=45,
second=34,
),
dict(
testcase_name="random_date_2",
year=2022,
month=3,
day=12,
hour=7,
minute=1,
second=0,
),
)
def test_day_progress_is_in_between_zero_and_one(
self, year, month, day, hour, minute, second
):
# Datetime from a timestamp.
dt = datetime.datetime(year, month, day, hour, minute, second)
# Epoch time.
epoch_time = datetime.datetime(1970, 1, 1)
# Seconds since epoch.
seconds_since_epoch = np.array([(dt - epoch_time).total_seconds()])
# Longitudes with 1 degree resolution.
longitudes = np.arange(0, 360.0, 1.0)
day_progress = data_utils.get_day_progress(seconds_since_epoch, longitudes)
with self.subTest("Day progress >= 0"):
self.assertTrue(np.all(day_progress >= 0.0))
with self.subTest("Day progress < 1"):
self.assertTrue(np.all(day_progress < 1.0))
def test_day_progress_is_zero_at_day_start_or_end(self):
day_progress = data_utils.get_day_progress(
seconds_since_epoch=np.array([
0,
data_utils.SEC_PER_DAY,
data_utils.SEC_PER_DAY * 42, # 42 days.
]),
longitude=np.array([0.0]),
)
np.testing.assert_array_equal(day_progress, np.zeros(day_progress.shape))
def test_day_progress_specific_value(self):
day_progress = data_utils.get_day_progress(
seconds_since_epoch=np.array([123]),
longitude=np.array([0.0]),
)
np.testing.assert_array_almost_equal(
day_progress, np.array([[0.00142361]]), decimal=6
)
def test_featurize_progress_valid_values_and_dimensions(self):
day_progress = np.array([0.0, 0.45, 0.213])
feature_dimensions = ("time",)
progress_features = data_utils.featurize_progress(
name="day_progress", dims=feature_dimensions, progress=day_progress
)
for feature in progress_features.values():
with self.subTest(f"Valid dimensions for {feature}"):
self.assertSequenceEqual(feature.dims, feature_dimensions)
with self.subTest("Valid values for day_progress"):
np.testing.assert_array_equal(
day_progress, progress_features["day_progress"].values
)
with self.subTest("Valid values for day_progress_sin"):
np.testing.assert_array_almost_equal(
np.array([0.0, 0.30901699, 0.97309851]),
progress_features["day_progress_sin"].values,
decimal=6,
)
with self.subTest("Valid values for day_progress_cos"):
np.testing.assert_array_almost_equal(
np.array([1.0, -0.95105652, 0.23038943]),
progress_features["day_progress_cos"].values,
decimal=6,
)
def test_featurize_progress_invalid_dimensions(self):
year_progress = np.array([0.0, 0.45, 0.213])
feature_dimensions = ("time", "longitude")
with self.assertRaises(ValueError):
data_utils.featurize_progress(
name="year_progress", dims=feature_dimensions, progress=year_progress
)
def test_add_derived_vars_variables_added(self):
data = xarray.Dataset(
data_vars={
"var1": (["x", "lon", "datetime"], 8 * np.random.randn(2, 2, 3))
},
coords={
"lon": np.array([0.0, 0.5]),
"datetime": np.array([
datetime.datetime(2021, 1, 1),
datetime.datetime(2023, 1, 1),
datetime.datetime(2023, 1, 3),
]),
},
)
data_utils.add_derived_vars(data)
all_variables = set(data.variables)
with self.subTest("Original value was not removed"):
self.assertIn("var1", all_variables)
with self.subTest("Year progress feature was added"):
self.assertIn(data_utils.YEAR_PROGRESS, all_variables)
with self.subTest("Day progress feature was added"):
self.assertIn(data_utils.DAY_PROGRESS, all_variables)
@parameterized.named_parameters(
dict(testcase_name="missing_datetime", coord_name="lon"),
dict(testcase_name="missing_lon", coord_name="datetime"),
)
def test_add_derived_vars_missing_coordinate_raises_value_error(
self, coord_name
):
with self.subTest(f"Missing {coord_name} coordinate"):
data = xarray.Dataset(
data_vars={"var1": (["x", coord_name], 8 * np.random.randn(2, 2))},
coords={
coord_name: np.array([0.0, 0.5]),
},
)
with self.assertRaises(ValueError):
data_utils.add_derived_vars(data)
if __name__ == "__main__":
absltest.main()
|
graphcast-main
|
graphcast/data_utils_test.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base classes for an xarray-based Predictor API."""
import abc
from typing import Tuple
from graphcast import losses
from graphcast import xarray_jax
import jax.numpy as jnp
import xarray
LossAndDiagnostics = losses.LossAndDiagnostics
class Predictor(abc.ABC):
"""A possibly-trainable predictor of weather, exposing an xarray-based API.
Typically wraps an underlying JAX model and handles translating the xarray
Dataset values to and from plain JAX arrays that are convenient for input to
(and output from) the underlying model.
Different subclasses may exist to wrap different kinds of underlying model,
e.g. models taking stacked inputs/outputs, models taking separate 2D and 3D
inputs/outputs, autoregressive models.
You can also implement a specific model directly as a Predictor if you want,
for example if it has quite specific/unique requirements for its input/output
or loss function, or if it's convenient to implement directly using xarray.
"""
@abc.abstractmethod
def __call__(self,
inputs: xarray.Dataset,
targets_template: xarray.Dataset,
forcings: xarray.Dataset,
**optional_kwargs
) -> xarray.Dataset:
"""Makes predictions.
This is only used by the Experiment for inference / evaluation, with
training going via the .loss method. So it should default to making
predictions for evaluation, although you can also support making predictions
for use in the loss via an is_training argument -- see
LossFunctionPredictor which helps with that.
Args:
inputs: An xarray.Dataset of inputs.
targets_template: An xarray.Dataset or other mapping of xarray.DataArrays,
with the same shape as the targets, to demonstrate what kind of
predictions are required. You can use this to determine which variables,
levels and lead times must be predicted.
You are free to raise an error if you don't support predicting what is
requested.
forcings: An xarray.Dataset of forcings terms. Forcings are variables
that can be fed to the model, but do not need to be predicted. This is
        often because the variable can be computed analytically (e.g. the toa
        radiation of the sun is mostly a function of geometry), or because it is
        considered to be controlled for the experiment (e.g. an imposed scenario
        of CO2 emissions into the atmosphere). Unlike `inputs`, the `forcings` can
include information "from the future", that is, information at target
times specified in the `targets_template`.
**optional_kwargs: Implementations may support extra optional kwargs,
provided they set appropriate defaults for them.
Returns:
Predictions, as an xarray.Dataset or other mapping of DataArrays which
is capable of being evaluated against targets with shape given by
targets_template.
For probabilistic predictors which can return multiple samples from a
predictive distribution, these should (by convention) be returned along
an additional 'sample' dimension.
"""
def loss(self,
inputs: xarray.Dataset,
targets: xarray.Dataset,
forcings: xarray.Dataset,
**optional_kwargs,
) -> LossAndDiagnostics:
"""Computes a training loss, for predictors that are trainable.
Why make this the Predictor's responsibility, rather than letting callers
compute their own loss function using predictions obtained from
Predictor.__call__?
Doing it this way gives Predictors more control over their training setup.
For example, some predictors may wish to train using different targets to
the ones they predict at evaluation time -- perhaps different lead times and
variables, perhaps training to predict transformed versions of targets
where the transform needs to be inverted at evaluation time, etc.
It's also necessary for generative models (VAEs, GANs, ...) where the
training loss is more complex and isn't expressible as a parameter-free
function of predictions and targets.
Args:
inputs: An xarray.Dataset.
targets: An xarray.Dataset or other mapping of xarray.DataArrays. See
docs on __call__ for an explanation about the targets.
forcings: xarray.Dataset of forcing terms.
**optional_kwargs: Implementations may support extra optional kwargs,
provided they set appropriate defaults for them.
Returns:
loss: A DataArray with dimensions ('batch',) containing losses for each
element of the batch. These will be averaged to give the final
loss, locally and across replicas.
diagnostics: Mapping of additional quantities to log by name alongside the
        loss. These will typically correspond to terms in the loss. They
should also have dimensions ('batch',) and will be averaged over the
batch before logging.
You need not include the loss itself in this dict; it will be added for
you.
"""
del targets, forcings, optional_kwargs
batch_size = inputs.sizes['batch']
dummy_loss = xarray_jax.DataArray(jnp.zeros(batch_size), dims=('batch',))
return dummy_loss, {}
def loss_and_predictions(
self,
inputs: xarray.Dataset,
targets: xarray.Dataset,
forcings: xarray.Dataset,
**optional_kwargs,
) -> Tuple[LossAndDiagnostics, xarray.Dataset]:
"""Like .loss but also returns corresponding predictions.
Implementing this is optional as it's not used directly by the Experiment,
but it is required by autoregressive.Predictor when applying an inner
Predictor autoregressively at training time; we need a loss at each step but
also predictions to feed back in for the next step.
    Note the loss itself may not be directly regressing the predictions towards
    targets; it may instead be computed in terms of transformed predictions and
    targets (or in some other way). For this reason we can't always cleanly
    separate this into step 1: get predictions, step 2: compute loss from them;
    hence the need for this combined method.
Args:
inputs:
targets:
forcings:
**optional_kwargs:
As for self.loss.
Returns:
(loss, diagnostics)
As for self.loss
predictions:
The predictions which the loss relates to. These should be of the same
shape as what you would get from
`self.__call__(inputs, targets_template=targets)`, and should be in the
same 'domain' as the inputs (i.e. they shouldn't be transformed
differently to how the predictor expects its inputs).
"""
raise NotImplementedError
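# Illustrative sketch (hypothetical class, not part of this module): a minimal
# concrete Predictor implementing the API above. It is a "persistence"
# baseline that copies the final input timestep to every requested target
# time, relying only on standard xarray operations; a real model would wrap a
# learned JAX network instead.
class _ExamplePersistencePredictor(Predictor):
  """Predicts that every target equals the most recent input."""

  def __call__(self,
               inputs: xarray.Dataset,
               targets_template: xarray.Dataset,
               forcings: xarray.Dataset,
               **optional_kwargs) -> xarray.Dataset:
    del forcings, optional_kwargs  # Unused by this trivial baseline.
    last_inputs = inputs.isel(time=-1, drop=True)
    # Broadcast the final input step against the requested target times, and
    # keep only the variables the template asks for and the inputs provide.
    return xarray.Dataset({
        name: last_inputs[name].broadcast_like(template)
        for name, template in targets_template.items()
        if name in last_inputs
    })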
|
graphcast-main
|
graphcast/predictor_base.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for working with trees of xarray.DataArray (including Datasets).
Note that xarray.Dataset doesn't work out-of-the-box with the `tree` library;
it won't work as a leaf node since it implements Mapping, but also won't work
as an internal node since tree doesn't know how to re-create it properly.
To fix this, we reimplement a subset of `map_structure`, exposing its
constituent DataArrays as leaf nodes. This means it can be mapped over as a
generic container of DataArrays, while still preserving the result as a Dataset
where possible.
This is useful because in a few places we need to handle a general
Mapping[str, DataArray] (where the coordinates might not be compatible across
the constituent DataArrays) but also the special case of a Dataset nicely.
For the result of, e.g., map_structure(fn, dataset): if fn returns None for
some of the child DataArrays, they will be omitted from the returned dataset. If
any values other than DataArrays or None are returned, then we don't attempt to
return a Dataset and just return a plain dict of the results. Similarly if
DataArrays are returned but with non-matching coordinates, it will just return a
plain dict of DataArrays.
Note xarray datatypes are registered with `jax.tree_util` by xarray_jax.py,
but `jax.tree_util.tree_map` is distinct from `xarray_tree.map_structure`,
as the former exposes the underlying JAX/numpy arrays as leaf nodes, while the
latter exposes DataArrays as leaf nodes.
"""
from typing import Any, Callable
import xarray
def map_structure(func: Callable[..., Any], *structures: Any) -> Any:
"""Maps func through given structures with xarrays. See tree.map_structure."""
if not callable(func):
raise TypeError(f'func must be callable, got: {func}')
if not structures:
raise ValueError('Must provide at least one structure')
first = structures[0]
if isinstance(first, xarray.Dataset):
data = {k: func(*[s[k] for s in structures]) for k in first.keys()}
if all(isinstance(a, (type(None), xarray.DataArray))
for a in data.values()):
data_arrays = [v.rename(k) for k, v in data.items() if v is not None]
try:
return xarray.merge(data_arrays, join='exact')
except ValueError: # Exact join not possible.
pass
return data
if isinstance(first, dict):
return {k: map_structure(func, *[s[k] for s in structures])
for k in first.keys()}
if isinstance(first, (list, tuple, set)):
return type(first)(map_structure(func, *s) for s in zip(*structures))
return func(*structures)
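# Illustrative sketch (hypothetical helper, not part of this module): a small
# demonstration of the behaviours described in the module docstring, using a
# toy two-variable Dataset.
def _example_map_structure():
  import numpy as np  # Local import: numpy is only needed by this sketch.
  dataset = xarray.Dataset({
      "foo": (("x",), np.arange(3.0)),
      "bar": (("x",), np.ones(3)),
  })
  # Every leaf maps to a compatible DataArray, so we get a Dataset back.
  doubled = map_structure(lambda leaf: leaf * 2, dataset)
  assert isinstance(doubled, xarray.Dataset)
  # Variables for which the function returns None are dropped.
  only_foo = map_structure(
      lambda leaf: leaf if leaf.name == "foo" else None, dataset)
  assert set(only_foo.keys()) == {"foo"}
  # Non-DataArray return values fall back to a plain dict of results.
  shapes = map_structure(lambda leaf: leaf.shape, dataset)
  assert isinstance(shapes, dict)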
|
graphcast-main
|
graphcast/xarray_tree.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check that the checkpoint serialization is reversable."""
import dataclasses
import io
from typing import Any, Optional, Union
from absl.testing import absltest
from graphcast import checkpoint
import numpy as np
@dataclasses.dataclass
class SubConfig:
a: int
b: str
@dataclasses.dataclass
class Config:
bt: bool
bf: bool
i: int
f: float
o1: Optional[int]
o2: Optional[int]
o3: Union[int, None]
o4: Union[int, None]
o5: int | None
o6: int | None
li: list[int]
ls: list[str]
ldc: list[SubConfig]
tf: tuple[float, ...]
ts: tuple[str, ...]
t: tuple[str, int, SubConfig]
tdc: tuple[SubConfig, ...]
dsi: dict[str, int]
dss: dict[str, str]
dis: dict[int, str]
dsdis: dict[str, dict[int, str]]
dc: SubConfig
dco: Optional[SubConfig]
ddc: dict[str, SubConfig]
@dataclasses.dataclass
class Checkpoint:
params: dict[str, Any]
config: Config
class DataclassTest(absltest.TestCase):
def test_serialize_dataclass(self):
ckpt = Checkpoint(
params={
"layer1": {
"w": np.arange(10).reshape(2, 5),
"b": np.array([2, 6]),
},
"layer2": {
"w": np.arange(8).reshape(2, 4),
"b": np.array([2, 6]),
},
"blah": np.array([3, 9]),
},
config=Config(
bt=True,
bf=False,
i=42,
f=3.14,
o1=1,
o2=None,
o3=2,
o4=None,
o5=3,
o6=None,
li=[12, 9, 7, 15, 16, 14, 1, 6, 11, 4, 10, 5, 13, 3, 8, 2],
ls=list("qhjfdxtpzgemryoikwvblcaus"),
ldc=[SubConfig(1, "hello"), SubConfig(2, "world")],
tf=(1, 4, 2, 10, 5, 9, 13, 16, 15, 8, 12, 7, 11, 14, 3, 6),
ts=("hello", "world"),
t=("foo", 42, SubConfig(1, "bar")),
tdc=(SubConfig(1, "hello"), SubConfig(2, "world")),
dsi={"a": 1, "b": 2, "c": 3},
dss={"d": "e", "f": "g"},
dis={1: "a", 2: "b", 3: "c"},
dsdis={"a": {1: "hello", 2: "world"}, "b": {1: "world"}},
dc=SubConfig(1, "hello"),
dco=None,
ddc={"a": SubConfig(1, "hello"), "b": SubConfig(2, "world")},
))
buffer = io.BytesIO()
checkpoint.dump(buffer, ckpt)
buffer.seek(0)
ckpt2 = checkpoint.load(buffer, Checkpoint)
np.testing.assert_array_equal(ckpt.params["layer1"]["w"],
ckpt2.params["layer1"]["w"])
np.testing.assert_array_equal(ckpt.params["layer1"]["b"],
ckpt2.params["layer1"]["b"])
np.testing.assert_array_equal(ckpt.params["layer2"]["w"],
ckpt2.params["layer2"]["w"])
np.testing.assert_array_equal(ckpt.params["layer2"]["b"],
ckpt2.params["layer2"]["b"])
np.testing.assert_array_equal(ckpt.params["blah"], ckpt2.params["blah"])
self.assertEqual(ckpt.config, ckpt2.config)
if __name__ == "__main__":
absltest.main()
|
graphcast-main
|
graphcast/checkpoint_test.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for creating icosahedral meshes."""
import itertools
from typing import List, NamedTuple, Sequence, Tuple
import numpy as np
from scipy.spatial import transform
class TriangularMesh(NamedTuple):
"""Data structure for triangular meshes.
Attributes:
vertices: spatial positions of the vertices of the mesh of shape
[num_vertices, num_dims].
faces: triangular faces of the mesh of shape [num_faces, 3]. Contains
integer indices into `vertices`.
"""
vertices: np.ndarray
faces: np.ndarray
def merge_meshes(
mesh_list: Sequence[TriangularMesh]) -> TriangularMesh:
"""Merges all meshes into one. Assumes the last mesh is the finest.
Args:
mesh_list: Sequence of meshes, from coarse to fine refinement levels. The
vertices and faces may contain those from preceding, coarser levels.
Returns:
`TriangularMesh` for which the vertices correspond to the highest
resolution mesh in the hierarchy, and the faces are the join set of the
faces at all levels of the hierarchy.
"""
for mesh_i, mesh_ip1 in itertools.pairwise(mesh_list):
num_nodes_mesh_i = mesh_i.vertices.shape[0]
assert np.allclose(mesh_i.vertices, mesh_ip1.vertices[:num_nodes_mesh_i])
return TriangularMesh(
vertices=mesh_list[-1].vertices,
faces=np.concatenate([mesh.faces for mesh in mesh_list], axis=0))
def get_hierarchy_of_triangular_meshes_for_sphere(
splits: int) -> List[TriangularMesh]:
"""Returns a sequence of meshes, each with triangularization sphere.
Starting with a regular icosahedron (12 vertices, 20 faces, 30 edges) with
circumscribed unit sphere. Then, each triangular face is iteratively
subdivided into 4 triangular faces `splits` times. The new vertices are then
projected back onto the unit sphere. All resulting meshes are returned in a
list, from lowest to highest resolution.
The vertices in each face are specified in counter-clockwise order as
  observed from the outside of the icosahedron.
Args:
splits: How many times to split each triangle.
Returns:
Sequence of `TriangularMesh`s of length `splits + 1` each with:
vertices: [num_vertices, 3] vertex positions in 3D, all with unit norm.
faces: [num_faces, 3] with triangular faces joining sets of 3 vertices.
Each row contains three indices into the vertices array, indicating
the vertices adjacent to the face. Always with positive orientation
(counterclock-wise when looking from the outside).
"""
current_mesh = get_icosahedron()
output_meshes = [current_mesh]
for _ in range(splits):
current_mesh = _two_split_unit_sphere_triangle_faces(current_mesh)
output_meshes.append(current_mesh)
return output_meshes
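# Illustrative sketch (hypothetical helper, not part of this module): each
# split subdivides every face into 4 and projects the new vertices onto the
# unit sphere, so `splits=2` yields three meshes with 12/42/162 vertices and
# 20/80/320 faces respectively.
def _example_mesh_hierarchy_sizes():
  meshes = get_hierarchy_of_triangular_meshes_for_sphere(splits=2)
  assert [mesh.vertices.shape[0] for mesh in meshes] == [12, 42, 162]
  assert [mesh.faces.shape[0] for mesh in meshes] == [20, 80, 320]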
def get_icosahedron() -> TriangularMesh:
"""Returns a regular icosahedral mesh with circumscribed unit sphere.
See https://en.wikipedia.org/wiki/Regular_icosahedron#Cartesian_coordinates
for details on the construction of the regular icosahedron.
The vertices in each face are specified in counter-clockwise order as observed
from the outside of the icosahedron.
Returns:
TriangularMesh with:
vertices: [num_vertices=12, 3] vertex positions in 3D, all with unit norm.
faces: [num_faces=20, 3] with triangular faces joining sets of 3 vertices.
Each row contains three indices into the vertices array, indicating
the vertices adjacent to the face. Always with positive orientation (
counterclock-wise when looking from the outside).
"""
phi = (1 + np.sqrt(5)) / 2
vertices = []
for c1 in [1., -1.]:
for c2 in [phi, -phi]:
vertices.append((c1, c2, 0.))
vertices.append((0., c1, c2))
vertices.append((c2, 0., c1))
vertices = np.array(vertices, dtype=np.float32)
vertices /= np.linalg.norm([1., phi])
# I did this manually, checking the orientation one by one.
faces = [(0, 1, 2),
(0, 6, 1),
(8, 0, 2),
(8, 4, 0),
(3, 8, 2),
(3, 2, 7),
(7, 2, 1),
(0, 4, 6),
(4, 11, 6),
(6, 11, 5),
(1, 5, 7),
(4, 10, 11),
(4, 8, 10),
(10, 8, 3),
(10, 3, 9),
(11, 10, 9),
(11, 9, 5),
(5, 9, 7),
(9, 3, 7),
(1, 6, 5),
]
  # By default the top is an edge (the "top arist" in the diagram below)
  # parallel to the Y axis. We need to rotate around the y axis by half the
  # supplement of the angle between faces to get the desired orientation.
# /O\ (top arist)
# / \ Z
# (adjacent face)/ \ (adjacent face) ^
# / angle_between_faces \ |
# / \ |
# / \ YO-----> X
# This results in:
# (adjacent faceis now top plane)
# ----------------------O\ (top arist)
# \
# \
# \ (adjacent face)
# \
# \
# \
angle_between_faces = 2 * np.arcsin(phi / np.sqrt(3))
rotation_angle = (np.pi - angle_between_faces) / 2
rotation = transform.Rotation.from_euler(seq="y", angles=rotation_angle)
rotation_matrix = rotation.as_matrix()
vertices = np.dot(vertices, rotation_matrix)
return TriangularMesh(vertices=vertices.astype(np.float32),
faces=np.array(faces, dtype=np.int32))
def _two_split_unit_sphere_triangle_faces(
triangular_mesh: TriangularMesh) -> TriangularMesh:
"""Splits each triangular face into 4 triangles keeping the orientation."""
# Every time we split a triangle into 4 we will be adding 3 extra vertices,
# located at the edge centres.
# This class handles the positioning of the new vertices, and avoids creating
# duplicates.
new_vertices_builder = _ChildVerticesBuilder(triangular_mesh.vertices)
new_faces = []
for ind1, ind2, ind3 in triangular_mesh.faces:
# Transform each triangular face into 4 triangles,
# preserving the orientation.
# ind3
# / \
# / \
# / #3 \
# / \
# ind31 -------------- ind23
# / \ / \
# / \ #4 / \
# / #1 \ / #2 \
# / \ / \
# ind1 ------------ ind12 ------------ ind2
ind12 = new_vertices_builder.get_new_child_vertex_index((ind1, ind2))
ind23 = new_vertices_builder.get_new_child_vertex_index((ind2, ind3))
ind31 = new_vertices_builder.get_new_child_vertex_index((ind3, ind1))
# Note how each of the 4 triangular new faces specifies the order of the
# vertices to preserve the orientation of the original face. As the input
# face should always be counter-clockwise as specified in the diagram,
# this means child faces should also be counter-clockwise.
new_faces.extend([[ind1, ind12, ind31], # 1
[ind12, ind2, ind23], # 2
[ind31, ind23, ind3], # 3
[ind12, ind23, ind31], # 4
])
return TriangularMesh(vertices=new_vertices_builder.get_all_vertices(),
faces=np.array(new_faces, dtype=np.int32))
class _ChildVerticesBuilder(object):
"""Bookkeeping of new child vertices added to an existing set of vertices."""
def __init__(self, parent_vertices):
# Because the same new vertex will be required when splitting adjacent
# triangles (which share an edge) we keep them in a hash table indexed by
# sorted indices of the vertices adjacent to the edge, to avoid creating
# duplicated child vertices.
self._child_vertices_index_mapping = {}
self._parent_vertices = parent_vertices
# We start with all previous vertices.
self._all_vertices_list = list(parent_vertices)
def _get_child_vertex_key(self, parent_vertex_indices):
return tuple(sorted(parent_vertex_indices))
def _create_child_vertex(self, parent_vertex_indices):
"""Creates a new vertex."""
# Position for new vertex is the middle point, between the parent points,
# projected to unit sphere.
child_vertex_position = self._parent_vertices[
list(parent_vertex_indices)].mean(0)
child_vertex_position /= np.linalg.norm(child_vertex_position)
# Add the vertex to the output list. The index for this new vertex will
# match the length of the list before adding it.
child_vertex_key = self._get_child_vertex_key(parent_vertex_indices)
self._child_vertices_index_mapping[child_vertex_key] = len(
self._all_vertices_list)
self._all_vertices_list.append(child_vertex_position)
def get_new_child_vertex_index(self, parent_vertex_indices):
"""Returns index for a child vertex, creating it if necessary."""
# Get the key to see if we already have a new vertex in the middle.
child_vertex_key = self._get_child_vertex_key(parent_vertex_indices)
if child_vertex_key not in self._child_vertices_index_mapping:
self._create_child_vertex(parent_vertex_indices)
return self._child_vertices_index_mapping[child_vertex_key]
def get_all_vertices(self):
"""Returns an array with old vertices."""
return np.array(self._all_vertices_list)
def faces_to_edges(faces: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Transforms polygonal faces to sender and receiver indices.
  It does so by transforming every face into N_i edges. For example, if a
  triangular face has indices [0, 1, 2], three edges are added: 0->1, 1->2, and
  2->0.
If all faces have consistent orientation, and the surface represented by the
faces is closed, then every edge in a polygon with a certain orientation
is also part of another polygon with the opposite orientation. In this
situation, the edges returned by the method are always bidirectional.
Args:
faces: Integer array of shape [num_faces, 3]. Contains node indices
adjacent to each face.
Returns:
Tuple with sender/receiver indices, each of shape [num_edges=num_faces*3].
"""
assert faces.ndim == 2
assert faces.shape[-1] == 3
senders = np.concatenate([faces[:, 0], faces[:, 1], faces[:, 2]])
receivers = np.concatenate([faces[:, 1], faces[:, 2], faces[:, 0]])
return senders, receivers
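# Illustrative sketch (hypothetical helper, not part of this module): for the
# base icosahedron (20 faces, 30 undirected edges), `faces_to_edges` returns
# 20 * 3 = 60 directed edges, i.e. both directions of every undirected edge,
# because the faces are consistently oriented and the surface is closed.
def _example_faces_to_edges():
  mesh = get_icosahedron()
  senders, receivers = faces_to_edges(mesh.faces)
  assert senders.shape == (60,) and receivers.shape == (60,)
  directed_edges = set(zip(senders.tolist(), receivers.tolist()))
  assert len(directed_edges) == 60
  # Every directed edge is matched by its reverse.
  assert all((r, s) in directed_edges for (s, r) in directed_edges)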
|
graphcast-main
|
graphcast/icosahedral_mesh.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of typed Graph Neural Networks."""
from typing import Callable, Mapping, Optional, Union
from graphcast import typed_graph
import jax.numpy as jnp
import jax.tree_util as tree
import jraph
# All features will be an ArrayTree.
NodeFeatures = EdgeFeatures = SenderFeatures = ReceiverFeatures = Globals = (
jraph.ArrayTree)
# Signature:
# (node features, outgoing edge features, incoming edge features,
# globals) -> updated node features
GNUpdateNodeFn = Callable[
[NodeFeatures, Mapping[str, SenderFeatures], Mapping[str, ReceiverFeatures],
Globals],
NodeFeatures]
GNUpdateGlobalFn = Callable[
[Mapping[str, NodeFeatures], Mapping[str, EdgeFeatures], Globals],
Globals]
def GraphNetwork( # pylint: disable=invalid-name
update_edge_fn: Mapping[str, jraph.GNUpdateEdgeFn],
update_node_fn: Mapping[str, GNUpdateNodeFn],
update_global_fn: Optional[GNUpdateGlobalFn] = None,
aggregate_edges_for_nodes_fn: jraph.AggregateEdgesToNodesFn = jraph
.segment_sum,
aggregate_nodes_for_globals_fn: jraph.AggregateNodesToGlobalsFn = jraph
.segment_sum,
aggregate_edges_for_globals_fn: jraph.AggregateEdgesToGlobalsFn = jraph
.segment_sum,
):
"""Returns a method that applies a configured GraphNetwork.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
extended to Typed Graphs with multiple edge sets and node sets and extended to
allow aggregating not only edges received by the nodes, but also edges sent by
the nodes.
Example usage::
gn = GraphNetwork(update_edge_function,
update_node_function, **kwargs)
# Conduct multiple rounds of message passing with the same parameters:
for _ in range(num_message_passing_steps):
graph = gn(graph)
Args:
update_edge_fn: mapping of functions used to update a subset of the edge
types, indexed by edge type name.
update_node_fn: mapping of functions used to update a subset of the node
types, indexed by node type name.
update_global_fn: function used to update the globals or None to deactivate
globals updates.
aggregate_edges_for_nodes_fn: function used to aggregate messages to each
node.
aggregate_nodes_for_globals_fn: function used to aggregate the nodes for the
globals.
aggregate_edges_for_globals_fn: function used to aggregate the edges for the
globals.
Returns:
A method that applies the configured GraphNetwork.
"""
def _apply_graph_net(graph: typed_graph.TypedGraph) -> typed_graph.TypedGraph:
"""Applies a configured GraphNetwork to a graph.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
extended to Typed Graphs with multiple edge sets and node sets and extended
to allow aggregating not only edges received by the nodes, but also edges
sent by the nodes.
Args:
graph: a `TypedGraph` containing the graph.
Returns:
Updated `TypedGraph`.
"""
updated_graph = graph
# Edge update.
updated_edges = dict(updated_graph.edges)
for edge_set_name, edge_fn in update_edge_fn.items():
edge_set_key = graph.edge_key_by_name(edge_set_name)
updated_edges[edge_set_key] = _edge_update(
updated_graph, edge_fn, edge_set_key)
updated_graph = updated_graph._replace(edges=updated_edges)
# Node update.
updated_nodes = dict(updated_graph.nodes)
for node_set_key, node_fn in update_node_fn.items():
updated_nodes[node_set_key] = _node_update(
updated_graph, node_fn, node_set_key, aggregate_edges_for_nodes_fn)
updated_graph = updated_graph._replace(nodes=updated_nodes)
# Global update.
if update_global_fn:
updated_context = _global_update(
updated_graph, update_global_fn,
aggregate_edges_for_globals_fn,
aggregate_nodes_for_globals_fn)
updated_graph = updated_graph._replace(context=updated_context)
return updated_graph
return _apply_graph_net
def _edge_update(graph, edge_fn, edge_set_key): # pylint: disable=invalid-name
"""Updates an edge set of a given key."""
sender_nodes = graph.nodes[edge_set_key.node_sets[0]]
receiver_nodes = graph.nodes[edge_set_key.node_sets[1]]
edge_set = graph.edges[edge_set_key]
senders = edge_set.indices.senders # pytype: disable=attribute-error
receivers = edge_set.indices.receivers # pytype: disable=attribute-error
sent_attributes = tree.tree_map(
lambda n: n[senders], sender_nodes.features)
received_attributes = tree.tree_map(
lambda n: n[receivers], receiver_nodes.features)
n_edge = edge_set.n_edge
sum_n_edge = senders.shape[0]
global_features = tree.tree_map(
lambda g: jnp.repeat(g, n_edge, axis=0, total_repeat_length=sum_n_edge),
graph.context.features)
new_features = edge_fn(
edge_set.features, sent_attributes, received_attributes,
global_features)
return edge_set._replace(features=new_features)
def _node_update(graph, node_fn, node_set_key, aggregation_fn): # pylint: disable=invalid-name
"""Updates an edge set of a given key."""
node_set = graph.nodes[node_set_key]
sum_n_node = tree.tree_leaves(node_set.features)[0].shape[0]
sent_features = {}
for edge_set_key, edge_set in graph.edges.items():
sender_node_set_key = edge_set_key.node_sets[0]
if sender_node_set_key == node_set_key:
assert isinstance(edge_set.indices, typed_graph.EdgesIndices)
senders = edge_set.indices.senders
sent_features[edge_set_key.name] = tree.tree_map(
lambda e: aggregation_fn(e, senders, sum_n_node), edge_set.features) # pylint: disable=cell-var-from-loop
received_features = {}
for edge_set_key, edge_set in graph.edges.items():
receiver_node_set_key = edge_set_key.node_sets[1]
if receiver_node_set_key == node_set_key:
assert isinstance(edge_set.indices, typed_graph.EdgesIndices)
receivers = edge_set.indices.receivers
received_features[edge_set_key.name] = tree.tree_map(
lambda e: aggregation_fn(e, receivers, sum_n_node), edge_set.features) # pylint: disable=cell-var-from-loop
n_node = node_set.n_node
global_features = tree.tree_map(
lambda g: jnp.repeat(g, n_node, axis=0, total_repeat_length=sum_n_node),
graph.context.features)
new_features = node_fn(
node_set.features, sent_features, received_features, global_features)
return node_set._replace(features=new_features)
def _global_update(graph, global_fn, edge_aggregation_fn, node_aggregation_fn): # pylint: disable=invalid-name
"""Updates an edge set of a given key."""
n_graph = graph.context.n_graph.shape[0]
graph_idx = jnp.arange(n_graph)
edge_features = {}
for edge_set_key, edge_set in graph.edges.items():
assert isinstance(edge_set.indices, typed_graph.EdgesIndices)
sum_n_edge = edge_set.indices.senders.shape[0]
edge_gr_idx = jnp.repeat(
graph_idx, edge_set.n_edge, axis=0, total_repeat_length=sum_n_edge)
edge_features[edge_set_key.name] = tree.tree_map(
lambda e: edge_aggregation_fn(e, edge_gr_idx, n_graph), # pylint: disable=cell-var-from-loop
edge_set.features)
node_features = {}
for node_set_key, node_set in graph.nodes.items():
sum_n_node = tree.tree_leaves(node_set.features)[0].shape[0]
node_gr_idx = jnp.repeat(
graph_idx, node_set.n_node, axis=0, total_repeat_length=sum_n_node)
node_features[node_set_key] = tree.tree_map(
lambda n: node_aggregation_fn(n, node_gr_idx, n_graph), # pylint: disable=cell-var-from-loop
node_set.features)
new_features = global_fn(node_features, edge_features, graph.context.features)
return graph.context._replace(features=new_features)
InteractionUpdateNodeFn = Callable[
[jraph.NodeFeatures,
Mapping[str, SenderFeatures],
Mapping[str, ReceiverFeatures]],
jraph.NodeFeatures]
InteractionUpdateNodeFnNoSentEdges = Callable[
[jraph.NodeFeatures,
Mapping[str, ReceiverFeatures]],
jraph.NodeFeatures]
def InteractionNetwork( # pylint: disable=invalid-name
update_edge_fn: Mapping[str, jraph.InteractionUpdateEdgeFn],
update_node_fn: Mapping[str, Union[InteractionUpdateNodeFn,
InteractionUpdateNodeFnNoSentEdges]],
aggregate_edges_for_nodes_fn: jraph.AggregateEdgesToNodesFn = jraph
.segment_sum,
include_sent_messages_in_node_update: bool = False):
"""Returns a method that applies a configured InteractionNetwork.
An interaction network computes interactions on the edges based on the
  previous edge features, and on the features of the nodes sending into those
edges. It then updates the nodes based on the incoming updated edges.
See https://arxiv.org/abs/1612.00222 for more details.
This implementation extends the behavior to `TypedGraphs` adding an option
to include edge features for which a node is a sender in the arguments to
the node update function.
Args:
update_edge_fn: mapping of functions used to update a subset of the edge
types, indexed by edge type name.
update_node_fn: mapping of functions used to update a subset of the node
types, indexed by node type name.
aggregate_edges_for_nodes_fn: function used to aggregate messages to each
node.
include_sent_messages_in_node_update: pass edge features for which a node is
a sender to the node update function.
"""
# An InteractionNetwork is a GraphNetwork without globals features,
# so we implement the InteractionNetwork as a configured GraphNetwork.
# An InteractionNetwork edge function does not have global feature inputs,
# so we filter the passed global argument in the GraphNetwork.
wrapped_update_edge_fn = tree.tree_map(
lambda fn: lambda e, s, r, g: fn(e, s, r), update_edge_fn)
# Similarly, we wrap the update_node_fn to ensure only the expected
# arguments are passed to the Interaction net.
if include_sent_messages_in_node_update:
wrapped_update_node_fn = tree.tree_map(
lambda fn: lambda n, s, r, g: fn(n, s, r), update_node_fn)
else:
wrapped_update_node_fn = tree.tree_map(
lambda fn: lambda n, s, r, g: fn(n, r), update_node_fn)
return GraphNetwork(
update_edge_fn=wrapped_update_edge_fn,
update_node_fn=wrapped_update_node_fn,
aggregate_edges_for_nodes_fn=aggregate_edges_for_nodes_fn)
def GraphMapFeatures( # pylint: disable=invalid-name
embed_edge_fn: Optional[Mapping[str, jraph.EmbedEdgeFn]] = None,
embed_node_fn: Optional[Mapping[str, jraph.EmbedNodeFn]] = None,
embed_global_fn: Optional[jraph.EmbedGlobalFn] = None):
"""Returns function which embeds the components of a graph independently.
Args:
embed_edge_fn: mapping of functions used to embed each edge type,
indexed by edge type name.
embed_node_fn: mapping of functions used to embed each node type,
indexed by node type name.
embed_global_fn: function used to embed the globals.
"""
def _embed(graph: typed_graph.TypedGraph) -> typed_graph.TypedGraph:
updated_edges = dict(graph.edges)
if embed_edge_fn:
for edge_set_name, embed_fn in embed_edge_fn.items():
edge_set_key = graph.edge_key_by_name(edge_set_name)
edge_set = graph.edges[edge_set_key]
updated_edges[edge_set_key] = edge_set._replace(
features=embed_fn(edge_set.features))
updated_nodes = dict(graph.nodes)
if embed_node_fn:
for node_set_key, embed_fn in embed_node_fn.items():
node_set = graph.nodes[node_set_key]
updated_nodes[node_set_key] = node_set._replace(
features=embed_fn(node_set.features))
updated_context = graph.context
if embed_global_fn:
updated_context = updated_context._replace(
features=embed_global_fn(updated_context.features))
return graph._replace(edges=updated_edges, nodes=updated_nodes,
context=updated_context)
return _embed
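# Illustrative sketch (hypothetical helper, not part of this module):
# configures, but does not apply, an InteractionNetwork for a graph with a
# single "mesh" node set and a single "mesh2mesh" edge set. The lambdas stand
# in for the MLPs a real model would use; applying the returned function also
# requires a `typed_graph.TypedGraph` whose node/edge set names match.
def _example_configure_interaction_network():
  return InteractionNetwork(
      update_edge_fn={
          # Receives (edge features, sender node features, receiver features).
          'mesh2mesh': lambda e, s, r: e + s + r,
      },
      update_node_fn={
          # Receives (node features, aggregated incoming edges per edge set).
          'mesh': lambda n, r: n + r['mesh2mesh'],
      })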
|
graphcast-main
|
graphcast/typed_graph_net.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xarray_tree."""
from absl.testing import absltest
from graphcast import xarray_tree
import numpy as np
import xarray
TEST_DATASET = xarray.Dataset(
data_vars={
"foo": (("x", "y"), np.zeros((2, 3))),
"bar": (("x",), np.zeros((2,))),
},
coords={
"x": [1, 2],
"y": [10, 20, 30],
}
)
class XarrayTreeTest(absltest.TestCase):
def test_map_structure_maps_over_leaves_but_preserves_dataset_type(self):
def fn(leaf):
self.assertIsInstance(leaf, xarray.DataArray)
result = leaf + 1
# Removing the name from the returned DataArray to test that we don't rely
# on it being present to restore the correct names in the result:
result = result.rename(None)
return result
result = xarray_tree.map_structure(fn, TEST_DATASET)
self.assertIsInstance(result, xarray.Dataset)
self.assertSameElements({"foo", "bar"}, result.keys())
def test_map_structure_on_data_arrays(self):
data_arrays = dict(TEST_DATASET)
result = xarray_tree.map_structure(lambda x: x+1, data_arrays)
self.assertIsInstance(result, dict)
self.assertSameElements({"foo", "bar"}, result.keys())
def test_map_structure_on_dataset_plain_dict_when_coords_incompatible(self):
def fn(leaf):
# Returns DataArrays that can't be exactly merged back into a Dataset
# due to the coordinates not matching:
if leaf.name == "foo":
return xarray.DataArray(
data=np.zeros(2), dims=("x",), coords={"x": [1, 2]})
else:
return xarray.DataArray(
data=np.zeros(2), dims=("x",), coords={"x": [3, 4]})
result = xarray_tree.map_structure(fn, TEST_DATASET)
self.assertIsInstance(result, dict)
self.assertSameElements({"foo", "bar"}, result.keys())
def test_map_structure_on_dataset_drops_vars_with_none_return_values(self):
def fn(leaf):
return leaf if leaf.name == "foo" else None
result = xarray_tree.map_structure(fn, TEST_DATASET)
self.assertIsInstance(result, xarray.Dataset)
self.assertSameElements({"foo"}, result.keys())
def test_map_structure_on_dataset_returns_plain_dict_other_return_types(self):
def fn(leaf):
self.assertIsInstance(leaf, xarray.DataArray)
return "not a DataArray"
result = xarray_tree.map_structure(fn, TEST_DATASET)
self.assertEqual({"foo": "not a DataArray",
"bar": "not a DataArray"}, result)
def test_map_structure_two_args_different_variable_orders(self):
dataset_different_order = TEST_DATASET[["bar", "foo"]]
def fn(arg1, arg2):
self.assertEqual(arg1.name, arg2.name)
xarray_tree.map_structure(fn, TEST_DATASET, dataset_different_order)
if __name__ == "__main__":
absltest.main()
|
graphcast-main
|
graphcast/xarray_tree_test.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers that take care of casting."""
import contextlib
from typing import Any, Mapping, Tuple
import chex
from graphcast import predictor_base
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import xarray
PyTree = Any
class Bfloat16Cast(predictor_base.Predictor):
"""Wrapper that casts all inputs to bfloat16 and outputs to targets dtype."""
def __init__(self, predictor: predictor_base.Predictor, enabled: bool = True):
"""Inits the wrapper.
Args:
predictor: predictor being wrapped.
enabled: disables the wrapper if False, for simpler hyperparameter scans.
"""
self._enabled = enabled
self._predictor = predictor
def __call__(self,
inputs: xarray.Dataset,
targets_template: xarray.Dataset,
forcings: xarray.Dataset,
**kwargs
) -> xarray.Dataset:
if not self._enabled:
return self._predictor(inputs, targets_template, forcings, **kwargs)
with bfloat16_variable_view():
predictions = self._predictor(
*_all_inputs_to_bfloat16(inputs, targets_template, forcings),
**kwargs,)
predictions_dtype = infer_floating_dtype(predictions)
if predictions_dtype != jnp.bfloat16:
raise ValueError(f'Expected bfloat16 output, got {predictions_dtype}')
targets_dtype = infer_floating_dtype(targets_template)
return tree_map_cast(
predictions, input_dtype=jnp.bfloat16, output_dtype=targets_dtype)
def loss(self,
inputs: xarray.Dataset,
targets: xarray.Dataset,
forcings: xarray.Dataset,
**kwargs,
) -> predictor_base.LossAndDiagnostics:
if not self._enabled:
return self._predictor.loss(inputs, targets, forcings, **kwargs)
with bfloat16_variable_view():
loss, scalars = self._predictor.loss(
*_all_inputs_to_bfloat16(inputs, targets, forcings), **kwargs)
if loss.dtype != jnp.bfloat16:
raise ValueError(f'Expected bfloat16 loss, got {loss.dtype}')
targets_dtype = infer_floating_dtype(targets)
# Note that casting back the loss to e.g. float32 should not affect data
# types of the backwards pass, because the first thing the backwards pass
    # should do is to go backwards through the casting op and cast back to
    # bfloat16 (and xprof traces seem to confirm this).
return tree_map_cast((loss, scalars),
input_dtype=jnp.bfloat16, output_dtype=targets_dtype)
def loss_and_predictions( # pytype: disable=signature-mismatch # jax-ndarray
self,
inputs: xarray.Dataset,
targets: xarray.Dataset,
forcings: xarray.Dataset,
**kwargs,
) -> Tuple[predictor_base.LossAndDiagnostics,
xarray.Dataset]:
if not self._enabled:
return self._predictor.loss_and_predictions(inputs, targets, forcings, # pytype: disable=bad-return-type # jax-ndarray
**kwargs)
with bfloat16_variable_view():
(loss, scalars), predictions = self._predictor.loss_and_predictions(
*_all_inputs_to_bfloat16(inputs, targets, forcings), **kwargs)
if loss.dtype != jnp.bfloat16:
raise ValueError(f'Expected bfloat16 loss, got {loss.dtype}')
predictions_dtype = infer_floating_dtype(predictions)
if predictions_dtype != jnp.bfloat16:
raise ValueError(f'Expected bfloat16 output, got {predictions_dtype}')
targets_dtype = infer_floating_dtype(targets)
return tree_map_cast(((loss, scalars), predictions),
input_dtype=jnp.bfloat16, output_dtype=targets_dtype)
def infer_floating_dtype(data_vars: Mapping[str, chex.Array]) -> np.dtype:
"""Infers a floating dtype from an input mapping of data."""
dtypes = {
v.dtype
for k, v in data_vars.items() if jnp.issubdtype(v.dtype, np.floating)}
if len(dtypes) != 1:
dtypes_and_shapes = {
k: (v.dtype, v.shape)
for k, v in data_vars.items() if jnp.issubdtype(v.dtype, np.floating)}
raise ValueError(
        f'Did not find exactly one floating dtype {dtypes} in input variables: '
        f'{dtypes_and_shapes}')
return list(dtypes)[0]
def _all_inputs_to_bfloat16(
inputs: xarray.Dataset,
targets: xarray.Dataset,
forcings: xarray.Dataset,
) -> Tuple[xarray.Dataset,
xarray.Dataset,
xarray.Dataset]:
return (inputs.astype(jnp.bfloat16),
jax.tree_map(lambda x: x.astype(jnp.bfloat16), targets),
forcings.astype(jnp.bfloat16))
def tree_map_cast(inputs: PyTree, input_dtype: np.dtype, output_dtype: np.dtype,
) -> PyTree:
  def cast_fn(x):
    if x.dtype == input_dtype:
      return x.astype(output_dtype)
    else:
      return x
  return jax.tree_map(cast_fn, inputs)
@contextlib.contextmanager
def bfloat16_variable_view(enabled: bool = True):
"""Context for Haiku modules with float32 params, but bfloat16 activations.
It works as follows:
* Every time a variable is requested to be created/set as np.bfloat16,
it will create an underlying float32 variable, instead.
  * Every time a variable is requested as bfloat16, it will check that the
    underlying variable is of float32 type, and cast it to bfloat16.
Note the gradients are still computed and accumulated as float32, because
the params returned by init are float32, so the gradient function with
respect to the params will already include an implicit casting to float32.
Args:
enabled: Only enables bfloat16 behavior if True.
Yields:
None
"""
if enabled:
with hk.custom_creator(
_bfloat16_creator, state=True), hk.custom_getter(
_bfloat16_getter, state=True), hk.custom_setter(
_bfloat16_setter):
yield
else:
yield
def _bfloat16_creator(next_creator, shape, dtype, init, context):
"""Creates float32 variables when bfloat16 is requested."""
if context.original_dtype == jnp.bfloat16:
dtype = jnp.float32
return next_creator(shape, dtype, init)
def _bfloat16_getter(next_getter, value, context):
"""Casts float32 to bfloat16 when bfloat16 was originally requested."""
if context.original_dtype == jnp.bfloat16:
assert value.dtype == jnp.float32
value = value.astype(jnp.bfloat16)
return next_getter(value)
def _bfloat16_setter(next_setter, value, context):
"""Casts bfloat16 to float32 when bfloat16 was originally set."""
if context.original_dtype == jnp.bfloat16:
value = value.astype(jnp.float32)
return next_setter(value)
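# Illustrative sketch (hypothetical helper, not part of this module):
# `tree_map_cast` only converts leaves whose dtype matches `input_dtype`,
# leaving everything else (e.g. integer arrays) untouched. A training setup
# would typically combine this with the `Bfloat16Cast` wrapper defined above.
def _example_tree_map_cast():
  outputs = {
      'loss': jnp.ones((2,), dtype=jnp.bfloat16),
      'step': jnp.zeros((), dtype=jnp.int32),
  }
  cast = tree_map_cast(
      outputs, input_dtype=jnp.bfloat16, output_dtype=jnp.float32)
  assert cast['loss'].dtype == jnp.float32
  assert cast['step'].dtype == jnp.int32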
|
graphcast-main
|
graphcast/casting.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset utilities."""
from typing import Any, Mapping, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import xarray
TimedeltaLike = Any # Something convertible to pd.Timedelta.
TimedeltaStr = str # A string convertible to pd.Timedelta.
TargetLeadTimes = Union[
TimedeltaLike,
Sequence[TimedeltaLike],
slice # with TimedeltaLike as its start and stop.
]
_SEC_PER_HOUR = 3600
_HOUR_PER_DAY = 24
SEC_PER_DAY = _SEC_PER_HOUR * _HOUR_PER_DAY
_AVG_DAY_PER_YEAR = 365.24219
AVG_SEC_PER_YEAR = SEC_PER_DAY * _AVG_DAY_PER_YEAR
DAY_PROGRESS = "day_progress"
YEAR_PROGRESS = "year_progress"
def get_year_progress(seconds_since_epoch: np.ndarray) -> np.ndarray:
"""Computes year progress for times in seconds.
Args:
seconds_since_epoch: Times in seconds since the "epoch" (the point at which
UNIX time starts).
Returns:
Year progress normalized to be in the [0, 1) interval for each time point.
"""
# Start with the pure integer division, and then float at the very end.
# We will try to keep as much precision as possible.
years_since_epoch = (
seconds_since_epoch / SEC_PER_DAY / np.float64(_AVG_DAY_PER_YEAR)
)
  # Note that depending on how these ops are done, we may end up with a
  # "weak_type", which can cause issues in subtle ways that are hard to track.
# In any case, casting to float32 should get rid of the weak type.
# [0, 1.) Interval.
return np.mod(years_since_epoch, 1.0).astype(np.float32)
def get_day_progress(
seconds_since_epoch: np.ndarray,
longitude: np.ndarray,
) -> np.ndarray:
"""Computes day progress for times in seconds at each longitude.
Args:
seconds_since_epoch: 1D array of times in seconds since the 'epoch' (the
point at which UNIX time starts).
longitude: 1D array of longitudes at which day progress is computed.
Returns:
    2D array of day progress values normalized to be in the [0, 1) interval
for each time point at each longitude.
"""
# [0.0, 1.0) Interval.
day_progress_greenwich = (
np.mod(seconds_since_epoch, SEC_PER_DAY) / SEC_PER_DAY
)
# Offset the day progress to the longitude of each point on Earth.
longitude_offsets = np.deg2rad(longitude) / (2 * np.pi)
day_progress = np.mod(
day_progress_greenwich[..., np.newaxis] + longitude_offsets, 1.0
)
return day_progress.astype(np.float32)
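# Illustrative sketch (hypothetical helper, not part of this module): at
# 12:00 UTC (43200 seconds into a day) day progress is 0.5 at longitude 0 and
# 0.75 at longitude 90, i.e. a quarter of a day further ahead.
def _example_day_progress():
  progress = get_day_progress(
      seconds_since_epoch=np.array([43200]),
      longitude=np.array([0.0, 90.0]),
  )
  np.testing.assert_allclose(progress, np.array([[0.5, 0.75]]), atol=1e-6)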
def featurize_progress(
name: str, dims: Sequence[str], progress: np.ndarray
) -> Mapping[str, xarray.Variable]:
"""Derives features used by ML models from the `progress` variable.
Args:
name: Base variable name from which features are derived.
dims: List of the output feature dimensions, e.g. ("day", "lon").
progress: Progress variable values.
Returns:
Dictionary of xarray variables derived from the `progress` values. It
includes the original `progress` variable along with its sin and cos
transformations.
Raises:
ValueError if the number of feature dimensions is not equal to the number
of data dimensions.
"""
if len(dims) != progress.ndim:
raise ValueError(
f"Number of feature dimensions ({len(dims)}) must be equal to the"
f" number of data dimensions: {progress.ndim}."
)
progress_phase = progress * (2 * np.pi)
return {
name: xarray.Variable(dims, progress),
name + "_sin": xarray.Variable(dims, np.sin(progress_phase)),
name + "_cos": xarray.Variable(dims, np.cos(progress_phase)),
}
def add_derived_vars(data: xarray.Dataset) -> None:
"""Adds year and day progress features to `data` in place.
NOTE: `toa_incident_solar_radiation` needs to be computed in this function
as well.
Args:
data: Xarray dataset to which derived features will be added.
Raises:
ValueError if `datetime` or `lon` are not in `data` coordinates.
"""
for coord in ("datetime", "lon"):
if coord not in data.coords:
raise ValueError(f"'{coord}' must be in `data` coordinates.")
# Compute seconds since epoch.
# Note `data.coords["datetime"].astype("datetime64[s]").astype(np.int64)`
  # does not work, as xarray always casts dates into nanoseconds!
seconds_since_epoch = (
data.coords["datetime"].data.astype("datetime64[s]").astype(np.int64)
)
batch_dim = ("batch",) if "batch" in data.dims else ()
# Add year progress features.
year_progress = get_year_progress(seconds_since_epoch)
data.update(
featurize_progress(
name=YEAR_PROGRESS, dims=batch_dim + ("time",), progress=year_progress
)
)
# Add day progress features.
longitude_coord = data.coords["lon"]
day_progress = get_day_progress(seconds_since_epoch, longitude_coord.data)
data.update(
featurize_progress(
name=DAY_PROGRESS,
dims=batch_dim + ("time",) + longitude_coord.dims,
progress=day_progress,
)
)
def extract_input_target_times(
dataset: xarray.Dataset,
input_duration: TimedeltaLike,
target_lead_times: TargetLeadTimes,
) -> Tuple[xarray.Dataset, xarray.Dataset]:
"""Extracts inputs and targets for prediction, from a Dataset with a time dim.
The input period is assumed to be contiguous (specified by a duration), but
the targets can be a list of arbitrary lead times.
Examples:
# Use 18 hours of data as inputs, and two specific lead times as targets:
# 3 days and 5 days after the final input.
extract_inputs_targets(
dataset,
input_duration='18h',
target_lead_times=('3d', '5d')
)
# Use 1 day of data as input, and all lead times between 6 hours and
# 24 hours inclusive as targets. Demonstrates a friendlier supported string
# syntax.
extract_inputs_targets(
dataset,
input_duration='1 day',
target_lead_times=slice('6 hours', '24 hours')
)
# Just use a single target lead time of 3 days:
extract_inputs_targets(
dataset,
input_duration='24h',
target_lead_times='3d'
)
Args:
dataset: An xarray.Dataset with a 'time' dimension whose coordinates are
timedeltas. It's assumed that the time coordinates have a fixed offset /
time resolution, and that the input_duration and target_lead_times are
multiples of this.
input_duration: pandas.Timedelta or something convertible to it (e.g. a
shorthand string like '6h' or '5d12h').
target_lead_times: Either a single lead time, a slice with start and stop
(inclusive) lead times, or a sequence of lead times. Lead times should be
Timedeltas (or something convertible to). They are given relative to the
final input timestep, and should be positive.
Returns:
inputs:
targets:
Two datasets with the same shape as the input dataset except that a
selection has been made from the time axis, and the origin of the
time coordinate will be shifted to refer to lead times relative to the
final input timestep. So for inputs the times will end at lead time 0,
for targets the time coordinates will refer to the lead times requested.
"""
(target_lead_times, target_duration
) = _process_target_lead_times_and_get_duration(target_lead_times)
# Shift the coordinates for the time axis so that a timedelta of zero
# corresponds to the forecast reference time. That is, the final timestep
# that's available as input to the forecast, with all following timesteps
# forming the target period which needs to be predicted.
# This means the time coordinates are now forecast lead times.
time = dataset.coords["time"]
dataset = dataset.assign_coords(time=time + target_duration - time[-1])
# Slice out targets:
targets = dataset.sel({"time": target_lead_times})
input_duration = pd.Timedelta(input_duration)
# Both endpoints are inclusive with label-based slicing, so we offset by a
# small epsilon to make one of the endpoints non-inclusive:
zero = pd.Timedelta(0)
epsilon = pd.Timedelta(1, "ns")
inputs = dataset.sel({"time": slice(-input_duration + epsilon, zero)})
return inputs, targets
def _process_target_lead_times_and_get_duration(
    target_lead_times: TargetLeadTimes,
) -> Tuple[TargetLeadTimes, pd.Timedelta]:
  """Returns the processed lead times and the minimum duration to cover them."""
if isinstance(target_lead_times, slice):
# A slice of lead times. xarray already accepts timedelta-like values for
# the begin/end/step of the slice.
if target_lead_times.start is None:
# If the start isn't specified, we assume it starts at the next timestep
# after lead time 0 (lead time 0 is the final input timestep):
target_lead_times = slice(
pd.Timedelta(1, "ns"), target_lead_times.stop, target_lead_times.step
)
target_duration = pd.Timedelta(target_lead_times.stop)
else:
if not isinstance(target_lead_times, (list, tuple, set)):
# A single lead time, which we wrap as a length-1 array to ensure there
# still remains a time dimension (here of length 1) for consistency.
target_lead_times = [target_lead_times]
# A list of multiple (not necessarily contiguous) lead times:
target_lead_times = [pd.Timedelta(x) for x in target_lead_times]
target_lead_times.sort()
target_duration = target_lead_times[-1]
return target_lead_times, target_duration
def extract_inputs_targets_forcings(
dataset: xarray.Dataset,
*,
input_variables: Tuple[str, ...],
target_variables: Tuple[str, ...],
forcing_variables: Tuple[str, ...],
pressure_levels: Tuple[int, ...],
input_duration: TimedeltaLike,
target_lead_times: TargetLeadTimes,
) -> Tuple[xarray.Dataset, xarray.Dataset, xarray.Dataset]:
"""Extracts inputs, targets and forcings according to requirements."""
dataset = dataset.sel(level=list(pressure_levels))
# "Forcings" are derived variables and do not exist in the original ERA5 or
# HRES datasets. Compute them if they are not in `dataset`.
if not set(forcing_variables).issubset(set(dataset.data_vars)):
add_derived_vars(dataset)
# `datetime` is needed by add_derived_vars but breaks autoregressive rollouts.
dataset = dataset.drop_vars("datetime")
inputs, targets = extract_input_target_times(
dataset,
input_duration=input_duration,
target_lead_times=target_lead_times)
if set(forcing_variables) & set(target_variables):
raise ValueError(
f"Forcing variables {forcing_variables} should not "
f"overlap with target variables {target_variables}."
)
inputs = inputs[list(input_variables)]
# The forcing uses the same time coordinates as the target.
forcings = targets[list(forcing_variables)]
targets = targets[list(target_variables)]
return inputs, targets, forcings
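# A minimal usage sketch of `extract_inputs_targets_forcings` above. This is
# illustrative only: the toy sizes and values, and the reuse of
# "toa_incident_solar_radiation" as the sole forcing, are assumptions rather
# than a real ERA5/HRES dataset.
import numpy as np
import pandas as pd
import xarray
_time = pd.to_timedelta(np.arange(1, 7) * 6, unit="h")  # 6h .. 36h
_toy = xarray.Dataset(
    data_vars={
        "temperature": (("time", "level", "lat", "lon"),
                        np.zeros((6, 2, 3, 4), dtype=np.float32)),
        "toa_incident_solar_radiation": (("time", "lat", "lon"),
                                         np.zeros((6, 3, 4), dtype=np.float32)),
    },
    coords={"time": _time, "level": [500, 850],
            "lat": np.linspace(-60.0, 60.0, 3), "lon": np.arange(4) * 90.0,
            "datetime": ("time", pd.Timestamp("2020-01-01") + _time)},
)
_inputs, _targets, _forcings = extract_inputs_targets_forcings(
    _toy,
    input_variables=("temperature",),
    target_variables=("temperature",),
    forcing_variables=("toa_incident_solar_radiation",),
    pressure_levels=(500, 850),
    input_duration="12h",
    target_lead_times=slice(pd.Timedelta("6h"), pd.Timedelta("24h")),
)
assert _inputs.sizes["time"] == 2 and _targets.sizes["time"] == 4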
|
graphcast-main
|
graphcast/data_utils.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xarray_jax."""
from absl.testing import absltest
import chex
from graphcast import xarray_jax
import jax
import jax.numpy as jnp
import numpy as np
import xarray
class XarrayJaxTest(absltest.TestCase):
def test_jax_array_wrapper_with_numpy_api(self):
# This is just a side benefit of making things work with xarray, but the
# JaxArrayWrapper does allow you to manipulate JAX arrays using the
# standard numpy API, without converting them to numpy in the process:
ones = jnp.ones((3, 4), dtype=np.float32)
x = xarray_jax.JaxArrayWrapper(ones)
x = np.abs((x + 2) * (x - 3))
x = x[:-1, 1:3]
x = np.concatenate([x, x + 1], axis=0)
x = np.transpose(x, (1, 0))
x = np.reshape(x, (-1,))
x = x.astype(np.int32)
self.assertIsInstance(x, xarray_jax.JaxArrayWrapper)
# An explicit conversion gets us out of JAX-land however:
self.assertIsInstance(np.asarray(x), np.ndarray)
def test_jax_xarray_variable(self):
def ops_via_xarray(inputs):
x = xarray_jax.Variable(('lat', 'lon'), inputs)
# We'll apply a sequence of operations just to test that the end result is
# still a JAX array, i.e. we haven't converted to numpy at any point.
x = np.abs((x + 2) * (x - 3))
x = x.isel({'lat': slice(0, -1), 'lon': slice(1, 3)})
x = xarray.Variable.concat([x, x + 1], dim='lat')
x = x.transpose('lon', 'lat')
x = x.stack(channels=('lon', 'lat'))
x = x.sum()
return xarray_jax.jax_data(x)
# Check it doesn't leave jax-land when passed concrete values:
ones = jnp.ones((3, 4), dtype=np.float32)
result = ops_via_xarray(ones)
self.assertIsInstance(result, jax.Array)
# And that you can JIT it and compute gradients through it. These will
# involve passing jax tracers through the xarray computation:
jax.jit(ops_via_xarray)(ones)
jax.grad(ops_via_xarray)(ones)
def test_jax_xarray_data_array(self):
def ops_via_xarray(inputs):
x = xarray_jax.DataArray(dims=('lat', 'lon'),
data=inputs,
coords={'lat': np.arange(3) * 10,
'lon': np.arange(4) * 10})
x = np.abs((x + 2) * (x - 3))
x = x.sel({'lat': slice(0, 20)})
y = xarray_jax.DataArray(dims=('lat', 'lon'),
data=inputs,
coords={'lat': np.arange(3, 6) * 10,
'lon': np.arange(4) * 10})
x = xarray.concat([x, y], dim='lat')
x = x.transpose('lon', 'lat')
x = x.stack(channels=('lon', 'lat'))
x = x.unstack()
x = x.sum()
return xarray_jax.jax_data(x)
ones = jnp.ones((3, 4), dtype=np.float32)
result = ops_via_xarray(ones)
self.assertIsInstance(result, jax.Array)
jax.jit(ops_via_xarray)(ones)
jax.grad(ops_via_xarray)(ones)
def test_jax_xarray_dataset(self):
def ops_via_xarray(foo, bar):
x = xarray_jax.Dataset(
data_vars={'foo': (('lat', 'lon'), foo),
'bar': (('time', 'lat', 'lon'), bar)},
coords={
'time': np.arange(2),
'lat': np.arange(3) * 10,
'lon': np.arange(4) * 10})
x = np.abs((x + 2) * (x - 3))
x = x.sel({'lat': slice(0, 20)})
y = xarray_jax.Dataset(
data_vars={'foo': (('lat', 'lon'), foo),
'bar': (('time', 'lat', 'lon'), bar)},
coords={
'time': np.arange(2),
'lat': np.arange(3, 6) * 10,
'lon': np.arange(4) * 10})
x = xarray.concat([x, y], dim='lat')
x = x.transpose('lon', 'lat', 'time')
x = x.stack(channels=('lon', 'lat'))
x = (x.foo + x.bar).sum()
return xarray_jax.jax_data(x)
foo = jnp.ones((3, 4), dtype=np.float32)
bar = jnp.ones((2, 3, 4), dtype=np.float32)
result = ops_via_xarray(foo, bar)
self.assertIsInstance(result, jax.Array)
jax.jit(ops_via_xarray)(foo, bar)
jax.grad(ops_via_xarray)(foo, bar)
def test_jit_function_with_xarray_variable_arguments_and_return(self):
function = jax.jit(lambda v: v + 1)
with self.subTest('jax input'):
inputs = xarray_jax.Variable(
('lat', 'lon'), jnp.ones((3, 4), dtype=np.float32))
_ = function(inputs)
# We test running the jitted function a second time, to exercise logic in
# jax which checks if the structure of the inputs (including dimension
# names and coordinates) is the same as it was for the previous call and
# so whether it needs to re-trace-and-compile a new version of the
# function or not. This can run into problems if the 'aux' structure
# returned by the registered flatten function is not hashable/comparable.
outputs = function(inputs)
self.assertEqual(outputs.dims, inputs.dims)
with self.subTest('numpy input'):
inputs = xarray.Variable(
('lat', 'lon'), np.ones((3, 4), dtype=np.float32))
_ = function(inputs)
outputs = function(inputs)
self.assertEqual(outputs.dims, inputs.dims)
def test_jit_problem_if_convert_to_plain_numpy_array(self):
inputs = xarray_jax.DataArray(
data=jnp.ones((2,), dtype=np.float32), dims=('foo',))
with self.assertRaises(jax.errors.TracerArrayConversionError):
# Calling .values on a DataArray converts its values to numpy:
jax.jit(lambda data_array: data_array.values)(inputs)
def test_grad_function_with_xarray_variable_arguments(self):
x = xarray_jax.Variable(('lat', 'lon'), jnp.ones((3, 4), dtype=np.float32))
# For grad we still need a JAX scalar as the output:
jax.grad(lambda v: xarray_jax.jax_data(v.sum()))(x)
def test_jit_function_with_xarray_data_array_arguments_and_return(self):
inputs = xarray_jax.DataArray(
data=jnp.ones((3, 4), dtype=np.float32),
dims=('lat', 'lon'),
coords={'lat': np.arange(3),
'lon': np.arange(4) * 10})
fn = jax.jit(lambda v: v + 1)
_ = fn(inputs)
outputs = fn(inputs)
self.assertEqual(outputs.dims, inputs.dims)
chex.assert_trees_all_equal(outputs.coords, inputs.coords)
def test_jit_function_with_data_array_and_jax_coords(self):
inputs = xarray_jax.DataArray(
data=jnp.ones((3, 4), dtype=np.float32),
dims=('lat', 'lon'),
coords={'lat': np.arange(3)},
jax_coords={'lon': jnp.arange(4) * 10})
# Verify the jax_coord 'lon' retains jax data, and has not been created
# as an index coordinate:
self.assertIsInstance(inputs.coords['lon'].data, xarray_jax.JaxArrayWrapper)
self.assertNotIn('lon', inputs.indexes)
@jax.jit
def fn(v):
# The non-JAX coord is passed with numpy array data and an index:
self.assertIsInstance(v.coords['lat'].data, np.ndarray)
self.assertIn('lat', v.indexes)
# The jax_coord is passed with JAX array data:
self.assertIsInstance(v.coords['lon'].data, xarray_jax.JaxArrayWrapper)
self.assertNotIn('lon', v.indexes)
# Use the jax coord in the computation:
v = v + v.coords['lon']
# Return with an updated jax coord:
return xarray_jax.assign_jax_coords(v, lon=v.coords['lon'] + 1)
_ = fn(inputs)
outputs = fn(inputs)
# Verify the jax_coord 'lon' has jax data in the output too:
self.assertIsInstance(
outputs.coords['lon'].data, xarray_jax.JaxArrayWrapper)
self.assertNotIn('lon', outputs.indexes)
self.assertEqual(outputs.dims, inputs.dims)
chex.assert_trees_all_equal(outputs.coords['lat'], inputs.coords['lat'])
# Check our computations with the coordinate values worked:
chex.assert_trees_all_equal(
outputs.coords['lon'].data, (inputs.coords['lon']+1).data)
chex.assert_trees_all_equal(
outputs.data, (inputs + inputs.coords['lon']).data)
def test_jit_function_with_xarray_dataset_arguments_and_return(self):
foo = jnp.ones((3, 4), dtype=np.float32)
bar = jnp.ones((2, 3, 4), dtype=np.float32)
inputs = xarray_jax.Dataset(
data_vars={'foo': (('lat', 'lon'), foo),
'bar': (('time', 'lat', 'lon'), bar)},
coords={
'time': np.arange(2),
'lat': np.arange(3) * 10,
'lon': np.arange(4) * 10})
fn = jax.jit(lambda v: v + 1)
_ = fn(inputs)
outputs = fn(inputs)
self.assertEqual({'foo', 'bar'}, outputs.data_vars.keys())
self.assertEqual(inputs.foo.dims, outputs.foo.dims)
self.assertEqual(inputs.bar.dims, outputs.bar.dims)
chex.assert_trees_all_equal(outputs.coords, inputs.coords)
def test_jit_function_with_dataset_and_jax_coords(self):
foo = jnp.ones((3, 4), dtype=np.float32)
bar = jnp.ones((2, 3, 4), dtype=np.float32)
inputs = xarray_jax.Dataset(
data_vars={'foo': (('lat', 'lon'), foo),
'bar': (('time', 'lat', 'lon'), bar)},
coords={
'time': np.arange(2),
'lat': np.arange(3) * 10,
},
jax_coords={'lon': jnp.arange(4) * 10}
)
# Verify the jax_coord 'lon' retains jax data, and has not been created
# as an index coordinate:
self.assertIsInstance(inputs.coords['lon'].data, xarray_jax.JaxArrayWrapper)
self.assertNotIn('lon', inputs.indexes)
@jax.jit
def fn(v):
# The non-JAX coords are passed with numpy array data and an index:
self.assertIsInstance(v.coords['lat'].data, np.ndarray)
self.assertIn('lat', v.indexes)
# The jax_coord is passed with JAX array data:
self.assertIsInstance(v.coords['lon'].data, xarray_jax.JaxArrayWrapper)
self.assertNotIn('lon', v.indexes)
# Use the jax coord in the computation:
v = v + v.coords['lon']
# Return with an updated jax coord:
return xarray_jax.assign_jax_coords(v, lon=v.coords['lon'] + 1)
_ = fn(inputs)
outputs = fn(inputs)
# Verify the jax_coord 'lon' has jax data in the output too:
self.assertIsInstance(
outputs.coords['lon'].data, xarray_jax.JaxArrayWrapper)
self.assertNotIn('lon', outputs.indexes)
self.assertEqual(outputs.dims, inputs.dims)
chex.assert_trees_all_equal(outputs.coords['lat'], inputs.coords['lat'])
# Check our computations with the coordinate values worked:
chex.assert_trees_all_equal(
(outputs.coords['lon']).data,
(inputs.coords['lon']+1).data,
)
outputs_dict = {key: outputs[key].data for key in outputs}
inputs_and_inputs_coords_dict = {
key: (inputs + inputs.coords['lon'])[key].data
for key in inputs + inputs.coords['lon']
}
chex.assert_trees_all_equal(outputs_dict, inputs_and_inputs_coords_dict)
def test_flatten_unflatten_variable(self):
variable = xarray_jax.Variable(
('lat', 'lon'), jnp.ones((3, 4), dtype=np.float32))
children, aux = xarray_jax._flatten_variable(variable)
# Check auxiliary info is hashable/comparable (important for jax.jit):
hash(aux)
self.assertEqual(aux, aux)
roundtrip = xarray_jax._unflatten_variable(aux, children)
self.assertTrue(variable.equals(roundtrip))
def test_flatten_unflatten_data_array(self):
data_array = xarray_jax.DataArray(
data=jnp.ones((3, 4), dtype=np.float32),
dims=('lat', 'lon'),
coords={'lat': np.arange(3)},
jax_coords={'lon': np.arange(4) * 10},
)
children, aux = xarray_jax._flatten_data_array(data_array)
# Check auxiliary info is hashable/comparable (important for jax.jit):
hash(aux)
self.assertEqual(aux, aux)
roundtrip = xarray_jax._unflatten_data_array(aux, children)
self.assertTrue(data_array.equals(roundtrip))
def test_flatten_unflatten_dataset(self):
foo = jnp.ones((3, 4), dtype=np.float32)
bar = jnp.ones((2, 3, 4), dtype=np.float32)
dataset = xarray_jax.Dataset(
data_vars={'foo': (('lat', 'lon'), foo),
'bar': (('time', 'lat', 'lon'), bar)},
coords={
'time': np.arange(2),
'lat': np.arange(3) * 10},
jax_coords={
'lon': np.arange(4) * 10})
children, aux = xarray_jax._flatten_dataset(dataset)
# Check auxiliary info is hashable/comparable (important for jax.jit):
hash(aux)
self.assertEqual(aux, aux)
roundtrip = xarray_jax._unflatten_dataset(aux, children)
self.assertTrue(dataset.equals(roundtrip))
def test_flatten_unflatten_added_dim(self):
data_array = xarray_jax.DataArray(
data=jnp.ones((3, 4), dtype=np.float32),
dims=('lat', 'lon'),
coords={'lat': np.arange(3),
'lon': np.arange(4) * 10})
leaves, treedef = jax.tree_util.tree_flatten(data_array)
leaves = [jnp.expand_dims(x, 0) for x in leaves]
with xarray_jax.dims_change_on_unflatten(lambda dims: ('new',) + dims):
with_new_dim = jax.tree_util.tree_unflatten(treedef, leaves)
self.assertEqual(('new', 'lat', 'lon'), with_new_dim.dims)
xarray.testing.assert_identical(
jax.device_get(data_array),
jax.device_get(with_new_dim.isel(new=0)))
def test_map_added_dim(self):
data_array = xarray_jax.DataArray(
data=jnp.ones((3, 4), dtype=np.float32),
dims=('lat', 'lon'),
coords={'lat': np.arange(3),
'lon': np.arange(4) * 10})
with xarray_jax.dims_change_on_unflatten(lambda dims: ('new',) + dims):
with_new_dim = jax.tree_util.tree_map(lambda x: jnp.expand_dims(x, 0),
data_array)
self.assertEqual(('new', 'lat', 'lon'), with_new_dim.dims)
xarray.testing.assert_identical(
jax.device_get(data_array),
jax.device_get(with_new_dim.isel(new=0)))
def test_map_remove_dim(self):
foo = jnp.ones((1, 3, 4), dtype=np.float32)
bar = jnp.ones((1, 2, 3, 4), dtype=np.float32)
dataset = xarray_jax.Dataset(
data_vars={'foo': (('batch', 'lat', 'lon'), foo),
'bar': (('batch', 'time', 'lat', 'lon'), bar)},
coords={
'batch': np.array([123]),
'time': np.arange(2),
'lat': np.arange(3) * 10,
'lon': np.arange(4) * 10})
with xarray_jax.dims_change_on_unflatten(lambda dims: dims[1:]):
with_removed_dim = jax.tree_util.tree_map(lambda x: jnp.squeeze(x, 0),
dataset)
self.assertEqual(('lat', 'lon'), with_removed_dim['foo'].dims)
self.assertEqual(('time', 'lat', 'lon'), with_removed_dim['bar'].dims)
self.assertNotIn('batch', with_removed_dim.dims)
self.assertNotIn('batch', with_removed_dim.coords)
xarray.testing.assert_identical(
jax.device_get(dataset.isel(batch=0, drop=True)),
jax.device_get(with_removed_dim))
def test_pmap(self):
devices = jax.local_device_count()
foo = jnp.zeros((devices, 3, 4), dtype=np.float32)
bar = jnp.zeros((devices, 2, 3, 4), dtype=np.float32)
dataset = xarray_jax.Dataset({
'foo': (('device', 'lat', 'lon'), foo),
'bar': (('device', 'time', 'lat', 'lon'), bar)})
def func(d):
self.assertNotIn('device', d.dims)
return d + 1
func = xarray_jax.pmap(func, dim='device')
result = func(dataset)
xarray.testing.assert_identical(
jax.device_get(dataset + 1),
jax.device_get(result))
# Can call it again with a different argument structure (it will recompile
# under the hood but should work):
dataset = dataset.drop_vars('foo')
result = func(dataset)
xarray.testing.assert_identical(
jax.device_get(dataset + 1),
jax.device_get(result))
def test_pmap_with_jax_coords(self):
devices = jax.local_device_count()
foo = jnp.zeros((devices, 3, 4), dtype=np.float32)
bar = jnp.zeros((devices, 2, 3, 4), dtype=np.float32)
time = jnp.zeros((devices, 2), dtype=np.float32)
dataset = xarray_jax.Dataset(
{'foo': (('device', 'lat', 'lon'), foo),
'bar': (('device', 'time', 'lat', 'lon'), bar)},
coords={
'lat': np.arange(3),
'lon': np.arange(4),
},
jax_coords={
# Currently any jax_coords need a leading device dimension to use
# with pmap, same as for data_vars.
# TODO(matthjw): have pmap automatically broadcast to all devices
# where the device dimension is not present.
'time': xarray_jax.Variable(('device', 'time'), time),
}
)
def func(d):
self.assertNotIn('device', d.dims)
self.assertNotIn('device', d.coords['time'].dims)
# The jax_coord 'time' should be passed in backed by a JAX array, but
# not as an index coordinate.
self.assertIsInstance(d.coords['time'].data, xarray_jax.JaxArrayWrapper)
self.assertNotIn('time', d.indexes)
return d + 1
func = xarray_jax.pmap(func, dim='device')
result = func(dataset)
xarray.testing.assert_identical(
jax.device_get(dataset + 1),
jax.device_get(result))
# Can call it again with a different argument structure (it will recompile
# under the hood but should work):
dataset = dataset.drop_vars('foo')
result = func(dataset)
xarray.testing.assert_identical(
jax.device_get(dataset + 1),
jax.device_get(result))
def test_pmap_with_tree_mix_of_xarray_and_jax_array(self):
devices = jax.local_device_count()
data_array = xarray_jax.DataArray(
data=jnp.ones((devices, 3, 4), dtype=np.float32),
dims=('device', 'lat', 'lon'))
plain_array = jnp.ones((devices, 2), dtype=np.float32)
inputs = {'foo': data_array,
'bar': plain_array}
def func(x):
return x['foo'] + 1, x['bar'] + 1
func = xarray_jax.pmap(func, dim='device')
result_foo, result_bar = func(inputs)
xarray.testing.assert_identical(
jax.device_get(inputs['foo'] + 1),
jax.device_get(result_foo))
np.testing.assert_array_equal(
jax.device_get(inputs['bar'] + 1),
jax.device_get(result_bar))
def test_pmap_complains_when_dim_not_first(self):
devices = jax.local_device_count()
data_array = xarray_jax.DataArray(
data=jnp.ones((3, devices, 4), dtype=np.float32),
dims=('lat', 'device', 'lon'))
func = xarray_jax.pmap(lambda x: x+1, dim='device')
with self.assertRaisesRegex(
ValueError, 'Expected dim device at index 0, found at 1'):
func(data_array)
def test_apply_ufunc(self):
inputs = xarray_jax.DataArray(
data=jnp.asarray([[1, 2], [3, 4]]),
dims=('x', 'y'),
coords={'x': [0, 1],
'y': [2, 3]})
result = xarray_jax.apply_ufunc(
lambda x: jnp.sum(x, axis=-1),
inputs,
input_core_dims=[['x']])
expected_result = xarray_jax.DataArray(
data=[4, 6],
dims=('y',),
coords={'y': [2, 3]})
xarray.testing.assert_identical(expected_result, jax.device_get(result))
def test_apply_ufunc_multiple_return_values(self):
def ufunc(array):
return jnp.min(array, axis=-1), jnp.max(array, axis=-1)
inputs = xarray_jax.DataArray(
data=jnp.asarray([[1, 4], [3, 2]]),
dims=('x', 'y'),
coords={'x': [0, 1],
'y': [2, 3]})
result = xarray_jax.apply_ufunc(
ufunc, inputs, input_core_dims=[['x']], output_core_dims=[[], []])
expected = (
# Mins:
xarray_jax.DataArray(
data=[1, 2],
dims=('y',),
coords={'y': [2, 3]}
),
# Maxes:
xarray_jax.DataArray(
data=[3, 4],
dims=('y',),
coords={'y': [2, 3]}
)
)
xarray.testing.assert_identical(expected[0], jax.device_get(result[0]))
xarray.testing.assert_identical(expected[1], jax.device_get(result[1]))
if __name__ == '__main__':
absltest.main()
|
graphcast-main
|
graphcast/xarray_jax_test.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for rolling out models."""
from typing import Iterator
from absl import logging
import chex
import dask
from graphcast import xarray_tree
import jax
import numpy as np
import typing_extensions
import xarray
class PredictorFn(typing_extensions.Protocol):
"""Functional version of base.Predictor.__call__ with explicit rng."""
def __call__(
self, rng: chex.PRNGKey, inputs: xarray.Dataset,
targets_template: xarray.Dataset,
forcings: xarray.Dataset,
**optional_kwargs,
) -> xarray.Dataset:
...
def chunked_prediction(
predictor_fn: PredictorFn,
rng: chex.PRNGKey,
inputs: xarray.Dataset,
targets_template: xarray.Dataset,
forcings: xarray.Dataset,
num_steps_per_chunk: int = 1,
verbose: bool = False,
) -> xarray.Dataset:
"""Outputs a long trajectory by iteratively concatenating chunked predictions.
Args:
predictor_fn: Function to use to make predictions for each chunk.
rng: Random key.
inputs: Inputs for the model.
targets_template: Template for the target prediction, requires targets
equispaced in time.
forcings: Optional forcing for the model.
num_steps_per_chunk: How many of the steps in `targets_template` to predict
at each call of `predictor_fn`. It must evenly divide the number of
steps in `targets_template`.
verbose: Whether to log the current chunk being predicted.
Returns:
Predictions for the targets template.
"""
chunks_list = []
for prediction_chunk in chunked_prediction_generator(
predictor_fn=predictor_fn,
rng=rng,
inputs=inputs,
targets_template=targets_template,
forcings=forcings,
num_steps_per_chunk=num_steps_per_chunk,
verbose=verbose):
chunks_list.append(jax.device_get(prediction_chunk))
return xarray.concat(chunks_list, dim="time")
def chunked_prediction_generator(
predictor_fn: PredictorFn,
rng: chex.PRNGKey,
inputs: xarray.Dataset,
targets_template: xarray.Dataset,
forcings: xarray.Dataset,
num_steps_per_chunk: int = 1,
verbose: bool = False,
) -> Iterator[xarray.Dataset]:
"""Outputs a long trajectory by yielding chunked predictions.
Args:
predictor_fn: Function to use to make predictions for each chunk.
rng: Random key.
inputs: Inputs for the model.
targets_template: Template for the target prediction, requires targets
equispaced in time.
forcings: Optional forcing for the model.
num_steps_per_chunk: How many of the steps in `targets_template` to predict
at each call of `predictor_fn`. It must evenly divide the number of
steps in `targets_template`.
verbose: Whether to log the current chunk being predicted.
Yields:
The predictions for each chunked step of the chunked rollout, such that
if all predictions are concatenated in time they would match the targets
template in structure.
"""
# Create copies to avoid mutating inputs.
inputs = xarray.Dataset(inputs)
targets_template = xarray.Dataset(targets_template)
forcings = xarray.Dataset(forcings)
if "datetime" in inputs.coords:
del inputs.coords["datetime"]
if "datetime" in targets_template.coords:
output_datetime = targets_template.coords["datetime"]
del targets_template.coords["datetime"]
else:
output_datetime = None
if "datetime" in forcings.coords:
del forcings.coords["datetime"]
num_target_steps = targets_template.dims["time"]
num_chunks, remainder = divmod(num_target_steps, num_steps_per_chunk)
if remainder != 0:
raise ValueError(
f"The number of steps per chunk {num_steps_per_chunk} must "
f"evenly divide the number of target steps {num_target_steps} ")
if len(np.unique(np.diff(targets_template.coords["time"].data))) > 1:
raise ValueError("The targets time coordinates must be evenly spaced")
# Our template targets will always have a time axis corresponding to the
# timedeltas of the first chunk.
targets_chunk_time = targets_template.time.isel(
time=slice(0, num_steps_per_chunk))
current_inputs = inputs
for chunk_index in range(num_chunks):
if verbose:
logging.info("Chunk %d/%d", chunk_index, num_chunks)
logging.flush()
# Select targets for the time period that we are predicting for this chunk.
target_offset = num_steps_per_chunk * chunk_index
target_slice = slice(target_offset, target_offset + num_steps_per_chunk)
current_targets_template = targets_template.isel(time=target_slice)
# Replace the timedeltas with the ones corresponding to the first chunk, so we
# don't recompile at every iteration, keeping the actual target times aside so
# they can be assigned back to the predictions below.
actual_target_time = current_targets_template.coords["time"]
current_targets_template = current_targets_template.assign_coords(
time=targets_chunk_time).compute()
current_forcings = forcings.isel(time=target_slice)
current_forcings = current_forcings.assign_coords(time=targets_chunk_time)
current_forcings = current_forcings.compute()
# Make predictions for the chunk.
rng, this_rng = jax.random.split(rng)
predictions = predictor_fn(
rng=this_rng,
inputs=current_inputs,
targets_template=current_targets_template,
forcings=current_forcings)
next_frame = xarray.merge([predictions, current_forcings])
current_inputs = _get_next_inputs(current_inputs, next_frame)
# At this point we can assign the actual targets time coordinates.
predictions = predictions.assign_coords(time=actual_target_time)
if output_datetime is not None:
predictions.coords["datetime"] = output_datetime.isel(
time=target_slice)
yield predictions
del predictions
def _get_next_inputs(
prev_inputs: xarray.Dataset, next_frame: xarray.Dataset,
) -> xarray.Dataset:
"""Computes next inputs, from previous inputs and predictions."""
# Make sure we are predicting all inputs that have a time axis.
non_predicted_or_forced_inputs = list(
set(prev_inputs.keys()) - set(next_frame.keys()))
if "time" in prev_inputs[non_predicted_or_forced_inputs].dims:
raise ValueError(
"Found an input with a time index that is not predicted or forced.")
# Keys we need to copy from predictions to inputs.
next_inputs_keys = list(
set(next_frame.keys()).intersection(set(prev_inputs.keys())))
next_inputs = next_frame[next_inputs_keys]
# Concatenate the next frame with the previous inputs, crop what we don't need
# and shift the timedelta coordinates, so we don't recompile at every iteration.
num_inputs = prev_inputs.dims["time"]
return (
xarray.concat(
[prev_inputs, next_inputs], dim="time", data_vars="different")
.tail(time=num_inputs)
.assign_coords(time=prev_inputs.coords["time"]))
def extend_targets_template(
targets_template: xarray.Dataset,
required_num_steps: int) -> xarray.Dataset:
"""Extends `targets_template` to `required_num_steps` with lazy arrays.
It uses lazy dask arrays of zeros, so it does not require instantiating the
array in memory.
Args:
targets_template: Input template to extend.
required_num_steps: Number of steps required in the returned template.
Returns:
An `xarray.Dataset` with the same variables and timestep as `targets_template`,
filled with `dask.array.zeros`, whose time axis has `required_num_steps` steps.
"""
# Extend the "time" and "datetime" coordinates
time = targets_template.coords["time"]
# Assume the first target lead time equals the timestep, and check that the
# remaining target times are evenly spaced by it.
timestep = time[0].data
if time.shape[0] > 1:
assert np.all(timestep == time[1:] - time[:-1])
extended_time = (np.arange(required_num_steps) + 1) * timestep
if "datetime" in targets_template.coords:
datetime = targets_template.coords["datetime"]
extended_datetime = (datetime[0].data - timestep) + extended_time
else:
extended_datetime = None
# Replace the values with empty dask arrays extending the time coordinates.
datetime = targets_template.coords["time"]
def extend_time(data_array: xarray.DataArray) -> xarray.DataArray:
dims = data_array.dims
shape = list(data_array.shape)
shape[dims.index("time")] = required_num_steps
dask_data = dask.array.zeros(
shape=tuple(shape),
chunks=-1, # Will give chunk info directly to `ChunksToZarr`.
dtype=data_array.dtype)
coords = dict(data_array.coords)
coords["time"] = extended_time
if extended_datetime is not None:
coords["datetime"] = ("time", extended_datetime)
return xarray.DataArray(
dims=dims,
data=dask_data,
coords=coords)
return xarray_tree.map_structure(extend_time, targets_template)
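# Toy usage sketch for `chunked_prediction` above. Illustrative only: the
# predictor, variable names and shapes below are made up (not a trained
# GraphCast model). The fake predictor returns "last input + 1", so a 4-step
# rollout done in two 2-step chunks yields x = [1, 1, 2, 2] along time.
import pandas as pd
def _toy_predictor_fn(rng, inputs, targets_template, forcings):
  del rng, forcings  # Unused by the toy predictor.
  last = inputs["x"].isel(time=-1, drop=True)
  return xarray.Dataset({"x": last + xarray.ones_like(targets_template["x"])})
_toy_inputs = xarray.Dataset(
    {"x": (("batch", "time"), np.zeros((1, 2), dtype=np.float32))},
    coords={"time": pd.to_timedelta([-6, 0], unit="h")})
_toy_targets_template = xarray.Dataset(
    {"x": (("batch", "time"), np.full((1, 4), np.nan, dtype=np.float32))},
    coords={"time": pd.to_timedelta([6, 12, 18, 24], unit="h")})
_toy_forcings = xarray.Dataset(
    {"f": (("batch", "time"), np.zeros((1, 4), dtype=np.float32))},
    coords={"time": pd.to_timedelta([6, 12, 18, 24], unit="h")})
_toy_predictions = chunked_prediction(
    _toy_predictor_fn,
    rng=jax.random.PRNGKey(0),
    inputs=_toy_inputs,
    targets_template=_toy_targets_template,
    forcings=_toy_forcings,
    num_steps_per_chunk=2)
np.testing.assert_array_equal(_toy_predictions["x"].values, [[1., 1., 2., 2.]])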
|
graphcast-main
|
graphcast/rollout.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for converting from regular grids on a sphere, to triangular meshes."""
from graphcast import icosahedral_mesh
import numpy as np
import scipy
import trimesh
def _grid_lat_lon_to_coordinates(
grid_latitude: np.ndarray, grid_longitude: np.ndarray) -> np.ndarray:
"""Lat [num_lat] lon [num_lon] to 3d coordinates [num_lat, num_lon, 3]."""
# Convert to spherical coordinates phi and theta defined in the grid.
# Each [num_latitude_points, num_longitude_points]
phi_grid, theta_grid = np.meshgrid(
np.deg2rad(grid_longitude),
np.deg2rad(90 - grid_latitude))
# [num_latitude_points, num_longitude_points, 3]
# Note this assumes unit radius, since for now we model the earth as a
# sphere of unit radius, and keep any vertical dimension as a regular grid.
return np.stack(
[np.cos(phi_grid)*np.sin(theta_grid),
np.sin(phi_grid)*np.sin(theta_grid),
np.cos(theta_grid)], axis=-1)
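# Illustrative sanity check (not part of the original module): the conversion
# above maps (lat=0, lon=0) to the x-axis and always yields unit vectors.
_demo_xyz = _grid_lat_lon_to_coordinates(
    np.array([0.0, 45.0]), np.array([0.0, 90.0]))
assert _demo_xyz.shape == (2, 2, 3)  # [num_lat, num_lon, 3]
np.testing.assert_allclose(np.linalg.norm(_demo_xyz, axis=-1), 1.0)
np.testing.assert_allclose(_demo_xyz[0, 0], [1.0, 0.0, 0.0], atol=1e-12)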
def radius_query_indices(
*,
grid_latitude: np.ndarray,
grid_longitude: np.ndarray,
mesh: icosahedral_mesh.TriangularMesh,
radius: float) -> tuple[np.ndarray, np.ndarray]:
"""Returns mesh-grid edge indices for radius query.
Args:
grid_latitude: Latitude values for the grid [num_lat_points]
grid_longitude: Longitude values for the grid [num_lon_points]
mesh: Mesh object.
radius: Radius of connectivity in R3, for a sphere of unit radius.
Returns:
tuple with `grid_indices` and `mesh_indices` indicating edges between the
grid and the mesh such that the distances in a straight line (not geodesic)
are smaller than or equal to `radius`.
* grid_indices: Indices of shape [num_edges], that index into a
[num_lat_points, num_lon_points] grid, after flattening the leading axes.
* mesh_indices: Indices of shape [num_edges], that index into mesh.vertices.
"""
# [num_grid_points=num_lat_points * num_lon_points, 3]
grid_positions = _grid_lat_lon_to_coordinates(
grid_latitude, grid_longitude).reshape([-1, 3])
# [num_mesh_points, 3]
mesh_positions = mesh.vertices
kd_tree = scipy.spatial.cKDTree(mesh_positions)
# [num_grid_points, num_mesh_points_per_grid_point]
# Note `num_mesh_points_per_grid_point` is not constant, so this is a list
# of arrays, rather than a 2d array.
query_indices = kd_tree.query_ball_point(x=grid_positions, r=radius)
grid_edge_indices = []
mesh_edge_indices = []
for grid_index, mesh_neighbors in enumerate(query_indices):
grid_edge_indices.append(np.repeat(grid_index, len(mesh_neighbors)))
mesh_edge_indices.append(mesh_neighbors)
# [num_edges]
grid_edge_indices = np.concatenate(grid_edge_indices, axis=0).astype(int)
mesh_edge_indices = np.concatenate(mesh_edge_indices, axis=0).astype(int)
return grid_edge_indices, mesh_edge_indices
def in_mesh_triangle_indices(
*,
grid_latitude: np.ndarray,
grid_longitude: np.ndarray,
mesh: icosahedral_mesh.TriangularMesh) -> tuple[np.ndarray, np.ndarray]:
"""Returns mesh-grid edge indices for grid points contained in mesh triangles.
Args:
grid_latitude: Latitude values for the grid [num_lat_points]
grid_longitude: Longitude values for the grid [num_lon_points]
mesh: Mesh object.
Returns:
tuple with `grid_indices` and `mesh_indices` indicating edges between the
grid and the mesh vertices of the triangle that contain each grid point.
The number of edges is always num_lat_points * num_lon_points * 3
* grid_indices: Indices of shape [num_edges], that index into a
[num_lat_points, num_lon_points] grid, after flattening the leading axes.
* mesh_indices: Indices of shape [num_edges], that index into mesh.vertices.
"""
# [num_grid_points=num_lat_points * num_lon_points, 3]
grid_positions = _grid_lat_lon_to_coordinates(
grid_latitude, grid_longitude).reshape([-1, 3])
mesh_trimesh = trimesh.Trimesh(vertices=mesh.vertices, faces=mesh.faces)
# [num_grid_points] with mesh face indices for each grid point.
_, _, query_face_indices = trimesh.proximity.closest_point(
mesh_trimesh, grid_positions)
# [num_grid_points, 3] with mesh node indices for each grid point.
mesh_edge_indices = mesh.faces[query_face_indices]
# [num_grid_points, 3] with grid node indices, where every row simply contains
# the row (grid_point) index.
grid_indices = np.arange(grid_positions.shape[0])
grid_edge_indices = np.tile(grid_indices.reshape([-1, 1]), [1, 3])
# Flatten to get a regular list.
# [num_edges=num_grid_points*3]
mesh_edge_indices = mesh_edge_indices.reshape([-1])
grid_edge_indices = grid_edge_indices.reshape([-1])
return grid_edge_indices, mesh_edge_indices
|
graphcast-main
|
graphcast/grid_mesh_connectivity.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for graphcast.grid_mesh_connectivity."""
from absl.testing import absltest
from graphcast import grid_mesh_connectivity
from graphcast import icosahedral_mesh
import numpy as np
class GridMeshConnectivityTest(absltest.TestCase):
def test_grid_lat_lon_to_coordinates(self):
# Latitudes at 45-degree intervals, longitudes at 90-degree intervals.
grid_latitude = np.array([-45., 0., 45])
grid_longitude = np.array([0., 90., 180., 270.])
inv_sqrt2 = 1 / np.sqrt(2)
expected_coordinates = np.array([
[[inv_sqrt2, 0., -inv_sqrt2],
[0., inv_sqrt2, -inv_sqrt2],
[-inv_sqrt2, 0., -inv_sqrt2],
[0., -inv_sqrt2, -inv_sqrt2]],
[[1., 0., 0.],
[0., 1., 0.],
[-1., 0., 0.],
[0., -1., 0.]],
[[inv_sqrt2, 0., inv_sqrt2],
[0., inv_sqrt2, inv_sqrt2],
[-inv_sqrt2, 0., inv_sqrt2],
[0., -inv_sqrt2, inv_sqrt2]],
])
coordinates = grid_mesh_connectivity._grid_lat_lon_to_coordinates(
grid_latitude, grid_longitude)
np.testing.assert_allclose(expected_coordinates, coordinates, atol=1e-15)
def test_radius_query_indices_smoke(self):
# TODO(alvarosg): Add non-smoke test?
grid_latitude = np.linspace(-75, 75, 6)
grid_longitude = np.arange(12) * 30.
mesh = icosahedral_mesh.get_hierarchy_of_triangular_meshes_for_sphere(
splits=3)[-1]
grid_mesh_connectivity.radius_query_indices(
grid_latitude=grid_latitude,
grid_longitude=grid_longitude,
mesh=mesh, radius=0.2)
def test_in_mesh_triangle_indices_smoke(self):
# TODO(alvarosg): Add non-smoke test?
grid_latitude = np.linspace(-75, 75, 6)
grid_longitude = np.arange(12) * 30.
mesh = icosahedral_mesh.get_hierarchy_of_triangular_meshes_for_sphere(
splits=3)[-1]
grid_mesh_connectivity.in_mesh_triangle_indices(
grid_latitude=grid_latitude,
grid_longitude=grid_longitude,
mesh=mesh)
if __name__ == "__main__":
absltest.main()
|
graphcast-main
|
graphcast/grid_mesh_connectivity_test.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss functions (and terms for use in loss functions) used for weather."""
from typing import Mapping
from graphcast import xarray_tree
import numpy as np
from typing_extensions import Protocol
import xarray
LossAndDiagnostics = tuple[xarray.DataArray, xarray.Dataset]
class LossFunction(Protocol):
"""A loss function.
This is a protocol so it's fine to use a plain function which 'quacks like'
this. This is just to document the interface.
"""
def __call__(self,
predictions: xarray.Dataset,
targets: xarray.Dataset,
**optional_kwargs) -> LossAndDiagnostics:
"""Computes a loss function.
Args:
predictions: Dataset of predictions.
targets: Dataset of targets.
**optional_kwargs: Implementations may support extra optional kwargs.
Returns:
loss: A DataArray with dimensions ('batch',) containing losses for each
element of the batch. These will be averaged to give the final
loss, locally and across replicas.
diagnostics: Mapping of additional quantities to log by name alongside the
loss. These will typically correspond to terms in the loss. They
should also have dimensions ('batch',) and will be averaged over the
batch before logging.
"""
def weighted_mse_per_level(
predictions: xarray.Dataset,
targets: xarray.Dataset,
per_variable_weights: Mapping[str, float],
) -> LossAndDiagnostics:
"""Latitude- and pressure-level-weighted MSE loss."""
def loss(prediction, target):
loss = (prediction - target)**2
loss *= normalized_latitude_weights(target).astype(loss.dtype)
if 'level' in target.dims:
loss *= normalized_level_weights(target).astype(loss.dtype)
return _mean_preserving_batch(loss)
losses = xarray_tree.map_structure(loss, predictions, targets)
return sum_per_variable_losses(losses, per_variable_weights)
def _mean_preserving_batch(x: xarray.DataArray) -> xarray.DataArray:
return x.mean([d for d in x.dims if d != 'batch'], skipna=False)
def sum_per_variable_losses(
per_variable_losses: Mapping[str, xarray.DataArray],
weights: Mapping[str, float],
) -> LossAndDiagnostics:
"""Weighted sum of per-variable losses."""
if not set(weights.keys()).issubset(set(per_variable_losses.keys())):
raise ValueError(
'Passing a weight that does not correspond to any variable '
f'{set(weights.keys())-set(per_variable_losses.keys())}')
weighted_per_variable_losses = {
name: loss * weights.get(name, 1)
for name, loss in per_variable_losses.items()
}
total = xarray.concat(
weighted_per_variable_losses.values(), dim='variable', join='exact').sum(
'variable', skipna=False)
return total, per_variable_losses
def normalized_level_weights(data: xarray.DataArray) -> xarray.DataArray:
"""Weights proportional to pressure at each level."""
level = data.coords['level']
return level / level.mean(skipna=False)
def normalized_latitude_weights(data: xarray.DataArray) -> xarray.DataArray:
"""Weights based on latitude, roughly proportional to grid cell area.
This method supports two use cases only (both for equispaced values):
* Latitude values such that the closest value to the pole is at latitude
(90 - d_lat/2), where d_lat is the difference between contiguous latitudes.
For example: [-89, -87, -85, ..., 85, 87, 89] (d_lat = 2)
In this case each point with `lat` value represents a sphere slice between
`lat - d_lat/2` and `lat + d_lat/2`, and the area of this slice would be
proportional to:
`sin(lat + d_lat/2) - sin(lat - d_lat/2) = 2 * sin(d_lat/2) * cos(lat)`, and
we can simply omit the term `2 * sin(d_lat/2)` which is just a constant
that cancels during normalization.
* Latitude values that fall exactly at the poles.
For example: [-90, -88, -86, ..., 86, 88, 90] (d_lat = 2)
In this case each point with `lat` value also represents
a sphere slice between `lat - d_lat/2` and `lat + d_lat/2`,
except for the points at the poles, that represent a slice between
`90 - d_lat/2` and `90` or, `-90` and `-90 + d_lat/2`.
The areas of the first type of point are still proportional to:
* sin(lat + d_lat/2) - sin(lat - d_lat/2) = 2 * sin(d_lat/2) * cos(lat)
but for the points at the poles now is:
* sin(90) - sin(90 - d_lat/2) = 2 * sin(d_lat/4) ^ 2
and we will be using these weights, depending on whether we are looking at
pole cells, or non-pole cells (omitting the common factor of 2 which will be
absorbed by the normalization).
It can be shown via a limit, or simple geometry, that in the small-angle
regime the proportion of area per pole point is equal to 1/8th of
the proportion of area covered by each of the nearest non-pole points, and
this is checked in the unit tests.
Args:
data: `DataArray` with latitude coordinates.
Returns:
Unit mean latitude weights.
"""
latitude = data.coords['lat']
if np.any(np.isclose(np.abs(latitude), 90.)):
weights = _weight_for_latitude_vector_with_poles(latitude)
else:
weights = _weight_for_latitude_vector_without_poles(latitude)
return weights / weights.mean(skipna=False)
def _weight_for_latitude_vector_without_poles(latitude):
"""Weights for uniform latitudes of the form [+-90-+d/2, ..., -+90+-d/2]."""
delta_latitude = np.abs(_check_uniform_spacing_and_get_delta(latitude))
if (not np.isclose(np.max(latitude), 90 - delta_latitude/2) or
not np.isclose(np.min(latitude), -90 + delta_latitude/2)):
raise ValueError(
f'Latitude vector {latitude} does not start/end at '
'+- (90 - delta_latitude/2) degrees.')
return np.cos(np.deg2rad(latitude))
def _weight_for_latitude_vector_with_poles(latitude):
"""Weights for uniform latitudes of the form [+- 90, ..., -+90]."""
delta_latitude = np.abs(_check_uniform_spacing_and_get_delta(latitude))
if (not np.isclose(np.max(latitude), 90.) or
not np.isclose(np.min(latitude), -90.)):
raise ValueError(
f'Latitude vector {latitude} does not start/end at +- 90 degrees.')
weights = np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(delta_latitude/2))
# The two checks above are enough to guarantee that the latitudes are sorted,
# so the extremes are the poles.
weights[[0, -1]] = np.sin(np.deg2rad(delta_latitude/4)) ** 2
return weights
def _check_uniform_spacing_and_get_delta(vector):
diff = np.diff(vector)
if not np.all(np.isclose(diff[0], diff)):
raise ValueError(f'Vector with diffs {diff} is not uniformly spaced.')
return diff[0]
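# Illustrative check (not part of the original module) of the 1/8 pole-area
# ratio derived in the `normalized_latitude_weights` docstring: with a small
# 1-degree spacing the pole weight is ~1/8 of its nearest non-pole neighbour.
_lat = np.linspace(-90.0, 90.0, 181)  # Poles included, d_lat = 1 degree.
_demo = xarray.DataArray(np.zeros(_lat.shape), dims=('lat',), coords={'lat': _lat})
_weights = normalized_latitude_weights(_demo)
np.testing.assert_allclose(float(_weights[0] / _weights[1]), 1 / 8, rtol=1e-3)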
|
graphcast-main
|
graphcast/losses.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for icosahedral_mesh."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from graphcast import icosahedral_mesh
import numpy as np
def _get_mesh_spec(splits: int):
"""Returns size of the final icosahedral mesh resulting from the splitting."""
num_vertices = 12
num_faces = 20
for _ in range(splits):
# Each previous face adds three new vertices, but each vertex is shared
# by two faces.
num_vertices += num_faces * 3 // 2
num_faces *= 4
return num_vertices, num_faces
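# Worked example of the recurrence above (illustrative): each split turns every
# face into 4 faces and adds 3 new vertices per old face, each shared by two
# faces. So splits=1 gives 12 + 20*3//2 = 42 vertices and 20*4 = 80 faces, and
# splits=2 gives 42 + 80*3//2 = 162 vertices and 320 faces.
assert _get_mesh_spec(1) == (42, 80)
assert _get_mesh_spec(2) == (162, 320)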
class IcosahedralMeshTest(parameterized.TestCase):
def test_icosahedron(self):
mesh = icosahedral_mesh.get_icosahedron()
_assert_valid_mesh(
mesh, num_expected_vertices=12, num_expected_faces=20)
@parameterized.parameters(list(range(5)))
def test_get_hierarchy_of_triangular_meshes_for_sphere(self, splits):
meshes = icosahedral_mesh.get_hierarchy_of_triangular_meshes_for_sphere(
splits=splits)
prev_vertices = None
for mesh_i, mesh in enumerate(meshes):
# Check that `mesh` is valid.
num_expected_vertices, num_expected_faces = _get_mesh_spec(mesh_i)
_assert_valid_mesh(mesh, num_expected_vertices, num_expected_faces)
# Check that the first N vertices from this mesh match all of the
# vertices from the previous mesh.
if prev_vertices is not None:
leading_mesh_vertices = mesh.vertices[:prev_vertices.shape[0]]
np.testing.assert_array_equal(leading_mesh_vertices, prev_vertices)
# Increase the expected/previous values for the next iteration.
if mesh_i < len(meshes) - 1:
prev_vertices = mesh.vertices
@parameterized.parameters(list(range(4)))
def test_merge_meshes(self, splits):
mesh_hierarchy = (
icosahedral_mesh.get_hierarchy_of_triangular_meshes_for_sphere(
splits=splits))
mesh = icosahedral_mesh.merge_meshes(mesh_hierarchy)
expected_faces = np.concatenate([m.faces for m in mesh_hierarchy], axis=0)
np.testing.assert_array_equal(mesh.vertices, mesh_hierarchy[-1].vertices)
np.testing.assert_array_equal(mesh.faces, expected_faces)
def test_faces_to_edges(self):
faces = np.array([[0, 1, 2],
[3, 4, 5]])
# This also documents the order of the edges returned by the method.
expected_edges = np.array(
[[0, 1],
[3, 4],
[1, 2],
[4, 5],
[2, 0],
[5, 3]])
expected_senders = expected_edges[:, 0]
expected_receivers = expected_edges[:, 1]
senders, receivers = icosahedral_mesh.faces_to_edges(faces)
np.testing.assert_array_equal(senders, expected_senders)
np.testing.assert_array_equal(receivers, expected_receivers)
def _assert_valid_mesh(mesh, num_expected_vertices, num_expected_faces):
vertices = mesh.vertices
faces = mesh.faces
chex.assert_shape(vertices, [num_expected_vertices, 3])
chex.assert_shape(faces, [num_expected_faces, 3])
# Vertices norm should be 1.
vertices_norm = np.linalg.norm(vertices, axis=-1)
np.testing.assert_allclose(vertices_norm, 1., rtol=1e-6)
_assert_positive_face_orientation(vertices, faces)
def _assert_positive_face_orientation(vertices, faces):
# Obtain a unit vector that points in the direction of the face.
face_orientation = np.cross(vertices[faces[:, 1]] - vertices[faces[:, 0]],
vertices[faces[:, 2]] - vertices[faces[:, 1]])
face_orientation /= np.linalg.norm(face_orientation, axis=-1, keepdims=True)
# And a unit vector pointing from the origin to the center of the face.
face_centers = vertices[faces].mean(1)
face_centers /= np.linalg.norm(face_centers, axis=-1, keepdims=True)
# Positive orientation means those two vectors should be parallel
# (dot product close to 1), and not anti-parallel (dot product close to -1).
dot_center_orientation = np.einsum("ik,ik->i", face_orientation, face_centers)
# Check that the face normal is parallel to the vector that joins the center
# of the face to the center of the sphere. Note we need a small tolerance
# because some discretizations are not exactly uniform, so it will not be
# exactly parallel.
np.testing.assert_allclose(dot_center_orientation, 1., atol=6e-4)
if __name__ == "__main__":
absltest.main()
|
graphcast-main
|
graphcast/icosahedral_mesh_test.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for Predictors which allow them to work with normalized data.
The Predictor which is wrapped sees normalized inputs and targets, and makes
normalized predictions. The wrapper handles translating the predictions back
to the original domain.
"""
import logging
from typing import Optional, Tuple
from graphcast import predictor_base
from graphcast import xarray_tree
import xarray
def normalize(values: xarray.Dataset,
scales: xarray.Dataset,
locations: Optional[xarray.Dataset],
) -> xarray.Dataset:
"""Normalize variables using the given scales and (optionally) locations."""
def normalize_array(array):
if array.name is None:
raise ValueError(
"Can't look up normalization constants because array has no name.")
if locations is not None:
if array.name in locations:
array = array - locations[array.name].astype(array.dtype)
else:
logging.warning('No normalization location found for %s', array.name)
if array.name in scales:
array = array / scales[array.name].astype(array.dtype)
else:
logging.warning('No normalization scale found for %s', array.name)
return array
return xarray_tree.map_structure(normalize_array, values)
def unnormalize(values: xarray.Dataset,
scales: xarray.Dataset,
locations: Optional[xarray.Dataset],
) -> xarray.Dataset:
"""Unnormalize variables using the given scales and (optionally) locations."""
def unnormalize_array(array):
if array.name is None:
raise ValueError(
"Can't look up normalization constants because array has no name.")
if array.name in scales:
array = array * scales[array.name].astype(array.dtype)
else:
logging.warning('No normalization scale found for %s', array.name)
if locations is not None:
if array.name in locations:
array = array + locations[array.name].astype(array.dtype)
else:
logging.warning('No normalization location found for %s', array.name)
return array
return xarray_tree.map_structure(unnormalize_array, values)
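# Illustrative round-trip (not part of the original module; the variable name
# and toy values are made up): with matching scales and locations,
# `unnormalize` inverts `normalize`.
import numpy as np
_values = xarray.Dataset({'temperature': ('x', np.array([280.0, 300.0]))})
_scales = xarray.Dataset({'temperature': xarray.DataArray(10.0)})
_locations = xarray.Dataset({'temperature': xarray.DataArray(290.0)})
_norm = normalize(_values, _scales, _locations)        # temperature -> [-1., 1.]
_roundtrip = unnormalize(_norm, _scales, _locations)   # back to [280., 300.]
np.testing.assert_allclose(_roundtrip['temperature'].values, [280.0, 300.0])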
class InputsAndResiduals(predictor_base.Predictor):
"""Wraps with a residual connection, normalizing inputs and target residuals.
The inner predictor is given inputs that are normalized using `locations`
and `scales` to roughly zero-mean unit variance.
For target variables that are present in the inputs, the inner predictor is
trained to predict residuals (target - last_frame_of_input) that have been
normalized using `residual_scales` (and optionally `residual_locations`) to
roughly unit variance / zero mean.
This replaces `residual.Predictor` in the case where you want normalization
that's based on the scales of the residuals.
Since we return the underlying predictor's loss on the normalized residuals,
if the underlying predictor is a sum of per-variable losses, the normalization
will affect the relative weighting of the per-variable loss terms (hopefully
in a good way).
For target variables *not* present in the inputs, the inner predictor is
trained to predict targets directly, that have been normalized in the same
way as the inputs.
The transforms applied to the targets (the residual connection and the
normalization) are applied in reverse to the predictions before returning
them.
"""
def __init__(
self,
predictor: predictor_base.Predictor,
stddev_by_level: xarray.Dataset,
mean_by_level: xarray.Dataset,
diffs_stddev_by_level: xarray.Dataset):
self._predictor = predictor
self._scales = stddev_by_level
self._locations = mean_by_level
self._residual_scales = diffs_stddev_by_level
self._residual_locations = None
def _unnormalize_prediction_and_add_input(self, inputs, norm_prediction):
if norm_prediction.sizes.get('time') != 1:
raise ValueError(
'normalization.InputsAndResiduals only supports predicting a '
'single timestep.')
if norm_prediction.name in inputs:
# Residuals are assumed to be predicted as normalized (unit variance),
# but the scale and location they need mapping to is that of the residuals
# not of the values themselves.
prediction = unnormalize(
norm_prediction, self._residual_scales, self._residual_locations)
# A prediction for which we have a corresponding input -- we are
# predicting the residual:
last_input = inputs[norm_prediction.name].isel(time=-1)
prediction += last_input
return prediction
else:
# A predicted variable which is not an input variable. We are predicting
# it directly, so unnormalize it directly to the target scale/location:
return unnormalize(norm_prediction, self._scales, self._locations)
def _subtract_input_and_normalize_target(self, inputs, target):
if target.sizes.get('time') != 1:
raise ValueError(
'normalization.InputsAndResiduals only supports wrapping predictors '
'that predict a single timestep.')
if target.name in inputs:
target_residual = target
last_input = inputs[target.name].isel(time=-1)
target_residual -= last_input
return normalize(
target_residual, self._residual_scales, self._residual_locations)
else:
return normalize(target, self._scales, self._locations)
def __call__(self,
inputs: xarray.Dataset,
targets_template: xarray.Dataset,
forcings: xarray.Dataset,
**kwargs
) -> xarray.Dataset:
norm_inputs = normalize(inputs, self._scales, self._locations)
norm_forcings = normalize(forcings, self._scales, self._locations)
norm_predictions = self._predictor(
norm_inputs, targets_template, forcings=norm_forcings, **kwargs)
return xarray_tree.map_structure(
lambda pred: self._unnormalize_prediction_and_add_input(inputs, pred),
norm_predictions)
def loss(self,
inputs: xarray.Dataset,
targets: xarray.Dataset,
forcings: xarray.Dataset,
**kwargs,
) -> predictor_base.LossAndDiagnostics:
"""Returns the loss computed on normalized inputs and targets."""
norm_inputs = normalize(inputs, self._scales, self._locations)
norm_forcings = normalize(forcings, self._scales, self._locations)
norm_target_residuals = xarray_tree.map_structure(
lambda t: self._subtract_input_and_normalize_target(inputs, t),
targets)
return self._predictor.loss(
norm_inputs, norm_target_residuals, forcings=norm_forcings, **kwargs)
def loss_and_predictions( # pytype: disable=signature-mismatch # jax-ndarray
self,
inputs: xarray.Dataset,
targets: xarray.Dataset,
forcings: xarray.Dataset,
**kwargs,
) -> Tuple[predictor_base.LossAndDiagnostics,
xarray.Dataset]:
"""The loss computed on normalized data, with unnormalized predictions."""
norm_inputs = normalize(inputs, self._scales, self._locations)
norm_forcings = normalize(forcings, self._scales, self._locations)
norm_target_residuals = xarray_tree.map_structure(
lambda t: self._subtract_input_and_normalize_target(inputs, t),
targets)
(loss, scalars), norm_predictions = self._predictor.loss_and_predictions(
norm_inputs, norm_target_residuals, forcings=norm_forcings, **kwargs)
predictions = xarray_tree.map_structure(
lambda pred: self._unnormalize_prediction_and_add_input(inputs, pred),
norm_predictions)
return (loss, scalars), predictions
|
graphcast-main
|
graphcast/normalization.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A predictor that runs multiple graph neural networks on mesh data.
It learns to interpolate between the grid and the mesh nodes, with the loss
and the rollouts ultimately computed at the grid level.
It uses ideas similar to those in Keisler (2022):
Reference:
https://arxiv.org/pdf/2202.07575.pdf
It assumes data across time and level is stacked, and operates only on
a 2D mesh over latitudes and longitudes.
"""
from typing import Any, Callable, Mapping, Optional
import chex
from graphcast import deep_typed_graph_net
from graphcast import grid_mesh_connectivity
from graphcast import icosahedral_mesh
from graphcast import losses
from graphcast import model_utils
from graphcast import predictor_base
from graphcast import typed_graph
from graphcast import xarray_jax
import jax.numpy as jnp
import jraph
import numpy as np
import xarray
Kwargs = Mapping[str, Any]
GNN = Callable[[jraph.GraphsTuple], jraph.GraphsTuple]
# https://www.ecmwf.int/en/forecasts/dataset/ecmwf-reanalysis-v5
PRESSURE_LEVELS_ERA5_37 = (
1, 2, 3, 5, 7, 10, 20, 30, 50, 70, 100, 125, 150, 175, 200, 225, 250, 300,
350, 400, 450, 500, 550, 600, 650, 700, 750, 775, 800, 825, 850, 875, 900,
925, 950, 975, 1000)
# https://www.ecmwf.int/en/forecasts/datasets/set-i
PRESSURE_LEVELS_HRES_25 = (
1, 2, 3, 5, 7, 10, 20, 30, 50, 70, 100, 150, 200, 250, 300, 400, 500, 600,
700, 800, 850, 900, 925, 950, 1000)
# https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2020MS002203
PRESSURE_LEVELS_WEATHERBENCH_13 = (
50, 100, 150, 200, 250, 300, 400, 500, 600, 700, 850, 925, 1000)
PRESSURE_LEVELS = {
13: PRESSURE_LEVELS_WEATHERBENCH_13,
25: PRESSURE_LEVELS_HRES_25,
37: PRESSURE_LEVELS_ERA5_37,
}
# The list of all possible atmospheric variables. Taken from:
# https://confluence.ecmwf.int/display/CKB/ERA5%3A+data+documentation#ERA5:datadocumentation-Table9
ALL_ATMOSPHERIC_VARS = (
"potential_vorticity",
"specific_rain_water_content",
"specific_snow_water_content",
"geopotential",
"temperature",
"u_component_of_wind",
"v_component_of_wind",
"specific_humidity",
"vertical_velocity",
"vorticity",
"divergence",
"relative_humidity",
"ozone_mass_mixing_ratio",
"specific_cloud_liquid_water_content",
"specific_cloud_ice_water_content",
"fraction_of_cloud_cover",
)
TARGET_SURFACE_VARS = (
"2m_temperature",
"mean_sea_level_pressure",
"10m_v_component_of_wind",
"10m_u_component_of_wind",
"total_precipitation_6hr",
)
TARGET_SURFACE_NO_PRECIP_VARS = (
"2m_temperature",
"mean_sea_level_pressure",
"10m_v_component_of_wind",
"10m_u_component_of_wind",
)
TARGET_ATMOSPHERIC_VARS = (
"temperature",
"geopotential",
"u_component_of_wind",
"v_component_of_wind",
"vertical_velocity",
"specific_humidity",
)
TARGET_ATMOSPHERIC_NO_W_VARS = (
"temperature",
"geopotential",
"u_component_of_wind",
"v_component_of_wind",
"specific_humidity",
)
EXTERNAL_FORCING_VARS = (
"toa_incident_solar_radiation",
)
GENERATED_FORCING_VARS = (
"year_progress_sin",
"year_progress_cos",
"day_progress_sin",
"day_progress_cos",
)
FORCING_VARS = EXTERNAL_FORCING_VARS + GENERATED_FORCING_VARS
STATIC_VARS = (
"geopotential_at_surface",
"land_sea_mask",
)
@chex.dataclass(frozen=True, eq=True)
class TaskConfig:
"""Defines inputs and targets on which a model is trained and/or evaluated."""
input_variables: tuple[str, ...]
# Target variables which the model is expected to predict.
target_variables: tuple[str, ...]
forcing_variables: tuple[str, ...]
pressure_levels: tuple[int, ...]
input_duration: str
TASK = TaskConfig(
input_variables=(
TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS + FORCING_VARS +
STATIC_VARS),
target_variables=TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS,
forcing_variables=FORCING_VARS,
pressure_levels=PRESSURE_LEVELS_ERA5_37,
input_duration="12h",
)
TASK_13 = TaskConfig(
input_variables=(
TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS + FORCING_VARS +
STATIC_VARS),
target_variables=TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS,
forcing_variables=FORCING_VARS,
pressure_levels=PRESSURE_LEVELS_WEATHERBENCH_13,
input_duration="12h",
)
TASK_13_PRECIP_OUT = TaskConfig(
input_variables=(
TARGET_SURFACE_NO_PRECIP_VARS + TARGET_ATMOSPHERIC_VARS + FORCING_VARS +
STATIC_VARS),
target_variables=TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS,
forcing_variables=FORCING_VARS,
pressure_levels=PRESSURE_LEVELS_WEATHERBENCH_13,
input_duration="12h",
)
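# A minimal sketch (not one of the shipped task configs): how a hypothetical
# no-precipitation, 13-level task could be assembled from the tuples above.
# The function name and the particular variable choices are assumptions.
def _example_custom_task_config() -> TaskConfig:
  """Builds an illustrative TaskConfig; not used anywhere in the library."""
  return TaskConfig(
      input_variables=(
          TARGET_SURFACE_NO_PRECIP_VARS + TARGET_ATMOSPHERIC_NO_W_VARS +
          FORCING_VARS + STATIC_VARS),
      target_variables=(
          TARGET_SURFACE_NO_PRECIP_VARS + TARGET_ATMOSPHERIC_NO_W_VARS),
      forcing_variables=FORCING_VARS,
      pressure_levels=PRESSURE_LEVELS_WEATHERBENCH_13,
      input_duration="12h",
  )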
@chex.dataclass(frozen=True, eq=True)
class ModelConfig:
"""Defines the architecture of the GraphCast neural network architecture.
Properties:
resolution: The resolution of the data, in degrees (e.g. 0.25 or 1.0).
mesh_size: How many refinements to do on the multi-mesh.
gnn_msg_steps: How many Graph Network message passing steps to do.
latent_size: How many latent features to include in the various MLPs.
hidden_layers: How many hidden layers for each MLP.
radius_query_fraction_edge_length: Scalar that will be multiplied by the
length of the longest edge of the finest mesh to define the radius of
connectivity to use in the Grid2Mesh graph. Reasonable values are
between 0.6 and 1. 0.6 reduces the number of grid points feeding into
multiple mesh nodes and therefore reduces edge count and memory use, but
1 gives better predictions.
mesh2grid_edge_normalization_factor: Allows explicitly controlling edge
normalization for mesh2grid edges. If None, defaults to max edge length.
This supports using pre-trained model weights with a different graph
structure to what it was trained on.
"""
resolution: float
mesh_size: int
latent_size: int
gnn_msg_steps: int
hidden_layers: int
radius_query_fraction_edge_length: float
mesh2grid_edge_normalization_factor: Optional[float] = None
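# A minimal sketch with hypothetical values showing how the fields above fit
# together; pretrained GraphCast checkpoints ship their own ModelConfig, so
# these numbers are illustrative assumptions only.
def _example_model_config() -> ModelConfig:
  """Builds a small, illustrative ModelConfig; not used by the library."""
  return ModelConfig(
      resolution=1.0,  # 1-degree lat/lon grid.
      mesh_size=4,  # Number of icosahedral refinements in the multi-mesh.
      latent_size=64,  # Width of the latent features / MLPs.
      gnn_msg_steps=4,  # Message-passing steps in the processor.
      hidden_layers=1,  # Hidden layers per MLP.
      radius_query_fraction_edge_length=0.6,  # Sparser Grid2Mesh connectivity.
  )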
@chex.dataclass(frozen=True, eq=True)
class CheckPoint:
params: dict[str, Any]
model_config: ModelConfig
task_config: TaskConfig
description: str
license: str
class GraphCast(predictor_base.Predictor):
"""GraphCast Predictor.
The model works on graphs that take into account:
* Mesh nodes: nodes for the vertices of the mesh.
* Grid nodes: nodes for the points of the grid.
* Nodes: When referring to just "nodes", this means the joint set of
both mesh nodes, concatenated with grid nodes.
The model works with 3 graphs:
* Grid2Mesh graph: Graph that contains all nodes. This graph is strictly
bipartite with edges going from grid nodes to mesh nodes using a
fixed radius query. The grid2mesh_gnn will operate in this graph. The output
of this stage will be a latent representation for the mesh nodes, and a
latent representation for the grid nodes.
* Mesh graph: Graph that contains mesh nodes only. The mesh_gnn will
operate in this graph. It will update the latent state of the mesh nodes
only.
* Mesh2Grid graph: Graph that contains all nodes. This graph is strictly
bipartite with edges going from mesh nodes to grid nodes such that each grid
node is connected to the 3 nodes of the mesh triangular face that contains
the grid point. The mesh2grid_gnn will operate in this graph. It will
process the updated latent state of the mesh nodes, and the latent state
of the grid nodes, to produce the final output for the grid nodes.
The model is built on top of `TypedGraph`s so the different types of nodes and
edges can be stored and treated separately.
"""
def __init__(self, model_config: ModelConfig, task_config: TaskConfig):
"""Initializes the predictor."""
self._spatial_features_kwargs = dict(
add_node_positions=False,
add_node_latitude=True,
add_node_longitude=True,
add_relative_positions=True,
relative_longitude_local_coordinates=True,
relative_latitude_local_coordinates=True,
)
# Specification of the multimesh.
self._meshes = (
icosahedral_mesh.get_hierarchy_of_triangular_meshes_for_sphere(
splits=model_config.mesh_size))
# Encoder, which moves data from the grid to the mesh with a single message
# passing step.
self._grid2mesh_gnn = deep_typed_graph_net.DeepTypedGraphNet(
embed_nodes=True, # Embed raw features of the grid and mesh nodes.
embed_edges=True, # Embed raw features of the grid2mesh edges.
edge_latent_size=dict(grid2mesh=model_config.latent_size),
node_latent_size=dict(
mesh_nodes=model_config.latent_size,
grid_nodes=model_config.latent_size),
mlp_hidden_size=model_config.latent_size,
mlp_num_hidden_layers=model_config.hidden_layers,
num_message_passing_steps=1,
use_layer_norm=True,
include_sent_messages_in_node_update=False,
activation="swish",
f32_aggregation=True,
aggregate_normalization=None,
name="grid2mesh_gnn",
)
# Processor, which performs message passing on the multi-mesh.
self._mesh_gnn = deep_typed_graph_net.DeepTypedGraphNet(
embed_nodes=False,  # Node features already embedded by previous layers.
embed_edges=True, # Embed raw features of the multi-mesh edges.
node_latent_size=dict(mesh_nodes=model_config.latent_size),
edge_latent_size=dict(mesh=model_config.latent_size),
mlp_hidden_size=model_config.latent_size,
mlp_num_hidden_layers=model_config.hidden_layers,
num_message_passing_steps=model_config.gnn_msg_steps,
use_layer_norm=True,
include_sent_messages_in_node_update=False,
activation="swish",
f32_aggregation=False,
name="mesh_gnn",
)
num_surface_vars = len(
set(task_config.target_variables) - set(ALL_ATMOSPHERIC_VARS))
num_atmospheric_vars = len(
set(task_config.target_variables) & set(ALL_ATMOSPHERIC_VARS))
num_outputs = (num_surface_vars +
len(task_config.pressure_levels) * num_atmospheric_vars)
# Decoder, which moves data from the mesh back into the grid with a single
# message passing step.
self._mesh2grid_gnn = deep_typed_graph_net.DeepTypedGraphNet(
# Require a specific node dimensionality for the grid node outputs.
node_output_size=dict(grid_nodes=num_outputs),
embed_nodes=False,  # Node features already embedded by previous layers.
embed_edges=True, # Embed raw features of the mesh2grid edges.
edge_latent_size=dict(mesh2grid=model_config.latent_size),
node_latent_size=dict(
mesh_nodes=model_config.latent_size,
grid_nodes=model_config.latent_size),
mlp_hidden_size=model_config.latent_size,
mlp_num_hidden_layers=model_config.hidden_layers,
num_message_passing_steps=1,
use_layer_norm=True,
include_sent_messages_in_node_update=False,
activation="swish",
f32_aggregation=False,
name="mesh2grid_gnn",
)
# Obtain the query radius in absolute units for the unit-sphere for the
# grid2mesh model, by rescaling the `radius_query_fraction_edge_length`.
self._query_radius = (_get_max_edge_distance(self._finest_mesh)
* model_config.radius_query_fraction_edge_length)
self._mesh2grid_edge_normalization_factor = (
model_config.mesh2grid_edge_normalization_factor
)
# Other initialization is delayed until the first call (`_maybe_init`)
# when we get some sample data so we know the lat/lon values.
self._initialized = False
# A "_init_mesh_properties":
# This one could be initialized at init but we delay it for consistency too.
self._num_mesh_nodes = None # num_mesh_nodes
self._mesh_nodes_lat = None # [num_mesh_nodes]
self._mesh_nodes_lon = None # [num_mesh_nodes]
# A "_init_grid_properties":
self._grid_lat = None # [num_lat_points]
self._grid_lon = None # [num_lon_points]
self._num_grid_nodes = None # num_lat_points * num_lon_points
self._grid_nodes_lat = None # [num_grid_nodes]
self._grid_nodes_lon = None # [num_grid_nodes]
# A "_init_{grid2mesh,processor,mesh2grid}_graph"
self._grid2mesh_graph_structure = None
self._mesh_graph_structure = None
self._mesh2grid_graph_structure = None
@property
def _finest_mesh(self):
return self._meshes[-1]
def __call__(self,
inputs: xarray.Dataset,
targets_template: xarray.Dataset,
forcings: xarray.Dataset,
is_training: bool = False,
) -> xarray.Dataset:
self._maybe_init(inputs)
# Convert all input data into flat vectors for each of the grid nodes.
# xarray (batch, time, lat, lon, level, multiple vars, forcings)
# -> [num_grid_nodes, batch, num_channels]
grid_node_features = self._inputs_to_grid_node_features(inputs, forcings)
# Transfer data from the grid to the mesh,
# [num_mesh_nodes, batch, latent_size], [num_grid_nodes, batch, latent_size]
(latent_mesh_nodes, latent_grid_nodes
) = self._run_grid2mesh_gnn(grid_node_features)
# Run message passing in the multimesh.
# [num_mesh_nodes, batch, latent_size]
updated_latent_mesh_nodes = self._run_mesh_gnn(latent_mesh_nodes)
# Transfer data from the mesh to the grid.
# [num_grid_nodes, batch, output_size]
output_grid_nodes = self._run_mesh2grid_gnn(
updated_latent_mesh_nodes, latent_grid_nodes)
# Convert output flat vectors for the grid nodes to the format of the output.
# [num_grid_nodes, batch, output_size] ->
# xarray (batch, one time step, lat, lon, level, multiple vars)
return self._grid_node_outputs_to_prediction(
output_grid_nodes, targets_template)
def loss_and_predictions( # pytype: disable=signature-mismatch # jax-ndarray
self,
inputs: xarray.Dataset,
targets: xarray.Dataset,
forcings: xarray.Dataset,
) -> tuple[predictor_base.LossAndDiagnostics, xarray.Dataset]:
# Forward pass.
predictions = self(
inputs, targets_template=targets, forcings=forcings, is_training=True)
# Compute loss.
loss = losses.weighted_mse_per_level(
predictions, targets,
per_variable_weights={
# Any variables not specified here are weighted as 1.0.
# A single-level variable, but an important headline variable
# and also one which we have struggled to get good performance
# on at short lead times, so leaving it weighted at 1.0, equal
# to the multi-level variables:
"2m_temperature": 1.0,
# New single-level variables, which we don't weight too highly
# to avoid hurting performance on other variables.
"10m_u_component_of_wind": 0.1,
"10m_v_component_of_wind": 0.1,
"mean_sea_level_pressure": 0.1,
"total_precipitation_6hr": 0.1,
})
return loss, predictions # pytype: disable=bad-return-type # jax-ndarray
def loss( # pytype: disable=signature-mismatch # jax-ndarray
self,
inputs: xarray.Dataset,
targets: xarray.Dataset,
forcings: xarray.Dataset,
) -> predictor_base.LossAndDiagnostics:
loss, _ = self.loss_and_predictions(inputs, targets, forcings)
return loss # pytype: disable=bad-return-type # jax-ndarray
def _maybe_init(self, sample_inputs: xarray.Dataset):
"""Inits everything that has a dependency on the input coordinates."""
if not self._initialized:
self._init_mesh_properties()
self._init_grid_properties(
grid_lat=sample_inputs.lat, grid_lon=sample_inputs.lon)
self._grid2mesh_graph_structure = self._init_grid2mesh_graph()
self._mesh_graph_structure = self._init_mesh_graph()
self._mesh2grid_graph_structure = self._init_mesh2grid_graph()
self._initialized = True
def _init_mesh_properties(self):
"""Inits static properties that have to do with mesh nodes."""
self._num_mesh_nodes = self._finest_mesh.vertices.shape[0]
mesh_phi, mesh_theta = model_utils.cartesian_to_spherical(
self._finest_mesh.vertices[:, 0],
self._finest_mesh.vertices[:, 1],
self._finest_mesh.vertices[:, 2])
(
mesh_nodes_lat,
mesh_nodes_lon,
) = model_utils.spherical_to_lat_lon(
phi=mesh_phi, theta=mesh_theta)
# Convert to f32 to ensure the lat/lon features aren't in f64.
self._mesh_nodes_lat = mesh_nodes_lat.astype(np.float32)
self._mesh_nodes_lon = mesh_nodes_lon.astype(np.float32)
def _init_grid_properties(self, grid_lat: np.ndarray, grid_lon: np.ndarray):
"""Inits static properties that have to do with grid nodes."""
self._grid_lat = grid_lat.astype(np.float32)
self._grid_lon = grid_lon.astype(np.float32)
# Initialize the counters.
self._num_grid_nodes = grid_lat.shape[0] * grid_lon.shape[0]
# Initialize lat and lon for the grid.
grid_nodes_lon, grid_nodes_lat = np.meshgrid(grid_lon, grid_lat)
self._grid_nodes_lon = grid_nodes_lon.reshape([-1]).astype(np.float32)
self._grid_nodes_lat = grid_nodes_lat.reshape([-1]).astype(np.float32)
def _init_grid2mesh_graph(self) -> typed_graph.TypedGraph:
"""Build Grid2Mesh graph."""
# Create some edges according to distance between mesh and grid nodes.
assert self._grid_lat is not None and self._grid_lon is not None
(grid_indices, mesh_indices) = grid_mesh_connectivity.radius_query_indices(
grid_latitude=self._grid_lat,
grid_longitude=self._grid_lon,
mesh=self._finest_mesh,
radius=self._query_radius)
# Edges sending info from grid to mesh.
senders = grid_indices
receivers = mesh_indices
# Precompute structural node and edge features according to config options.
# Structural features are those that depend on the fixed values of the
# latitude and longitudes of the nodes.
(senders_node_features, receivers_node_features,
edge_features) = model_utils.get_bipartite_graph_spatial_features(
senders_node_lat=self._grid_nodes_lat,
senders_node_lon=self._grid_nodes_lon,
receivers_node_lat=self._mesh_nodes_lat,
receivers_node_lon=self._mesh_nodes_lon,
senders=senders,
receivers=receivers,
edge_normalization_factor=None,
**self._spatial_features_kwargs,
)
n_grid_node = np.array([self._num_grid_nodes])
n_mesh_node = np.array([self._num_mesh_nodes])
n_edge = np.array([mesh_indices.shape[0]])
grid_node_set = typed_graph.NodeSet(
n_node=n_grid_node, features=senders_node_features)
mesh_node_set = typed_graph.NodeSet(
n_node=n_mesh_node, features=receivers_node_features)
edge_set = typed_graph.EdgeSet(
n_edge=n_edge,
indices=typed_graph.EdgesIndices(senders=senders, receivers=receivers),
features=edge_features)
nodes = {"grid_nodes": grid_node_set, "mesh_nodes": mesh_node_set}
edges = {
typed_graph.EdgeSetKey("grid2mesh", ("grid_nodes", "mesh_nodes")):
edge_set
}
grid2mesh_graph = typed_graph.TypedGraph(
context=typed_graph.Context(n_graph=np.array([1]), features=()),
nodes=nodes,
edges=edges)
return grid2mesh_graph
def _init_mesh_graph(self) -> typed_graph.TypedGraph:
"""Build Mesh graph."""
merged_mesh = icosahedral_mesh.merge_meshes(self._meshes)
# Work simply on the mesh edges.
senders, receivers = icosahedral_mesh.faces_to_edges(merged_mesh.faces)
# Precompute structural node and edge features according to config options.
# Structural features are those that depend on the fixed values of the
# latitude and longitudes of the nodes.
assert self._mesh_nodes_lat is not None and self._mesh_nodes_lon is not None
node_features, edge_features = model_utils.get_graph_spatial_features(
node_lat=self._mesh_nodes_lat,
node_lon=self._mesh_nodes_lon,
senders=senders,
receivers=receivers,
**self._spatial_features_kwargs,
)
n_mesh_node = np.array([self._num_mesh_nodes])
n_edge = np.array([senders.shape[0]])
assert n_mesh_node == len(node_features)
mesh_node_set = typed_graph.NodeSet(
n_node=n_mesh_node, features=node_features)
edge_set = typed_graph.EdgeSet(
n_edge=n_edge,
indices=typed_graph.EdgesIndices(senders=senders, receivers=receivers),
features=edge_features)
nodes = {"mesh_nodes": mesh_node_set}
edges = {
typed_graph.EdgeSetKey("mesh", ("mesh_nodes", "mesh_nodes")): edge_set
}
mesh_graph = typed_graph.TypedGraph(
context=typed_graph.Context(n_graph=np.array([1]), features=()),
nodes=nodes,
edges=edges)
return mesh_graph
def _init_mesh2grid_graph(self) -> typed_graph.TypedGraph:
"""Build Mesh2Grid graph."""
# Create some edges according to how the grid nodes are contained by
# mesh triangles.
(grid_indices,
mesh_indices) = grid_mesh_connectivity.in_mesh_triangle_indices(
grid_latitude=self._grid_lat,
grid_longitude=self._grid_lon,
mesh=self._finest_mesh)
# Edges sending info from mesh to grid.
senders = mesh_indices
receivers = grid_indices
# Precompute structural node and edge features according to config options.
assert self._mesh_nodes_lat is not None and self._mesh_nodes_lon is not None
(senders_node_features, receivers_node_features,
edge_features) = model_utils.get_bipartite_graph_spatial_features(
senders_node_lat=self._mesh_nodes_lat,
senders_node_lon=self._mesh_nodes_lon,
receivers_node_lat=self._grid_nodes_lat,
receivers_node_lon=self._grid_nodes_lon,
senders=senders,
receivers=receivers,
edge_normalization_factor=self._mesh2grid_edge_normalization_factor,
**self._spatial_features_kwargs,
)
n_grid_node = np.array([self._num_grid_nodes])
n_mesh_node = np.array([self._num_mesh_nodes])
n_edge = np.array([senders.shape[0]])
grid_node_set = typed_graph.NodeSet(
n_node=n_grid_node, features=receivers_node_features)
mesh_node_set = typed_graph.NodeSet(
n_node=n_mesh_node, features=senders_node_features)
edge_set = typed_graph.EdgeSet(
n_edge=n_edge,
indices=typed_graph.EdgesIndices(senders=senders, receivers=receivers),
features=edge_features)
nodes = {"grid_nodes": grid_node_set, "mesh_nodes": mesh_node_set}
edges = {
typed_graph.EdgeSetKey("mesh2grid", ("mesh_nodes", "grid_nodes")):
edge_set
}
mesh2grid_graph = typed_graph.TypedGraph(
context=typed_graph.Context(n_graph=np.array([1]), features=()),
nodes=nodes,
edges=edges)
return mesh2grid_graph
def _run_grid2mesh_gnn(self, grid_node_features: chex.Array,
) -> tuple[chex.Array, chex.Array]:
"""Runs the grid2mesh_gnn, extracting latent mesh and grid nodes."""
# Concatenate node structural features with input features.
batch_size = grid_node_features.shape[1]
grid2mesh_graph = self._grid2mesh_graph_structure
assert grid2mesh_graph is not None
grid_nodes = grid2mesh_graph.nodes["grid_nodes"]
mesh_nodes = grid2mesh_graph.nodes["mesh_nodes"]
new_grid_nodes = grid_nodes._replace(
features=jnp.concatenate([
grid_node_features,
_add_batch_second_axis(
grid_nodes.features.astype(grid_node_features.dtype),
batch_size)
],
axis=-1))
# To make sure the capacity of the embedding is identical for the grid nodes
# the mesh nodes, we also append some dummy zero input features for the
# mesh nodes.
dummy_mesh_node_features = jnp.zeros(
(self._num_mesh_nodes,) + grid_node_features.shape[1:],
dtype=grid_node_features.dtype)
new_mesh_nodes = mesh_nodes._replace(
features=jnp.concatenate([
dummy_mesh_node_features,
_add_batch_second_axis(
mesh_nodes.features.astype(dummy_mesh_node_features.dtype),
batch_size)
],
axis=-1))
# Broadcast edge structural features to the required batch size.
grid2mesh_edges_key = grid2mesh_graph.edge_key_by_name("grid2mesh")
edges = grid2mesh_graph.edges[grid2mesh_edges_key]
new_edges = edges._replace(
features=_add_batch_second_axis(
edges.features.astype(dummy_mesh_node_features.dtype), batch_size))
input_graph = self._grid2mesh_graph_structure._replace(
edges={grid2mesh_edges_key: new_edges},
nodes={
"grid_nodes": new_grid_nodes,
"mesh_nodes": new_mesh_nodes
})
# Run the GNN.
grid2mesh_out = self._grid2mesh_gnn(input_graph)
latent_mesh_nodes = grid2mesh_out.nodes["mesh_nodes"].features
latent_grid_nodes = grid2mesh_out.nodes["grid_nodes"].features
return latent_mesh_nodes, latent_grid_nodes
def _run_mesh_gnn(self, latent_mesh_nodes: chex.Array) -> chex.Array:
"""Runs the mesh_gnn, extracting updated latent mesh nodes."""
# Add the structural edge features of this graph. Note we don't need
# to add the structural node features, because these are already part of
# the latent state via the original Grid2Mesh GNN. However, we need
# the edge ones, because it is the first time we are seeing this particular
# set of edges.
batch_size = latent_mesh_nodes.shape[1]
mesh_graph = self._mesh_graph_structure
assert mesh_graph is not None
mesh_edges_key = mesh_graph.edge_key_by_name("mesh")
edges = mesh_graph.edges[mesh_edges_key]
# We are assuming here that the mesh gnn uses a single set of edge keys
# named "mesh" for the edges and that it uses a single set of nodes named
# "mesh_nodes"
msg = ("The setup currently requires to only have one kind of edge in the"
" mesh GNN.")
assert len(mesh_graph.edges) == 1, msg
new_edges = edges._replace(
features=_add_batch_second_axis(
edges.features.astype(latent_mesh_nodes.dtype), batch_size))
nodes = mesh_graph.nodes["mesh_nodes"]
nodes = nodes._replace(features=latent_mesh_nodes)
input_graph = mesh_graph._replace(
edges={mesh_edges_key: new_edges}, nodes={"mesh_nodes": nodes})
# Run the GNN.
return self._mesh_gnn(input_graph).nodes["mesh_nodes"].features
def _run_mesh2grid_gnn(self,
updated_latent_mesh_nodes: chex.Array,
latent_grid_nodes: chex.Array,
) -> chex.Array:
"""Runs the mesh2grid_gnn, extracting the output grid nodes."""
# Add the structural edge features of this graph. Note we don't need
# to add the structural node features, because these are already part of
# the latent state via the original Grid2Mesh GNN. However, we need
# the edge ones, because it is the first time we are seeing this particular
# set of edges.
batch_size = updated_latent_mesh_nodes.shape[1]
mesh2grid_graph = self._mesh2grid_graph_structure
assert mesh2grid_graph is not None
mesh_nodes = mesh2grid_graph.nodes["mesh_nodes"]
grid_nodes = mesh2grid_graph.nodes["grid_nodes"]
new_mesh_nodes = mesh_nodes._replace(features=updated_latent_mesh_nodes)
new_grid_nodes = grid_nodes._replace(features=latent_grid_nodes)
mesh2grid_key = mesh2grid_graph.edge_key_by_name("mesh2grid")
edges = mesh2grid_graph.edges[mesh2grid_key]
new_edges = edges._replace(
features=_add_batch_second_axis(
edges.features.astype(latent_grid_nodes.dtype), batch_size))
input_graph = mesh2grid_graph._replace(
edges={mesh2grid_key: new_edges},
nodes={
"mesh_nodes": new_mesh_nodes,
"grid_nodes": new_grid_nodes
})
# Run the GNN.
output_graph = self._mesh2grid_gnn(input_graph)
output_grid_nodes = output_graph.nodes["grid_nodes"].features
return output_grid_nodes
def _inputs_to_grid_node_features(
self,
inputs: xarray.Dataset,
forcings: xarray.Dataset,
) -> chex.Array:
"""xarrays -> [num_grid_nodes, batch, num_channels]."""
# xarray `Dataset` (batch, time, lat, lon, level, multiple vars)
# to xarray `DataArray` (batch, lat, lon, channels)
stacked_inputs = model_utils.dataset_to_stacked(inputs)
stacked_forcings = model_utils.dataset_to_stacked(forcings)
stacked_inputs = xarray.concat(
[stacked_inputs, stacked_forcings], dim="channels")
# xarray `DataArray` (batch, lat, lon, channels)
# to single numpy array with shape [lat_lon_node, batch, channels]
grid_xarray_lat_lon_leading = model_utils.lat_lon_to_leading_axes(
stacked_inputs)
return xarray_jax.unwrap(grid_xarray_lat_lon_leading.data).reshape(
(-1,) + grid_xarray_lat_lon_leading.data.shape[2:])
def _grid_node_outputs_to_prediction(
self,
grid_node_outputs: chex.Array,
targets_template: xarray.Dataset,
) -> xarray.Dataset:
"""[num_grid_nodes, batch, num_outputs] -> xarray."""
# numpy array with shape [lat_lon_node, batch, channels]
# to xarray `DataArray` (batch, lat, lon, channels)
assert self._grid_lat is not None and self._grid_lon is not None
grid_shape = (self._grid_lat.shape[0], self._grid_lon.shape[0])
grid_outputs_lat_lon_leading = grid_node_outputs.reshape(
grid_shape + grid_node_outputs.shape[1:])
dims = ("lat", "lon", "batch", "channels")
grid_xarray_lat_lon_leading = xarray_jax.DataArray(
data=grid_outputs_lat_lon_leading,
dims=dims)
grid_xarray = model_utils.restore_leading_axes(grid_xarray_lat_lon_leading)
# xarray `DataArray` (batch, lat, lon, channels)
# to xarray `Dataset` (batch, one time step, lat, lon, level, multiple vars)
return model_utils.stacked_to_dataset(
grid_xarray.variable, targets_template)
def _add_batch_second_axis(data, batch_size):
# data [leading_dim, trailing_dim]
assert data.ndim == 2
ones = jnp.ones([batch_size, 1], dtype=data.dtype)
return data[:, None] * ones # [leading_dim, batch, trailing_dim]
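# For example (shapes only): data of shape [num_nodes, feature_dim] with
# batch_size=B is returned with shape [num_nodes, B, feature_dim], i.e. the
# same features tiled along a new second (batch) axis.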
def _get_max_edge_distance(mesh):
senders, receivers = icosahedral_mesh.faces_to_edges(mesh.faces)
edge_distances = np.linalg.norm(
mesh.vertices[senders] - mesh.vertices[receivers], axis=-1)
return edge_distances.max()
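# A minimal end-to-end sketch, not part of the library API. GraphCast's
# internal GNNs are Haiku modules, so the predictor must be constructed and
# called inside a Haiku transform; the function name, the config values, and
# the assumption that `inputs`, `targets_template` and `forcings` are xarray
# Datasets prepared by the usual GraphCast data pipeline are all illustrative.
def _example_forward_fn(inputs, targets_template, forcings):
  """Hypothetical forward pass; wrap with hk.transform before init/apply."""
  model_config = ModelConfig(
      resolution=1.0,
      mesh_size=4,
      latent_size=64,
      gnn_msg_steps=4,
      hidden_layers=1,
      radius_query_fraction_edge_length=0.6,
  )
  predictor = GraphCast(model_config, TASK_13)
  return predictor(inputs, targets_template=targets_template, forcings=forcings)
# Sketch of how it would be driven (Haiku assumed to be available):
#   import haiku as hk
#   forward = hk.transform(_example_forward_fn)
#   params = forward.init(rng, inputs, targets_template, forcings)
#   predictions = forward.apply(params, rng, inputs, targets_template, forcings)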
|
graphcast-main
|
graphcast/graphcast.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers to use xarray.{Variable,DataArray,Dataset} with JAX.
Allows them to be based on JAX arrays without converting to numpy arrays under
the hood, so you can start with a JAX array, do some computation with it in
xarray-land, get a JAX array out the other end and (for example) jax.jit
through the whole thing. You can even jax.jit a function which accepts and
returns xarray.Dataset, DataArray and Variable.
## Creating xarray datatypes from jax arrays, and vice-versa.
You can use the xarray_jax.{Variable,DataArray,Dataset} constructors, which have
the same API as the standard xarray constructors but will accept JAX arrays
without converting them to numpy.
It does this by wrapping the JAX array in a wrapper before passing it to
xarray; you can also do this manually by calling xarray_jax.wrap on your JAX
arrays before passing them to the standard xarray constructors.
To get non-wrapped JAX arrays out the other end, you can use e.g.:
xarray_jax.jax_vars(dataset)
xarray_jax.jax_data(dataset.some_var)
which will complain if the data isn't actually a JAX array. Use this if you need
to make sure the computation has gone via JAX, e.g. if it's the output of code
that you want to JIT or compute gradients through. If this is not the case and
you want to support passing plain numpy arrays through as well as potentially
JAX arrays, you can use:
xarray_jax.unwrap_vars(dataset)
xarray_jax.unwrap_data(dataset.some_var)
which will unwrap the data if it is a wrapped JAX array, but otherwise pass
it through to you without complaint.
The wrapped JAX arrays aim to support all the core operations from the numpy
array API that xarray expects, however there may still be some gaps; if you run
into any problems around this, you may need to add a few more proxy methods onto
the wrapper class below.
In future once JAX and xarray support the new Python array API standard
(https://data-apis.org/array-api/latest/index.html), we hope to avoid the need
for wrapping the JAX arrays like this.
## jax.jit and pmap of functions taking and returning xarray datatypes
We register xarray datatypes with jax.tree_util, which allows them to be treated
as generic containers of jax arrays by various parts of jax including jax.jit.
This allows for, e.g.:
@jax.jit
def foo(input: xarray.Dataset) -> xarray.Dataset:
...
It will not work out-of-the-box with shape-modifying transformations like
jax.pmap, or e.g. a jax.tree_util.tree_map with some transform that alters array
shapes or dimension order. That's because we won't know what dimension names
and/or coordinates to use when unflattening, if the results have a different
shape to the data that was originally flattened.
You can work around this using xarray_jax.dims_change_on_unflatten, however,
and in the case of jax.pmap we provide a wrapper xarray_jax.pmap which allows
it to be used with functions taking and returning xarrays.
## Treatment of coordinates
We don't support passing jax arrays as coordinates when constructing a
DataArray/Dataset. This is because xarray's advanced indexing and slicing is
unlikely to work with jax arrays (at least when a Tracer is used during
jax.jit), and also because some important datatypes used for coordinates, like
timedelta64 and datetime64, are not supported by jax.
For the purposes of tree_util and jax.jit, coordinates are not treated as leaves
of the tree (array data 'contained' by a Dataset/DataArray), they are just a
static part of the structure. That means that if a jit'ed function is called
twice with Dataset inputs that use different coordinates, it will compile a
separate version of the function for each. The coordinates are treated like
static_argnums by jax.jit.
If you want to use dynamic data for coordinates, we recommend making it a
data_var instead of a coord. You won't be able to do indexing and slicing using
the coordinate, but that wasn't going to work with a jax array anyway.
"""
import collections
import contextlib
import contextvars
from typing import Any, Callable, Hashable, Iterator, Mapping, Optional, Union, Tuple, TypeVar, cast
import jax
import jax.numpy as jnp
import numpy as np
import tree
import xarray
def Variable(dims, data, **kwargs) -> xarray.Variable: # pylint:disable=invalid-name
"""Like xarray.Variable, but can wrap JAX arrays."""
return xarray.Variable(dims, wrap(data), **kwargs)
_JAX_COORD_ATTR_NAME = '_jax_coord'
def DataArray( # pylint:disable=invalid-name
data,
coords=None,
dims=None,
name=None,
attrs=None,
jax_coords=None,
) -> xarray.DataArray:
"""Like xarray.DataArray, but supports using JAX arrays.
Args:
data: As for xarray.DataArray, except jax arrays are also supported.
coords: Coordinates for the array, see xarray.DataArray. These coordinates
must be based on plain numpy arrays or something convertible to plain
numpy arrays. Their values will form a static part of the data structure
from the point of view of jax.tree_util. In particular this means these
coordinates will be passed as plain numpy arrays even inside a JIT'd
function, and the JIT'd function will be recompiled under the hood if the
coordinates of DataArrays passed into it change.
If this is not convenient for you, see also jax_coords below.
dims: See xarray.DataArray.
name: See xarray.DataArray.
attrs: See xarray.DataArray.
jax_coords: Additional coordinates, which *can* use JAX arrays. These
coordinates will be treated as JAX data from the point of view of
jax.tree_util, that means when JIT'ing they will be passed as tracers and
computation involving them will be JIT'd.
Unfortunately a side-effect of this is that they can't be used as index
coordinates (because xarray's indexing logic is not JIT-able). If you
specify a coordinate with the same name as a dimension here, it will not
be set as an index coordinate; this behaviour is different to the default
for `coords`, and it means that things like `.sel` based on the jax
coordinate will not work.
Note we require `jax_coords` to be explicitly specified via a different
constructor argument to `coords`, rather than just looking for jax arrays
within the `coords` and treating them differently. This is because it
affects the way jax.tree_util treats them, which is somewhat orthogonal to
whether the value is passed in as numpy or not, and generally needs to be
handled consistently so is something we encourage explicit control over.
Returns:
An instance of xarray.DataArray. Where JAX arrays are used as data or
coords, they will be wrapped with JaxArrayWrapper and can be unwrapped via
`unwrap` and `unwrap_data`.
"""
result = xarray.DataArray(
wrap(data), dims=dims, name=name, attrs=attrs or {})
return assign_coords(result, coords=coords, jax_coords=jax_coords)
def Dataset( # pylint:disable=invalid-name
data_vars,
coords=None,
attrs=None,
jax_coords=None,
) -> xarray.Dataset:
"""Like xarray.Dataset, but can wrap JAX arrays.
Args:
data_vars: As for xarray.Dataset, except jax arrays are also supported.
coords: Coordinates for the dataset, see xarray.Dataset. These coordinates
must be based on plain numpy arrays or something convertible to plain
numpy arrays. Their values will form a static part of the data structure
from the point of view of jax.tree_util. In particular this means these
coordinates will be passed as plain numpy arrays even inside a JIT'd
function, and the JIT'd function will be recompiled under the hood if the
coordinates of DataArrays passed into it change.
If this is not convenient for you, see also jax_coords below.
attrs: See xarray.Dataset.
jax_coords: Additional coordinates, which *can* use JAX arrays. These
coordinates will be treated as JAX data from the point of view of
jax.tree_util, that means when JIT'ing they will be passed as tracers and
computation involving them will be JIT'd.
Unfortunately a side-effect of this is that they can't be used as index
coordinates (because xarray's indexing logic is not JIT-able). If you
specify a coordinate with the same name as a dimension here, it will not
be set as an index coordinate; this behaviour is different to the default
for `coords`, and it means that things like `.sel` based on the jax
coordinate will not work.
Note we require `jax_coords` to be explicitly specified via a different
constructor argument to `coords`, rather than just looking for jax arrays
within the `coords` and treating them differently. This is because it
affects the way jax.tree_util treats them, which is somewhat orthogonal to
whether the value is passed in as numpy or not, and generally needs to be
handled consistently so is something we encourage explicit control over.
Returns:
An instance of xarray.Dataset. Where JAX arrays are used as data, they
will be wrapped with JaxArrayWrapper.
"""
wrapped_data_vars = {}
for name, var_like in data_vars.items():
# xarray.Dataset accepts a few different formats for data_vars:
if isinstance(var_like, jax.Array):
wrapped_data_vars[name] = wrap(var_like)
elif isinstance(var_like, tuple):
# Layout is (dims, data, ...). We wrap data.
wrapped_data_vars[name] = (var_like[0], wrap(var_like[1])) + var_like[2:]
else:
# Could be a plain numpy array or scalar (we don't wrap), or an
# xarray.Variable, DataArray etc, which we must assume is already wrapped
# if necessary (e.g. if creating using xarray_jax.{Variable,DataArray}).
wrapped_data_vars[name] = var_like
result = xarray.Dataset(
data_vars=wrapped_data_vars,
attrs=attrs)
return assign_coords(result, coords=coords, jax_coords=jax_coords)
DatasetOrDataArray = TypeVar(
'DatasetOrDataArray', xarray.Dataset, xarray.DataArray)
def assign_coords(
x: DatasetOrDataArray,
*,
coords: Optional[Mapping[Hashable, Any]] = None,
jax_coords: Optional[Mapping[Hashable, Any]] = None,
) -> DatasetOrDataArray:
"""Replacement for assign_coords which works in presence of jax_coords.
`jax_coords` allow certain specified coordinates to have their data passed as
JAX arrays (including through jax.jit boundaries). The compromise in return is
that they are not created as index coordinates and cannot be used for .sel
and other coordinate-based indexing operations. See docs for `jax_coords` on
xarray_jax.Dataset and xarray_jax.DataArray for more information.
This function can be used to set jax_coords on an existing DataArray or
Dataset, and also to set a mix of jax and non-jax coordinates. It implements
some workarounds to prevent xarray trying and failing to create IndexVariables
from jax arrays under the hood.
If you have any jax_coords with the same name as a dimension, you'll need to
use this function instead of data_array.assign_coords or dataset.assign_coords
in general, to avoid an xarray bug where it tries (and in our case fails) to
create indexes for existing jax coords. See
https://github.com/pydata/xarray/issues/7885.
Args:
x: An xarray Dataset or DataArray.
coords: Dict of (non-JAX) coords, or None if not assigning any.
jax_coords: Dict of JAX coords, or None if not assigning any. See docs for
xarray_jax.Dataset / DataArray for more information on jax_coords.
Returns:
The Dataset or DataArray with coordinates assigned, similarly to
Dataset.assign_coords / DataArray.assign_coords.
"""
coords = {} if coords is None else dict(coords) # Copy before mutating.
jax_coords = {} if jax_coords is None else dict(jax_coords)
# Any existing JAX coords must be dropped and re-added via the workaround
# below, since otherwise .assign_coords will trigger an xarray bug where
# it tries to recreate the indexes again for the existing coordinates.
# Can remove if/when https://github.com/pydata/xarray/issues/7885 fixed.
existing_jax_coords = {
name: coord_var for name, coord_var in x.coords.variables.items()
if coord_var.attrs.get(_JAX_COORD_ATTR_NAME, False)
}
jax_coords = existing_jax_coords | jax_coords
x = x.drop_vars(existing_jax_coords.keys())
# We need to ensure that xarray doesn't try to create an index for
# coordinates with the same name as a dimension, since this will fail if
# given a wrapped JAX tracer.
# It appears the only way to avoid this is to name them differently to any
# dimension name, then rename them back afterwards.
renamed_jax_coords = {}
for name, coord in jax_coords.items():
if isinstance(coord, xarray.DataArray):
coord = coord.variable
if isinstance(coord, xarray.Variable):
coord = coord.copy(deep=False) # Copy before mutating attrs.
else:
# Must wrap as Variable with the correct dims first if this has not
# already been done, otherwise xarray.Dataset will assume the dimension
# name is also __NONINDEX_{n}.
coord = Variable((name,), coord)
# We set an attr on each jax_coord identifying it as such. These attrs on
# the coord Variable get reflected on the coord DataArray exposed too, and
# when set on coordinates they generally get preserved under the default
# keep_attrs setting.
# These attrs are used by jax.tree_util registered flatten/unflatten to
# determine which coords need to be treated as leaves of the flattened
# structure vs static data.
coord.attrs[_JAX_COORD_ATTR_NAME] = True
renamed_jax_coords[f'__NONINDEX_{name}'] = coord
x = x.assign_coords(coords=coords | renamed_jax_coords)
rename_back_mapping = {f'__NONINDEX_{name}': name for name in jax_coords}
if isinstance(x, xarray.Dataset):
# Using 'rename' doesn't work if renaming to the same name as a dimension.
return x.rename_vars(rename_back_mapping)
else: # DataArray
return x.rename(rename_back_mapping)
def assign_jax_coords(
x: DatasetOrDataArray,
jax_coords: Optional[Mapping[Hashable, Any]] = None,
**jax_coords_kwargs
) -> DatasetOrDataArray:
"""Assigns only jax_coords, with same API as xarray's assign_coords."""
return assign_coords(x, jax_coords=jax_coords or jax_coords_kwargs)
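# A minimal sketch (variable names are illustrative): attaching a JAX array as
# a non-index coordinate so it travels through jax.jit together with the data.
def _example_jax_coord_usage():
  """Builds a DataArray whose 'time' coordinate is JAX data (illustrative)."""
  da = DataArray(
      data=jnp.zeros((3, 4)),
      dims=('time', 'x'),
      jax_coords={'time': jnp.arange(3)})
  # 'time' is a tree leaf for jax.tree_util, but cannot be used with .sel().
  return jax_data(da.coords['time'])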
def wrap(value):
"""Wraps JAX arrays for use in xarray, passing through other values."""
if isinstance(value, jax.Array):
return JaxArrayWrapper(value)
else:
return value
def unwrap(value, require_jax=False):
"""Unwraps wrapped JAX arrays used in xarray, passing through other values."""
if isinstance(value, JaxArrayWrapper):
return value.jax_array
elif isinstance(value, jax.Array):
return value
elif require_jax:
raise TypeError(f'Expected JAX array, found {type(value)}.')
else:
return value
def _wrapped(func):
"""Surrounds a function with JAX array unwrapping/wrapping."""
def wrapped_func(*args, **kwargs):
args, kwargs = tree.map_structure(unwrap, (args, kwargs))
result = func(*args, **kwargs)
return tree.map_structure(wrap, result)
return wrapped_func
def unwrap_data(
value: Union[xarray.Variable, xarray.DataArray],
require_jax: bool = False
) -> Union[jax.Array, np.ndarray]:
"""The unwrapped (see unwrap) data of a an xarray.Variable or DataArray."""
return unwrap(value.data, require_jax=require_jax)
def unwrap_vars(
dataset: Mapping[Hashable, xarray.DataArray],
require_jax: bool = False
) -> Mapping[str, Union[jax.Array, np.ndarray]]:
"""The unwrapped data (see unwrap) of the variables in a dataset."""
# xarray types variable names as Hashable, but in practice they're invariably
# strings and we convert to str to allow for a more useful return type.
return {str(name): unwrap_data(var, require_jax=require_jax)
for name, var in dataset.items()}
def unwrap_coords(
dataset: Union[xarray.Dataset, xarray.DataArray],
require_jax: bool = False
) -> Mapping[str, Union[jax.Array, np.ndarray]]:
"""The unwrapped data (see unwrap) of the coords in a Dataset or DataArray."""
return {str(name): unwrap_data(var, require_jax=require_jax)
for name, var in dataset.coords.items()}
def jax_data(value: Union[xarray.Variable, xarray.DataArray]) -> jax.Array:
"""Like unwrap_data, but will complain if not a jax array."""
# Implementing this separately so we can give a more specific return type
# for it.
return cast(jax.Array, unwrap_data(value, require_jax=True))
def jax_vars(
dataset: Mapping[Hashable, xarray.DataArray]) -> Mapping[str, jax.Array]:
"""Like unwrap_vars, but will complain if vars are not all jax arrays."""
return cast(Mapping[str, jax.Array], unwrap_vars(dataset, require_jax=True))
class JaxArrayWrapper(np.lib.mixins.NDArrayOperatorsMixin):
"""Wraps a JAX array into a duck-typed array suitable for use with xarray.
This uses an older duck-typed array protocol based on __array_ufunc__ and
__array_function__ which works with numpy and xarray. This is in the process
of being superseded by the Python array API standard
(https://data-apis.org/array-api/latest/index.html), but JAX and xarray
haven't implemented it yet. Once they have, we should be able to get rid of
this wrapper and use JAX arrays directly with xarray.
"""
def __init__(self, jax_array):
self.jax_array = jax_array
def __array_ufunc__(self, ufunc, method, *args, **kwargs):
for x in args:
if not isinstance(x, (jax.typing.ArrayLike, type(self))):
return NotImplemented
if method != '__call__':
return NotImplemented
try:
# Get the corresponding jax.numpy function to the NumPy ufunc:
func = getattr(jnp, ufunc.__name__)
except AttributeError:
return NotImplemented
# There may be an 'out' kwarg requesting an in-place operation, e.g. when
# this is called via __iadd__ (+=), __imul__ (*=) etc. JAX doesn't support
# in-place operations so we just remove this argument and have the ufunc
# return a fresh JAX array instead.
kwargs.pop('out', None)
return _wrapped(func)(*args, **kwargs)
def __array_function__(self, func, types, args, kwargs):
try:
# Get the corresponding jax.numpy function to the NumPy function:
func = getattr(jnp, func.__name__)
except AttributeError:
return NotImplemented
return _wrapped(func)(*args, **kwargs)
def __repr__(self):
return f'xarray_jax.JaxArrayWrapper({repr(self.jax_array)})'
# NDArrayOperatorsMixin already proxies most __dunder__ operator methods.
# We need to proxy through a few more methods in a similar way:
# Essential array properties:
@property
def shape(self):
return self.jax_array.shape
@property
def dtype(self):
return self.jax_array.dtype
@property
def ndim(self):
return self.jax_array.ndim
@property
def size(self):
return self.jax_array.size
# Array methods not covered by NDArrayOperatorsMixin:
# Allows conversion to numpy array using np.asarray etc. Warning: doing this
# will fail in a jax.jit-ed function.
def __array__(self, dtype=None, context=None):
return np.asarray(self.jax_array, dtype=dtype)
__getitem__ = _wrapped(lambda array, *args: array.__getitem__(*args))
# We drop the kwargs on this as they are not supported by JAX, but xarray
# uses at least one of them (the copy arg).
astype = _wrapped(lambda array, *args, **kwargs: array.astype(*args))
# There are many more methods which are more canonically available via (j)np
# functions, e.g. .sum() available via jnp.sum, and also mean, max, min,
# argmax, argmin etc. We don't attempt to proxy through all of these as
# methods, since this doesn't appear to be expected from a duck-typed array
# implementation. But there are a few which xarray calls as methods, so we
# proxy those:
transpose = _wrapped(jnp.transpose)
reshape = _wrapped(jnp.reshape)
all = _wrapped(jnp.all)
def apply_ufunc(func, *args, require_jax=False, **apply_ufunc_kwargs):
"""Like xarray.apply_ufunc but for jax-specific ufuncs.
Many numpy ufuncs will work fine out of the box with xarray_jax and
JaxArrayWrapper, since JaxArrayWrapper quacks (mostly) like a numpy array and
will convert many numpy operations to jax ops under the hood. For these
situations, xarray.apply_ufunc should work fine.
But sometimes you need a jax-specific ufunc which needs to be given a
jax array as input or return a jax array as output. In that case you should
use this helper as it will remove any JaxArrayWrapper before calling the func,
and wrap the result afterwards before handing it back to xarray.
Args:
func: A function that works with jax arrays (e.g. using functions from
jax.numpy) but otherwise meets the spec for the func argument to
xarray.apply_ufunc.
*args: xarray arguments to be mapped to arguments for func
(see xarray.apply_ufunc).
require_jax: Whether to require that inputs are based on jax arrays or allow
those based on plain numpy arrays too.
**apply_ufunc_kwargs: See xarray.apply_ufunc.
Returns:
Corresponding xarray results (see xarray.apply_ufunc).
"""
def wrapped_func(*maybe_wrapped_args):
unwrapped_args = [unwrap(a, require_jax) for a in maybe_wrapped_args]
result = func(*unwrapped_args)
# Result can be an array or a tuple of arrays, this handles both:
return jax.tree_util.tree_map(wrap, result)
return xarray.apply_ufunc(wrapped_func, *args, **apply_ufunc_kwargs)
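# A minimal sketch (the function name is illustrative): applying a jax-specific
# op that needs to see raw jax arrays, via the helper above.
def _example_apply_relu(data_array: xarray.DataArray) -> xarray.DataArray:
  """Applies jax.nn.relu elementwise to a (possibly jax-backed) DataArray."""
  return apply_ufunc(jax.nn.relu, data_array)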
def pmap(fn: Callable[..., Any],
dim: str,
axis_name: Optional[str] = None,
devices: ... = None,
backend: ... = None) -> Callable[..., Any]:
"""Wraps a subset of jax.pmap functionality to handle xarray input/output.
Constraints:
* Any Dataset or DataArray passed to the function must have `dim` as the
first dimension. This will be checked. You can ensure this if necessary
by calling `.transpose(dim, ...)` beforehand.
* All args and return values will be mapped over the first dimension,
it will use in_axes=0, out_axes=0.
* No support for static_broadcasted_argnums, donate_argnums etc.
Args:
fn: Function to be pmap'd which takes and returns trees which may contain
xarray Dataset/DataArray. Any Dataset/DataArrays passed as input must use
`dim` as the first dimension on all arrays.
dim: The xarray dimension name corresponding to the first dimension that is
pmapped over (pmap is called with in_axes=0, out_axes=0).
axis_name: Used by jax to identify the mapped axis so that parallel
collectives can be applied. Defaults to same as `dim`.
devices:
backend:
See jax.pmap.
Returns:
A pmap'd version of `fn`, which takes and returns Dataset/DataArray with an
extra leading dimension `dim` relative to what the original `fn` sees.
"""
input_treedef = None
output_treedef = None
def fn_passed_to_pmap(*flat_args):
assert input_treedef is not None
# Inside the pmap the original first dimension will no longer be present:
def check_and_remove_leading_dim(dims):
try:
index = dims.index(dim)
except ValueError:
index = None
if index != 0:
raise ValueError(f'Expected dim {dim} at index 0, found at {index}.')
return dims[1:]
with dims_change_on_unflatten(check_and_remove_leading_dim):
args = jax.tree_util.tree_unflatten(input_treedef, flat_args)
result = fn(*args)
nonlocal output_treedef
flat_result, output_treedef = jax.tree_util.tree_flatten(result)
return flat_result
pmapped_fn = jax.pmap(
fn_passed_to_pmap,
axis_name=axis_name or dim,
in_axes=0,
out_axes=0,
devices=devices,
backend=backend)
def result_fn(*args):
nonlocal input_treedef
flat_args, input_treedef = jax.tree_util.tree_flatten(args)
flat_result = pmapped_fn(*flat_args)
assert output_treedef is not None
# After the pmap an extra leading axis will be present, we need to add an
# xarray dimension for this when unflattening the result:
with dims_change_on_unflatten(lambda dims: (dim,) + dims):
return jax.tree_util.tree_unflatten(output_treedef, flat_result)
return result_fn
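# A minimal sketch (names are illustrative): pmapping a per-device computation
# over a leading 'device' dimension. It assumes every array in the Dataset has
# 'device' as its first dimension, with length jax.local_device_count().
def _example_pmap(dataset_with_device_dim: xarray.Dataset) -> xarray.Dataset:
  """Adds one to every variable, independently on each device."""
  def per_device_fn(ds: xarray.Dataset) -> xarray.Dataset:
    # Inside the pmap the Dataset no longer has the leading 'device' dimension.
    return ds + 1
  return pmap(per_device_fn, dim='device')(dataset_with_device_dim)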
# Register xarray datatypes with jax.tree_util.
DimsChangeFn = Callable[[Tuple[Hashable, ...]], Tuple[Hashable, ...]]
_DIMS_CHANGE_ON_UNFLATTEN_FN: contextvars.ContextVar[DimsChangeFn] = (
contextvars.ContextVar('dims_change_on_unflatten_fn'))
@contextlib.contextmanager
def dims_change_on_unflatten(dims_change_fn: DimsChangeFn):
"""Can be used to change the dims used when unflattening arrays into xarrays.
This is useful when some axes were added to / removed from the underlying jax
arrays after they were flattened using jax.tree_util.tree_flatten, and you
want to unflatten them again afterwards using the original treedef but
adjusted for the added/removed dimensions.
It can also be used with jax.tree_util.tree_map, when it's called with a
function that adds/removes axes or otherwise changes the axis order.
When dimensions are removed, any coordinates using those removed dimensions
will also be removed on unflatten.
This is implemented as a context manager that sets some thread-local state
affecting the behaviour of our unflatten functions, because it's not possible
to directly modify the treedef to change the dims/coords in it (and with
tree_map, the treedef isn't exposed to you anyway).
Args:
dims_change_fn: Maps a tuple of dimension names for the original
Variable/DataArray/Dataset that was flattened, to an updated tuple of
dimensions which should be used when unflattening.
Yields:
To a context manager in whose scope jax.tree_util.tree_unflatten and
jax.tree_util.tree_map will apply the dims_change_fn before reconstructing
xarrays from jax arrays.
"""
token = _DIMS_CHANGE_ON_UNFLATTEN_FN.set(dims_change_fn)
try:
yield
finally:
_DIMS_CHANGE_ON_UNFLATTEN_FN.reset(token)
def _flatten_variable(v: xarray.Variable) -> Tuple[
Tuple[jax.typing.ArrayLike], Tuple[Hashable, ...]]:
"""Flattens a Variable for jax.tree_util."""
children = (unwrap_data(v),)
aux = v.dims
return children, aux
def _unflatten_variable(
aux: Tuple[Hashable, ...],
children: Tuple[jax.typing.ArrayLike]) -> xarray.Variable:
"""Unflattens a Variable for jax.tree_util."""
dims = aux
dims_change_fn = _DIMS_CHANGE_ON_UNFLATTEN_FN.get(None)
if dims_change_fn: dims = dims_change_fn(dims)
return Variable(dims=dims, data=children[0])
def _split_static_and_jax_coords(
coords: xarray.core.coordinates.Coordinates) -> Tuple[
Mapping[Hashable, xarray.Variable], Mapping[Hashable, xarray.Variable]]:
static_coord_vars = {}
jax_coord_vars = {}
for name, coord in coords.items():
if coord.attrs.get(_JAX_COORD_ATTR_NAME, False):
jax_coord_vars[name] = coord.variable
else:
assert not isinstance(coord, (jax.Array, JaxArrayWrapper))
static_coord_vars[name] = coord.variable
return static_coord_vars, jax_coord_vars
def _drop_with_none_of_dims(
coord_vars: Mapping[Hashable, xarray.Variable],
dims: Tuple[Hashable]) -> Mapping[Hashable, xarray.Variable]:
return {name: var for name, var in coord_vars.items()
if set(var.dims) <= set(dims)}
class _HashableCoords(collections.abc.Mapping):
"""Wraps a dict of xarray Variables as hashable, used for static coordinates.
This needs to be hashable so that when an xarray.Dataset is passed to a
jax.jit'ed function, jax can check whether it's seen an array with the
same static coordinates(*) before or whether it needs to recompile the
function for the new values of the static coordinates.
(*) note jax_coords are not included in this; their value can be different
on different calls without triggering a recompile.
"""
def __init__(self, coord_vars: Mapping[Hashable, xarray.Variable]):
self._variables = coord_vars
def __repr__(self) -> str:
return f'_HashableCoords({repr(self._variables)})'
def __getitem__(self, key: Hashable) -> xarray.Variable:
return self._variables[key]
def __len__(self) -> int:
return len(self._variables)
def __iter__(self) -> Iterator[Hashable]:
return iter(self._variables)
def __hash__(self):
if not hasattr(self, '_hash'):
self._hash = hash(frozenset((name, var.data.tobytes())
for name, var in self._variables.items()))
return self._hash
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, type(self)):
return NotImplemented
elif self._variables is other._variables:
return True
else:
return self._variables.keys() == other._variables.keys() and all(
variable.equals(other._variables[name])
for name, variable in self._variables.items())
def _flatten_data_array(v: xarray.DataArray) -> Tuple[
# Children (data variable, jax_coord_vars):
Tuple[xarray.Variable, Mapping[Hashable, xarray.Variable]],
# Static auxiliary data (name, static_coord_vars):
Tuple[Optional[Hashable], _HashableCoords]]:
"""Flattens a DataArray for jax.tree_util."""
static_coord_vars, jax_coord_vars = _split_static_and_jax_coords(v.coords)
children = (v.variable, jax_coord_vars)
aux = (v.name, _HashableCoords(static_coord_vars))
return children, aux
def _unflatten_data_array(
aux: Tuple[Optional[Hashable], _HashableCoords],
children: Tuple[xarray.Variable, Mapping[Hashable, xarray.Variable]],
) -> xarray.DataArray:
"""Unflattens a DataArray for jax.tree_util."""
variable, jax_coord_vars = children
name, static_coord_vars = aux
# Drop static coords which have dims not present in any of the data_vars.
# These would generally be dims that were dropped by a dims_change_fn, but
# because static coordinates don't go through dims_change_fn on unflatten, we
# just drop them where this causes a problem.
# Since jax_coords go through the dims_change_fn on unflatten we don't need
# to do this for jax_coords.
static_coord_vars = _drop_with_none_of_dims(static_coord_vars, variable.dims)
return DataArray(
variable, name=name, coords=static_coord_vars, jax_coords=jax_coord_vars)
def _flatten_dataset(dataset: xarray.Dataset) -> Tuple[
# Children (data variables, jax_coord_vars):
Tuple[Mapping[Hashable, xarray.Variable],
Mapping[Hashable, xarray.Variable]],
# Static auxiliary data (static_coord_vars):
_HashableCoords]:
"""Flattens a Dataset for jax.tree_util."""
variables = {name: data_array.variable
for name, data_array in dataset.data_vars.items()}
static_coord_vars, jax_coord_vars = _split_static_and_jax_coords(
dataset.coords)
children = (variables, jax_coord_vars)
aux = _HashableCoords(static_coord_vars)
return children, aux
def _unflatten_dataset(
aux: _HashableCoords,
children: Tuple[Mapping[Hashable, xarray.Variable],
Mapping[Hashable, xarray.Variable]],
) -> xarray.Dataset:
"""Unflattens a Dataset for jax.tree_util."""
data_vars, jax_coord_vars = children
static_coord_vars = aux
dataset = xarray.Dataset(data_vars)
# Drop static coords which have dims not present in any of the data_vars.
# See corresponding comment in _unflatten_data_array.
static_coord_vars = _drop_with_none_of_dims(static_coord_vars, dataset.dims)
return assign_coords(
dataset, coords=static_coord_vars, jax_coords=jax_coord_vars)
jax.tree_util.register_pytree_node(
xarray.Variable, _flatten_variable, _unflatten_variable)
# This is a subclass of Variable but still needs registering separately.
# Flatten/unflatten for IndexVariable is a bit of a corner case but we do
# need to support it.
jax.tree_util.register_pytree_node(
xarray.IndexVariable, _flatten_variable, _unflatten_variable)
jax.tree_util.register_pytree_node(
xarray.DataArray, _flatten_data_array, _unflatten_data_array)
jax.tree_util.register_pytree_node(
xarray.Dataset, _flatten_dataset, _unflatten_dataset)
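# A minimal sketch (not part of the module API) of what the registrations above
# enable: jax.jit of a function that takes and returns xarray objects. The
# variable and dimension names are illustrative.
def _example_jit_through_dataset():
  """Doubles a small jax-backed Dataset inside jax.jit (illustrative)."""
  @jax.jit
  def double(ds: xarray.Dataset) -> xarray.Dataset:
    return ds * 2
  ds = Dataset(
      data_vars={'temperature': (('x',), jnp.arange(4.0))},
      coords={'x': np.arange(4)})
  # Returns {'temperature': jax array [0., 2., 4., 6.]}.
  return jax_vars(double(ds))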
|
graphcast-main
|
graphcast/xarray_jax.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quick script to test that celeb_a_hq experiment can import and run."""
from absl import app
import jax
import jax.numpy as jnp
from functa import experiment_meta_learning as exp
def main(_):
"""Tests the meta learning experiment on celeba."""
config = exp.get_config()
exp_config = config.experiment_kwargs.config
exp_config.dataset.name = 'celeb_a_hq_custom'
exp_config.training.per_device_batch_size = 2
exp_config.evaluation.batch_size = 2
exp_config.model.width = 16
exp_config.model.depth = 2
exp_config.model.latent_dim = 16
print(exp_config)
xp = exp.Experiment('train', jax.random.PRNGKey(0), exp_config)
bcast = jax.pmap(lambda x: x)
global_step = bcast(jnp.zeros(jax.local_device_count()))
rng = bcast(jnp.stack([jax.random.PRNGKey(0)] * jax.local_device_count()))
print('Taking a single experiment step for test purposes.')
result = xp.step(global_step, rng)
print(f'Step successfully taken, resulting metrics are {result}')
if __name__ == '__main__':
app.run(main)
|
functa-main
|
test_celeb_a_hq.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create modulation dataset for celeba."""
import os
from absl import app
from absl import flags
import dill
import haiku as hk
import numpy as np
import optax
from functa import data_utils
from functa import function_reps
from functa import helpers
from functa import pytree_conversions
flags.DEFINE_integer('mod_dim', 64,
                     'The dimensionality of the modulations to use. '
                     'Choose one of: 64, 128, 256, 512, 1024.')
flags.DEFINE_string('pretrained_weights_dir', '',
'Path to directory containing pre-trained weights.')
flags.DEFINE_string('save_to_dir', '',
'Path to directory where modulations should be saved.')
FLAGS = flags.FLAGS
# Define function that creates a dict of modulations & psnrs for each dataset
def create_modulation_dataset(model, params, ds, num_steps, coords, lr,
l2_weight, noise_std):
"""Creates a dataset of modulations and corresponding psnr values.
Args:
model: Haiku transformed model that outputs rgb given 2d pixel coord inputs.
params: Parameters of ModulatedSiren or LatentModulatedSiren model.
ds: Dataset iterator that gives a single image at each iteration.
num_steps: Number of SGD steps to use for fitting each modulation.
coords: 2D pixel coordinates of shape (H, W, 2).
lr: Learning rate of SGD optimizer.
l2_weight: Weight for L2 regularisation of modulations.
noise_std: standard deviation of Gaussian noise applied to modulations.
Returns:
mod_data: Array of modulations shape (data_size, mod_dim).
psnr_vals: Array of psnrs shape (data_size,).
psnr_mean: psnr corresponding to the mean rec loss across the dataset.
"""
  # Define SGD optimizer used for the inner loop gradient steps wrt modulations
opt_inner = optax.sgd(lr)
mod_list = []
psnr_list = []
rec_loss_list = []
for i, datum in enumerate(ds):
fitted_params, _, psnr = helpers.inner_loop(
params=params,
model=model,
opt_inner=opt_inner,
inner_steps=num_steps,
coords=coords,
targets=datum['array'],
return_all_psnrs=False,
return_all_losses=False,
l2_weight=l2_weight,
noise_std=noise_std)
rec_loss = helpers.inverse_psnr_fn(psnr)
_, modulations = function_reps.partition_params(fitted_params)
modulations, _, _ = pytree_conversions.pytree_to_array(modulations)
mod_list.append(modulations)
psnr_list.append(psnr)
rec_loss_list.append(rec_loss)
print(f'data point {(i+1):5d} has psnr {psnr:2.2f} dB')
mod_data = np.stack(mod_list) # [num_data, mod_dim]
psnr_vals = np.array(psnr_list) # [num_data]
rec_losses = np.array(rec_loss_list) # [num_data]
mean_rec_loss = np.mean(rec_losses)
psnr_mean = helpers.psnr_fn(mean_rec_loss)
return mod_data, psnr_vals, psnr_mean
def main(_):
# Load params of LatentModulatedSiren model
## Define path to checkpoint, downloaded from codebase
mod_dim = FLAGS.mod_dim # choose one of 64, 128, 256, 512, 1024
assert mod_dim in [
64, 128, 256, 512, 1024
], f'`mod_dim` should be one of [64, 128, 256, 512, 1024], got {mod_dim}'
path = os.path.join(FLAGS.pretrained_weights_dir,
f'celeba_params_{mod_dim}_latents.npz')
## Check that checkpoint file exists
assert os.path.exists(path), 'Pretrained weights file does not exist.'
with open(path, 'rb') as f:
ckpt = dill.load(f)
params = ckpt['params']
config = ckpt['config']
assert config['model']['type'] == 'latent_modulated_siren'
print(f'Loaded params for model with {mod_dim} latent dimensions.')
## Create haiku transformed model that runs the forward pass.
## Only keep configs needed for model construction from model config
  ## The `None` default ensures `pop` raises no error if the key is already absent
model_config = config['model'].copy()
model_config.pop('type', None)
model_config.pop('l2_weight', None)
model_config.pop('noise_std', None)
def model_net(coords):
hk_model = function_reps.LatentModulatedSiren(
out_channels=config['dataset']['num_channels'], **model_config)
return hk_model(coords)
model = hk.without_apply_rng(hk.transform(model_net))
# Check that user specified directory exists if specified
if FLAGS.save_to_dir:
assert os.path.isdir(
FLAGS.save_to_dir
), f'User specified directory {FLAGS.save_to_dir} does not exist.'
# Setup celeba dataset
train_ds = data_utils.load_dataset('celeb_a_hq_custom', subset='train')
test_ds = data_utils.load_dataset('celeb_a_hq_custom', subset='test')
# Iterate across training set to produce train modulations
train_mod_data, train_psnr_vals, train_psnr_mean = create_modulation_dataset(
model=model,
params=params,
ds=train_ds,
num_steps=config['training']['inner_steps'],
coords=function_reps.get_coordinate_grid(config['dataset']['resolution']),
lr=config['opt_inner']['lr'],
l2_weight=config['model']['l2_weight'],
noise_std=config['model']['noise_std'],
)
print(f'Training set psnr: {train_psnr_mean}')
# Repeat with test set
test_mod_data, test_psnr_vals, test_psnr_mean = create_modulation_dataset(
model=model,
params=params,
ds=test_ds,
num_steps=config['training']['inner_steps'],
coords=function_reps.get_coordinate_grid(config['dataset']['resolution']),
lr=config['opt_inner']['lr'],
l2_weight=config['model']['l2_weight'],
noise_std=config['model']['noise_std'],
)
print(f'Test set psnr: {test_psnr_mean}')
# Save modulations to user specified directory
train_dict = dict(modulation=train_mod_data, psnr=train_psnr_vals)
test_dict = dict(modulation=test_mod_data, psnr=test_psnr_vals)
modulation_data = dict(train=train_dict, test=test_dict)
path = os.path.join(FLAGS.save_to_dir,
f'celeba_modulations_{mod_dim}_latents.npz')
with open(path, 'wb') as f:
dill.dump(modulation_data, f)
if __name__ == '__main__':
app.run(main)
|
functa-main
|
modulation_dataset_writer.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quick script to test that srn_cars experiment can import and run."""
from absl import app
import jax
import jax.numpy as jnp
from functa import experiment_meta_learning as exp
def main(_):
"""Tests the meta learning experiment on srn_cars."""
config = exp.get_config()
exp_config = config.experiment_kwargs.config
exp_config.dataset.name = 'srn_cars'
exp_config.dataset.num_channels = 4
exp_config.dataset.resolution = 128
exp_config.dataset.type = 'scene'
exp_config.training.per_device_batch_size = 1
exp_config.evaluation.batch_size = 1
exp_config.model.width = 16
exp_config.model.depth = 2
exp_config.model.latent_dim = 16
exp_config.dataset.num_points_per_ray = 2
exp_config.training.subsample = True
exp_config.training.subsample_num_points = 2
exp_config.training.subsample_num_views = 2
print(exp_config)
xp = exp.Experiment('train', jax.random.PRNGKey(0), exp_config)
bcast = jax.pmap(lambda x: x)
global_step = bcast(jnp.zeros(jax.local_device_count()))
rng = bcast(jnp.stack([jax.random.PRNGKey(0)] * jax.local_device_count()))
print('Taking a single experiment step for test purposes.')
result = xp.step(global_step, rng)
print(f'Step successfully taken, resulting metrics are {result}')
if __name__ == '__main__':
app.run(main)
|
functa-main
|
test_srn_cars.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Minimal NeRF implementation.
A simplified version of:
- https://github.com/tancik/learnit/blob/main/Experiments/shapenet.ipynb
- https://github.com/bmild/nerf/blob/master/tiny_nerf.ipynb
"""
import functools
from typing import Tuple, Union
import chex
import jax
import jax.numpy as jnp
Array = jnp.ndarray
PRNGKey = chex.PRNGKey
MAX_DENSITY = 10.
def get_rays(height: int, width: int, focal: float, pose: Array):
"""Converts pose information to ray origins and directions for NeRF.
Args:
height: Height of image.
width: Width of image.
focal: Focal length.
pose: Pose (camera to world matrix) of shape (4, 4).
Returns:
Rays array of shape (2, H, W, 3), where rays[0] corresponds to ray
origins and rays[1] to ray directions.
"""
i, j = jnp.meshgrid(jnp.arange(width), jnp.arange(height), indexing='xy')
# use pixel center coordinates instead of corner coordinates.
extra_shift = .5
dirs = jnp.stack([(i - width * .5 + extra_shift) / focal,
-(j - height * .5 + extra_shift) / focal,
-jnp.ones_like(i)], -1)
rays_d = jnp.sum(dirs[..., jnp.newaxis, :] * pose[:3, :3], -1)
rays_o = jnp.broadcast_to(pose[:3, -1], rays_d.shape)
return jnp.stack([rays_o, rays_d], 0) # (2, H, W, 3)
# This batched function will output arrays of shape (2, B, H, W, 3) since we use
# out_axes=1 (i.e. batching is over 1st dimension *not* 0th dimension). Note
# that this is all for a *single scene*.
get_rays_batch = jax.vmap(get_rays, in_axes=[None, None, None, 0], out_axes=1)
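# Hypothetical shape-check sketch (not in the original repo): illustrates the
# output shapes of get_rays for a single identity pose and of get_rays_batch
# for a stack of three poses. All values are purely illustrative.
def _example_ray_shapes():
  pose = jnp.eye(4)                                           # camera-to-world
  single = get_rays(4, 4, 2.0, pose)                          # (2, 4, 4, 3)
  batched = get_rays_batch(4, 4, 2.0, jnp.stack([pose] * 3))  # (2, 3, 4, 4, 3)
  return single.shape, batched.shape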
def volumetric_rendering(rgb: Array, density: Array, z_vals: Array,
rays_d: Array, white_background: bool):
"""Volumetric rendering.
Args:
rgb: rgb at 3D coordinates. Array shape (..., num_points_per_ray, 3).
density: density at 3D coordinates. Array shape (..., num_points_per_ray).
z_vals: distances to 3D coordinates from ray origin.
Array shape (..., num_points_per_ray).
rays_d: ray directions. Array shape (..., 3)
white_background: If True sets default RGB value to be 1, otherwise will be
set to 0 (black).
Returns:
rgb_map: Rendered view(s). Array of shape (..., 3).
depth_map: Depth map of view(s). Array of shape (...).
weights: Weights for rendering rgb_map from rgb values.
Array of shape (..., num_points_per_ray).
"""
# Calculate distance between consecutive points along ray.
distance_between_points = z_vals[..., 1:] - z_vals[..., :-1]
# The following line is a slightly convoluted way of adding a single extra
# element to the distances array (since we made it 1 element shorter than
# full ray). This will now have the same length as opacities.
distances = jnp.concatenate([
distance_between_points,
1e-3 * jnp.ones_like(distance_between_points[..., :1])
], -1) # (..., num_points_per_ray)
# Correct distances by magnitude of ray direction
distances = distances * jnp.linalg.norm(rays_d[..., None, :], axis=-1)
# Alpha will have a value between 0 and 1
alpha = 1. - jnp.exp(-density * distances) # (..., num_points_per_ray)
# Ensure transmittance is <= 1 (and greater than 1e-10)
trans = jnp.minimum(1., 1. - alpha + 1e-10)
# Make the first transmittance value along the ray equal to 1 for every ray
trans = jnp.concatenate([jnp.ones_like(trans[..., :1]), trans[..., :-1]],
-1) # (..., num_points_per_ray)
cum_trans = jnp.cumprod(trans, -1) # T_i in Equation (3) of Nerf paper.
weights = alpha * cum_trans # (..., num_points_per_ray)
# Sum RGB values along the ray
rgb_map = jnp.sum(weights[..., None] * rgb, -2) # (..., 3)
# Optionally make background white
if white_background:
acc_map = jnp.sum(weights, -1) # Accumulate weights (...)
rgb_map = rgb_map + (1. - acc_map[..., None]) # Add white background
# Weigh distance along ray to get depth - weighted average of distances to
# points on ray
depth_map = jnp.sum(weights * z_vals, -1)
return rgb_map, depth_map, weights
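# Hypothetical shape-check sketch (not in the original repo): runs
# volumetric_rendering on constant colours/densities for 5 rays with 4 samples
# each, to make the expected input and output shapes concrete.
def _example_volumetric_rendering_shapes():
  num_rays, num_points = 5, 4
  rgb = jnp.ones((num_rays, num_points, 3))
  density = jnp.ones((num_rays, num_points))
  z_vals = jnp.broadcast_to(jnp.linspace(0.8, 1.8, num_points),
                            (num_rays, num_points))
  rays_d = jnp.ones((num_rays, 3))
  rgb_map, depth_map, weights = volumetric_rendering(
      rgb, density, z_vals, rays_d, white_background=False)
  return rgb_map.shape, depth_map.shape, weights.shape  # (5, 3), (5,), (5, 4)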
def render_rays(model, params, rays: Array,
render_config: Tuple[int, float, float, bool],
rng: Union[int, PRNGKey] = 42, coord_noise: bool = False):
"""Renders rays through model of a single scene (with possibly many views).
Args:
model: Haiku transformed model, with input_size = 3, output_size = 4 (3
for RGB and 1 for density.)
params: Model params.
rays: Array of shape (2, ..., 3) containing ray origin and ray direction.
This is quite similar to coords in our other models. The ellipsis refers
to spatial dimensions and optional batch dimensions when using multiple
views. E.g. for a single view (H, W) or (H*W) and for B views (B, H, W)
or (B, H*W) or (B*H*W). Note that these can also be subsamples.
render_config: Tuple containing rendering configuration for NeRF.
This includes the following:
- num_points_per_ray (int): Number of coarse points per ray. Splits rays
into equally spaced points.
- near (float): Point nearest to the camera where ray starts.
- far (float): Point furthest from the camera where ray ends.
- white_background (bool): If True sets default RGB value to be 1,
otherwise will be set to 0 (black).
rng: PRNG key for adding coordinate noise.
coord_noise: whether to add coordinate noise or not.
Returns:
rgb_map: Rendered view(s). Array of shape (..., 3).
depth_map: Depth map of view(s). Array of shape (...).
"""
if isinstance(rng, int):
rng = jax.random.PRNGKey(rng)
# Unpack render config
num_points_per_ray, near, far, white_background = render_config
# Split rays into ray origins and ray directions
rays_o, rays_d = rays # both [..., 3]
# Compute 3D query coordinates
z_vals = jnp.linspace(near, far, num_points_per_ray)
# Optionally add coord noise (randomized stratified sampling)
if coord_noise:
mids = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
upper = jnp.concatenate([mids, z_vals[..., -1:]], -1)
lower = jnp.concatenate([z_vals[..., :1], mids], -1)
t_rand = jax.random.uniform(rng, shape=(*rays_o.shape[:-1],
num_points_per_ray))
z_vals = lower + (upper - lower) * t_rand
else:
# broadcast to make returned shape consistent (..., num_points_per_ray)
z_vals = jnp.broadcast_to(z_vals[None, :],
(*rays_o.shape[:-1], num_points_per_ray))
# The below line uses (a lot of) broadcasting. In terms of shapes:
# (...,1,3) + (...,1,3) * (num_points_per_ray,1) = (...,num_points_per_ray,3)
coords = rays_o[..., None, :] + rays_d[..., None, :] * z_vals[..., :, None]
# Should be an instance of (Latent)ModulatedSiren that outputs 4-dim vector
out = model.apply(params, coords) # (..., num_points_per_ray, 4)
# Compute colors and volume densities
rgb, density = out[..., :3], out[
..., 3] # (..., num_points_per_ray, 3), (..., num_points_per_ray)
# Ensure density is positive (..., num_points_per_ray)
# This is different to the usual relu, but we found that this leads to more
# stable training for meta-learning.
density = jax.nn.elu(density, alpha=0.1) + 0.1
density = jnp.clip(density, 0., MAX_DENSITY) # upper bound density at 10
# Do volumetric rendering
rgb_map, depth_map, _ = volumetric_rendering(rgb, density, z_vals,
rays_d, white_background)
return rgb_map, depth_map
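# Hypothetical sketch (not in the original repo) of the coordinate construction
# inside render_rays: equally spaced depths between near and far are broadcast
# along each ray, giving (..., num_points_per_ray, 3) query coordinates. No
# model or rendering is involved; the near/far values are illustrative.
def _example_ray_sampling():
  rays_o, rays_d = get_rays(4, 4, 2.0, jnp.eye(4))  # each (4, 4, 3)
  num_points_per_ray, near, far = 8, 0.8, 1.8
  z_vals = jnp.linspace(near, far, num_points_per_ray)
  coords = rays_o[..., None, :] + rays_d[..., None, :] * z_vals[..., :, None]
  return coords.shape  # (4, 4, 8, 3)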
@functools.partial(
jax.jit, static_argnames=['model', 'height', 'width', 'render_config'])
def render_pose(model, params, height: int, width: int, focal: float,
pose: Array, render_config: Tuple[int, float, float, bool]):
"""Renders NeRF scene in a given pose.
Args:
model: Haiku transformed model, with input_size = 3, output_size = 4 (3
for RGB and 1 for density.)
params: Model params.
height: Height of image.
width: Width of image.
focal: Focal length.
pose: Can either contain a single pose or a batch of poses, i.e. an
array of shape (4, 4) or (B, 4, 4).
render_config: Tuple containing rendering configuration for NeRF.
This includes the following:
- num_points_per_ray (int): Number of points per ray. Splits rays
into equally spaced points.
- near (float): Point nearest to the camera where ray starts.
- far (float): Point furthest from the camera where ray ends.
- white_background (bool): If True sets default RGB value to be 1,
otherwise will be set to 0 (black).
Returns:
rgb_map: Rendered view(s). Array of shape (..., 3).
depth_map: Depth map of view(s). Array of shape (...).
"""
if pose.ndim == 3:
rays = get_rays_batch(height, width, focal, pose)
else:
rays = get_rays(height, width, focal, pose)
return render_rays(model, params, rays, render_config)
|
functa-main
|
minimal_nerf.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for loading and processing datasets."""
from typing import Mapping, Optional
import jax.numpy as jnp
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
Array = jnp.ndarray
Batch = Mapping[str, np.ndarray]
DATASET_ATTRIBUTES = {
'celeb_a_hq_custom': {
'num_channels': 3,
'resolution': 64,
'type': 'image',
'train_size': 27_000,
'test_size': 3_000,
},
'srn_cars': {
'num_channels': 4,
'resolution': 128,
'type': 'scene',
'render_config': {
'near': 0.8,
'far': 1.8,
'white_background': True,
},
'train_size': 2458,
'test_size': 703,
},
}
def load_dataset(dataset_name: str,
subset: str,
batch_size: Optional[int] = None,
shuffle: bool = False,
repeat: bool = False,
num_examples: Optional[int] = None,
shuffle_buffer_size: int = 10000):
"""Tensorflow dataset loaders.
Args:
    dataset_name (string): One of the keys of DATASET_ATTRIBUTES.
    subset (string): One of 'train', 'test'.
    batch_size (int): Batch size. If None, the dataset is not batched.
    shuffle (bool): Whether to shuffle dataset.
    repeat (bool): Whether to repeat dataset.
    num_examples (int): If not None, returns only the first num_examples
      examples of the dataset (-1 keeps the full dataset).
    shuffle_buffer_size (int): Buffer size to use for shuffling dataset.
Returns:
Tensorflow dataset iterator.
"""
# Load dataset
if dataset_name.startswith('srn'):
ds = tfds.load(dataset_name, split=subset)
# Filter corrupted scenes that contain views with only white background
ds = ds.filter(filter_srn)
# Map pixels in [0,255] to [0,1] range
ds = ds.map(process_srn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
elif dataset_name.startswith('celeb_a_hq'):
# CelebAHQ does not have a test dataset, so do a 90/10
# split on training data to create train and test sets
if subset == 'train':
subset = 'train[:90%]'
elif subset == 'test':
subset = 'train[90%:]'
ds = tfds.load(dataset_name, split=subset)
# Map pixels in [0,255] to [0,1] range and map resolution from 128 to 64.
ds = ds.map(process_celeba,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Optionally subsample dataset
if num_examples is not None:
ds = ds.take(num_examples)
# Optionally shuffle dataset
if shuffle:
ds = ds.shuffle(shuffle_buffer_size)
# Optionally repeat dataset if repeat
if repeat:
ds = ds.repeat()
if batch_size is not None:
ds = ds.batch(batch_size)
# Convert from tf.Tensor to numpy arrays for use with Jax
return iter(tfds.as_numpy(ds))
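# Hypothetical usage sketch (not in the original repo): assumes the custom
# `celeb_a_hq_custom` TFDS dataset has been built locally. Each element carries
# an 'array' image of shape [64, 64, 3] with values in [0, 1], so with
# batch_size=16 a batch has shape [16, 64, 64, 3].
def _example_load_celeba_batch():
  ds = load_dataset('celeb_a_hq_custom', subset='train', batch_size=16,
                    shuffle=True)
  batch = next(ds)
  return batch['array'].shape  # (16, 64, 64, 3)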
def filter_srn(batch):
views = batch['images'] # shape [num_views, H, W, 3]
# Take min and max for each view
min_val = tf.math.reduce_min(views, axis=(1, 2, 3)) # [num_views]
max_val = tf.math.reduce_max(views, axis=(1, 2, 3)) # [num_views]
# Take the difference then the minimum across views
# Some views have only white background iff this min == 0
min_diff = tf.math.reduce_min(max_val - min_val) # scalar
return tf.math.not_equal(min_diff, 0)
def process_srn(batch: Batch):
batch['images'] = tf.cast(batch['images'], tf.float32) / 255.
return batch
def process_celeba(batch: Batch):
image = tf.cast(batch['image'], tf.float32) / 255.
# Resize from 128 to 64 resolution.
image = tf.image.resize(image, [64, 64]) # [64, 64, 3]
return {'array': image}
|
functa-main
|
data_utils.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions to convert between pytree and array representations."""
import jax
import jax.numpy as jnp
def flattened_pytree_to_array(flattened_pytree):
"""Converts a flattened pytree to a single concatenated array.
Args:
flattened_pytree (List of Array): List of arrays returned from
jax.tree_flatten. Note each array must be 1-dimensional.
Returns:
Concatenated array and concatenation indices.
"""
# Extract concatenation indices so we can later "unconcatenate" array to
# recreate pytree
concat_idx = []
current_idx = 0
for np_array in flattened_pytree:
current_idx += len(np_array)
concat_idx.append(current_idx)
# Return concatenated pytree and concatenation indices
return jnp.concatenate(flattened_pytree), concat_idx
def array_to_flattened_pytree(concat_array, concat_idx):
"""Converts a concatenated numpy array to a list of numpy arrays.
Args:
concat_array (Array):
concat_idx (List of int):
Returns:
A flattened pytree (i.e. a list of numpy arrays).
Notes:
Inverse function of flattened_pytree_to_array.
"""
# Split array according to concat idx
flattened_pytree = []
prev_idx = 0
for idx in concat_idx:
flattened_pytree.append(concat_array[prev_idx:idx])
prev_idx = idx
return flattened_pytree
def pytree_to_array(pytree):
  """Converts a pytree to a single concatenated array.
Args:
pytree (Pytree):
Returns:
Concatenated array, concatenation indices and tree definition which are
required to reconstruct pytree.
Notes:
Note that pytree must contain only one dimensional tensors (as is the case
for example with a pytree of modulations).
"""
flattened_pytree, tree_def = jax.tree_util.tree_flatten(pytree)
concat_array, concat_idx = flattened_pytree_to_array(flattened_pytree)
return concat_array, concat_idx, tree_def
def array_to_pytree(concat_array, concat_idx, tree_def):
"""Converts a concatenated array to a pytree.
Args:
concat_array (Array):
concat_idx (List of int):
tree_def (TreeDef):
Returns:
The reconstructed pytree.
"""
flattened_pytree = array_to_flattened_pytree(concat_array, concat_idx)
return jax.tree_util.tree_unflatten(tree_def, flattened_pytree)
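# Hypothetical round-trip demo (not in the original repo): a pytree of 1-D
# arrays is concatenated into a single flat array and then reconstructed.
def _example_roundtrip():
  pytree = {'a': jnp.arange(3.0), 'b': jnp.arange(2.0)}
  concat_array, concat_idx, tree_def = pytree_to_array(pytree)
  # concat_array has shape (5,) and concat_idx == [3, 5].
  return array_to_pytree(concat_array, concat_idx, tree_def)  # equals `pytree`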
|
functa-main
|
pytree_conversions.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Jaxline meta-learning experiment for functa."""
import sys
from typing import Generator, List, Mapping, Text, Tuple, Union
from absl import app
from absl import flags
from absl import logging
import functools
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import base_config
from jaxline import experiment
from jaxline import platform
from jaxline import utils
from ml_collections import config_dict
import optax
from functa import data_utils
from functa import function_reps
from functa import helpers
from functa import minimal_nerf
from functa import pytree_conversions
FLAGS = flags.FLAGS
Array = jnp.ndarray
Batch = Mapping[str, Array]
OptState = optax.OptState
PRNGKey = chex.PRNGKey
Scalars = Mapping[Text, Array]
def get_config():
"""Return config object for training."""
  # Several config settings are defined internally by jaxline;
  # use this function to extract them.
config = base_config.get_base_config()
# These are experiment specific config arguments
config.experiment_kwargs = config_dict.ConfigDict()
exp = config.experiment_kwargs.config = config_dict.ConfigDict()
# Dataset config
exp.dataset = config_dict.ConfigDict()
# 'celeb_a_hq_custom' or 'srn_cars'
exp.dataset.name = 'celeb_a_hq_custom'
exp.dataset.num_channels = data_utils.DATASET_ATTRIBUTES[
exp.dataset.name]['num_channels']
exp.dataset.resolution = data_utils.DATASET_ATTRIBUTES[
exp.dataset.name]['resolution']
exp.dataset.type = data_utils.DATASET_ATTRIBUTES[
exp.dataset.name]['type']
# Define num_points_per_ray for scene data.
if exp.dataset.type == 'scene':
exp.dataset.num_points_per_ray = 32
# Optimizer config
exp.opt_inner = config_dict.ConfigDict()
exp.opt_inner.lr = 1e-2
exp.opt_outer = config_dict.ConfigDict()
exp.opt_outer.lr = 3e-6
# Model config
exp.model = config_dict.ConfigDict()
exp.model.type = 'latent_modulated_siren'
exp.model.w0 = 30.
exp.model.width = 512
exp.model.depth = 15
exp.model.modulate_scale = False
exp.model.modulate_shift = True
exp.model.l2_weight = 0.
exp.model.noise_std = 0.
# The following three attributes are only used if model.type is
# 'latent_modulated_siren'
exp.model.latent_dim = 128
# Empty tuple below corresponds to a linear map. This always gave better PSNR
# compared to deeper MLPs.
exp.model.layer_sizes = ()
exp.model.latent_init_scale = 0.
# The following attributes are only required if using meta-SGD
exp.model.use_meta_sgd = True
exp.model.meta_sgd_init_range = (0.005, 0.1)
exp.model.meta_sgd_clip_range = (0., 1.)
# Training config
per_device_batch_size = 1 if exp.dataset.type == 'scene' else 16
exp.training = config_dict.ConfigDict()
exp.training.per_device_batch_size = per_device_batch_size
exp.training.inner_steps = 3
exp.training.repeat = True
exp.training.coord_noise = False
# Define subsampling options for scenes
if exp.dataset.type == 'scene':
exp.training.subsample = True
# Number of rays to subsample per view.
exp.training.subsample_num_points = 1024
# Number of views to subsample.
exp.training.subsample_num_views = 8
else:
exp.training.subsample = False
# Evaluation config
exp.evaluation = config_dict.ConfigDict()
exp.evaluation.batch_size = per_device_batch_size
# Number of examples used for eval logging.
# Should be small for scenes (e.g. 10).
# Otherwise set to -1 to evaluate on entire test set.
exp.evaluation.num_examples = 10 if exp.dataset.type == 'scene' else -1
exp.evaluation.inner_steps = 3
exp.evaluation.shuffle = True
# Training loop config: log and checkpoint every minute.
config.training_steps = int(5e5)
config.log_train_data_interval = 60
config.log_tensors_interval = 60
config.save_checkpoint_interval = 60
config.train_checkpoint_all_hosts = False
config.checkpoint_dir = '/tmp/training/'
config.eval_specific_checkpoint_dir = '/tmp/training/'
return config
class Experiment(experiment.AbstractExperiment):
"""Meta-learning experiment."""
# Holds a map from object properties that will be checkpointed to their name
# within a checkpoint. Currently it is assumed that these are all sharded
# device arrays if we use CHECKPOINT_ATTRS. Using NON_BROADCAST assumes we
  # are using a single device.
CHECKPOINT_ATTRS = {
'_params': 'params',
'_opt_state': 'opt_state',
}
def __init__(self, mode, init_rng, config):
"""Initializes experiment."""
super().__init__(mode=mode, init_rng=init_rng)
self.mode = mode
self.init_rng = init_rng
# This config holds all the experiment specific keys defined in get_config
self.config = config
self.num_devices = jax.local_device_count()
# Define model and forward function
self.forward = hk.without_apply_rng(hk.transform(self._forward_fn))
# Define coordinate grid of image
if config.dataset.type == 'image':
self.coords = function_reps.get_coordinate_grid(config.dataset.resolution)
elif config.dataset.type == 'scene':
self.coords = jnp.ones((1, 3))
render_config = data_utils.DATASET_ATTRIBUTES[
self.config.dataset.name]['render_config']
self.render_config = (
self.config.dataset.num_points_per_ray, render_config['near'],
render_config['far'], render_config['white_background'])
else:
      raise ValueError(f'Unrecognised data type: {config.dataset.type}')
# Inner optimizer is used both for training and validation
self._opt_inner = optax.sgd(learning_rate=config.opt_inner.lr)
if self.mode == 'train':
# Broadcast RNG key so we can use same init on each device
init_rng = utils.bcast_local_devices(self.init_rng)
# Initialize parameters on each device using pmap
self._params = jax.pmap(self.forward.init)(init_rng,
utils.bcast_local_devices(
self.coords))
# Initialize optimizer
self._opt_outer = optax.adam(learning_rate=config.opt_outer.lr)
# Only outer optimizer has a state. Optimizer for inner loop is reset at
# every iteration. We also broadcast optimizer state so each device gets
# an identical copy
weights, _ = function_reps.partition_params(self._params)
self._opt_state = jax.pmap(self._opt_outer.init)(weights)
# Overwrite update_func method with its pmapped version (note that pmap
# automatically jits the function). We require an axis name as this will
# later be used to determine which axis to average the gradients over
# Note that all arguments will already be batched across devices.
self._update_func = jax.pmap(self._update_func, axis_name='i')
# Set up training dataset
self._train_input = self._build_train_input(
self.num_devices * self.config.training.per_device_batch_size)
else:
self._params = None
self._opt_state = None
self._eval_batch = jax.jit(self._eval_batch)
def _forward_fn(self, coords: Array) -> Array:
if self.config.model.type == 'modulated_siren':
model = function_reps.ModulatedSiren(
width=self.config.model.width,
depth=self.config.model.depth,
out_channels=self.config.dataset.num_channels,
w0=self.config.model.w0,
modulate_scale=self.config.model.modulate_scale,
modulate_shift=self.config.model.modulate_shift,
use_meta_sgd=self.config.model.use_meta_sgd,
meta_sgd_init_range=self.config.model.meta_sgd_init_range,
meta_sgd_clip_range=self.config.model.meta_sgd_clip_range)
elif self.config.model.type == 'latent_modulated_siren':
model = function_reps.LatentModulatedSiren(
width=self.config.model.width,
depth=self.config.model.depth,
out_channels=self.config.dataset.num_channels,
w0=self.config.model.w0,
modulate_scale=self.config.model.modulate_scale,
modulate_shift=self.config.model.modulate_shift,
latent_dim=self.config.model.latent_dim,
layer_sizes=self.config.model.layer_sizes,
latent_init_scale=self.config.model.latent_init_scale,
use_meta_sgd=self.config.model.use_meta_sgd,
meta_sgd_init_range=self.config.model.meta_sgd_init_range,
meta_sgd_clip_range=self.config.model.meta_sgd_clip_range)
return model(coords)
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, global_step, rng, *unused_args, **unused_kwargs):
"""See base class."""
# rng has shape (num_devices, 2)
# Get batch of training data.
# Reshape training data and coords to have batch device dimension.
per_device_batch_size = self.config.training.per_device_batch_size
if self.config.dataset.type == 'scene':
train_batch_dict = next(self._train_input)
train_batch = train_batch_dict['images'] # [bs, num_views, H, W, C]
poses = train_batch_dict['poses'] # [bs, num_views, 4, 4]
focal = train_batch_dict['focal'] # [bs]
height, width = train_batch.shape[-3], train_batch.shape[-2]
bs = focal.shape[0]
all_rays = []
for i in range(bs):
rays = minimal_nerf.get_rays_batch(height, width, focal[i],
poses[i]) # [2, num_views, H, W, 3]
all_rays.append(rays)
coords = jnp.stack(all_rays) # [bs, 2, num_views, H, W, 3]
coords = coords.reshape(
self.num_devices, per_device_batch_size,
*coords.shape[1:]
) # [num_devices, per_device_bs, 2, num_views, H, W, 3]
else:
train_batch_dict = next(self._train_input)
# [bs, *spatial_dims, len(spatial_dims)]
train_batch = train_batch_dict['array']
# self.coords has shape [*spatial_dims, len(spatial_dims)]
# Stack and reshape as appropriate
bs = self.num_devices * per_device_batch_size
coords = jnp.stack([self.coords for _ in range(bs)])
coords = coords.reshape(
self.num_devices, per_device_batch_size,
*self.coords.shape
) # [num_devices, per_device_bs, *spatial_dims, len(spatial_dims)]
# [num_devices, per_device_bs, *spatial_dims, C] where
# for scenes, *spatial_dims = [num_views, H, W]
train_batch = train_batch.reshape(
self.num_devices, per_device_batch_size,
*train_batch.shape[1:])
# optionally subsample coordinates for scenes
if self.config.dataset.type == 'scene' and self.config.training.subsample:
# flatten [H,W] dims of train_batch & coords for each view
train_batch = train_batch.reshape(
self.num_devices, per_device_batch_size,
train_batch.shape[2], -1, train_batch.shape[-1])
coords = coords.reshape(
self.num_devices, per_device_batch_size,
coords.shape[2], coords.shape[3], -1, coords.shape[-1])
# Sample views
sample_view_idx = jax.random.choice(
utils.get_first(rng), jnp.arange(train_batch.shape[2]),
(self.config.training.subsample_num_views,))
# Sample [H,W] indices
sample_idx = jax.random.choice(
utils.get_first(rng), jnp.arange(train_batch.shape[3]),
(self.config.training.subsample_num_points,))
# Subsample along flattened spatial dimension
train_batch = train_batch[:, :, sample_view_idx]
train_batch = train_batch[:, :, :, sample_idx]
coords = coords[:, :, :, sample_view_idx]
coords = coords[:, :, :, :, sample_idx]
# Update model parameters
self._params, self._opt_state, scalars = (
self._update_func(self._params, self._opt_state, train_batch,
coords, rng))
# Scalars (and global step) have identical copies stored on each device, so
# get these from first device (but could have chosen any device) to host
scalars = utils.get_first(scalars)
# Print losses, useful for debugging locally
global_step = utils.get_first(global_step)
print(f"Step {global_step}: train PSNR {scalars['train_psnr']:.2f}dB")
return scalars
def _build_train_input(self, batch_size: int) -> Generator[Array, None, None]:
"""See base class."""
if self.config.dataset.type == 'image':
shuffle_buffer_size = 10_000
elif self.config.dataset.type == 'scene':
shuffle_buffer_size = 500
return data_utils.load_dataset(
self.config.dataset.name,
'train',
batch_size=batch_size,
shuffle=True,
repeat=self.config.training.repeat,
shuffle_buffer_size=shuffle_buffer_size)
def _update_func(self, params: hk.Params, opt_outer_state: OptState,
train_batch: Array,
coords: Array,
rng: PRNGKey) -> Tuple[hk.Params, OptState, Scalars]:
"""Updates meta-learned init of params.
This method assumes we are given a *batch* of data. This method is run
individually on each device and does not know about multi-device/pmap.
This will update weights only, and not modulations.
Args:
params:
opt_outer_state:
train_batch: Shape (bs, *spatial_dims, channels).
coords: Shape (bs, *spatial_dims, num_spatial_dims) or flattened
equivalent. E.g. for images (bs, height, width, 2).
rng: Random number generator key, shape (2,).
Returns:
Updated params, optimization state and scalars (losses).
"""
# Compute loss and gradients (individually on each device)
weights, modulations = function_reps.partition_params(params)
_, model_grad = jax.value_and_grad(self._loss_func)(
weights, modulations, train_batch, coords, rng)
    # Average gradients across devices. As _update_func itself is pmapped, it
    # runs in parallel on each device, but we want every device to apply the
    # same parameter update. pmap alone does not communicate between devices,
    # so we use the collective jax.lax.pmean over the named axis to average the
    # gradients.
model_grad = jax.lax.pmean(model_grad, axis_name='i')
updates, opt_outer_state = self._opt_outer.update(model_grad,
opt_outer_state)
# Extract initial modulations (not the fitted ones), since we do not
# update the meta-learned init
weights, modulations = function_reps.partition_params(params)
# Apply updates to weights only
weights = optax.apply_updates(weights, updates)
# Merge updated weights with initial (unchanged) modulations
params = function_reps.merge_params(weights, modulations)
# Track training PSNR. Need to fit params with inner loop to track training
# psnr, as the `modulations` above are initial modulations.
fitted_params, loss = self._fit_params(params, train_batch, coords, rng)
_, fitted_mods = function_reps.partition_params(fitted_params)
mods_array = jax.vmap(lambda x: pytree_conversions.pytree_to_array(x)[0])(
fitted_mods) # [bs, mod_dim]
squared_l2_norm = jnp.sum(mods_array**2, axis=-1) # [bs]
rec_loss = jnp.mean(loss - self.config.model.l2_weight * squared_l2_norm)
l2_norm = jnp.mean(jnp.sqrt(squared_l2_norm))
scalars = {'train_psnr': helpers.psnr_fn(rec_loss), 'mod_l2_norm': l2_norm,
'rec_loss': rec_loss, 'loss': jnp.mean(loss)}
scalars = jax.lax.pmean(scalars, axis_name='i') # Average across devices
return params, opt_outer_state, scalars
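  # Hypothetical helper (not in the original repo): inside a pmapped function,
  # jax.lax.pmean averages a per-device value over the named axis, which is how
  # the gradients in _update_func above are synchronised across devices.
  def _example_pmean_sketch(self):
    mean_fn = jax.pmap(lambda x: jax.lax.pmean(x, axis_name='i'), axis_name='i')
    per_device = jnp.arange(jax.local_device_count(), dtype=jnp.float32)
    return mean_fn(per_device)  # every entry equals the mean of the inputs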
  def _loss_func(self, weights: hk.Params, modulations: hk.Params,
                 train_batch: Array, coords: Array, rng: PRNGKey) -> Array:
    """Loss function (which only meta-learns weights, not modulations).
Taking the gradient with respect to this loss function will backpropagate
through the entire inner loop.
Args:
weights: Model weights shared across train_batch.
modulations: Modulations specific to each image.
train_batch: Batch of data.
coords: Batch of coords.
rng: Random number generator key.
Returns:
loss.
"""
params = function_reps.merge_params(weights, modulations)
_, loss = self._fit_params(params, train_batch, coords, rng)
return jnp.mean(loss) # Take mean loss across batch
def _fit_params(self, params: hk.Params, train_batch: Array, coords: Array,
rng: PRNGKey) -> Tuple[hk.Params, Array]:
"""Fits params of a model by running inner loop.
Args:
params: Model parameters.
train_batch: Shape (bs, *spatial_dims, channels).
coords: Shape (bs, *spatial_dims, num_spatial_dims) or flattened
equivalent. E.g. for images (bs, height, width, 2).
rng: Shape (2,)
Returns:
fitted_params (bs, ...)
loss (bs)
"""
    # Apply the inner loop per batch element (via vmap below), returning a
    # *batch* of fitted parameters, one set per image.
    # We have a batch of data, so vmap across the 0th dimension of the data.
rng = jax.random.split(rng, num=train_batch.shape[0]) # [bs, 2]
fitted_params, loss, _ = jax.vmap(
self._inner_loop, in_axes=[None, 0, 0, 0])(params, train_batch, coords,
rng)
return fitted_params, loss
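  # Hypothetical helper (not in the original repo): the in_axes pattern used by
  # _fit_params above, where shared parameters are broadcast (in_axes=None) and
  # per-example data is mapped over its leading batch axis (in_axes=0).
  def _example_vmap_sketch(self):
    shared = jnp.array(2.0)
    per_example = jnp.arange(4.0)
    return jax.vmap(lambda p, x: p * x, in_axes=[None, 0])(shared, per_example)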
def _inner_loop(self, params: hk.Params,
targets: Array, coords: Array,
rng: PRNGKey) -> Tuple[hk.Params, Array, Array]:
"""Performs MAML (Finn et al.'17) inner loop and returns all PSNRs.
This function takes `self.inner_steps` SGD steps in the inner loop to update
modulations while keeping weights fixed. This function is applied to a
single image.
Args:
params: ModulatedSiren model params.
targets: Data to be fitted. Shape (*spatial_dims, out_channels).
coords: Shape (*spatial_dims, num_spatial_dims).
For images: (height, width, 2) or (height * width, 2).
rng:
Returns:
Updated params, loss, PSNR
"""
if self.config.dataset.type == 'scene':
is_nerf = True
render_config = self.render_config
else:
is_nerf = False
render_config = None
return helpers.inner_loop(params, self.forward, self._opt_inner,
self.config.training.inner_steps, coords,
targets,
is_nerf=is_nerf,
render_config=render_config,
l2_weight=self.config.model.l2_weight,
noise_std=self.config.model.noise_std,
rng=rng,
coord_noise=self.config.training.coord_noise)
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, rng, **unused_kwargs):
"""See base class."""
# This step is used to get global_step and scalars to host device when
# running on multiple devices
global_step = utils.get_first(global_step)
log_dict = jax.device_get(self._eval_epoch(rng))
scalars = log_dict['scalars']
# Print losses, useful for debugging locally
if self.config.dataset.type == 'scene':
key = (f'4_views_novel_val_psnr_'
f'{str(self.config.evaluation.inner_steps).zfill(2)}')
      print(f'Step {global_step}: 4 views novel val PSNR {scalars[key]:.2f}dB')
logging.info('[Step %d] Eval scalars: %s', global_step, scalars[key])
else:
print(f"Step {global_step}: val PSNR {scalars['val_psnr']:.2f}dB")
logging.info('[Step %d] Eval scalars: %s', global_step,
scalars['val_psnr'])
return scalars
def _eval_inner_loop(
self, params: hk.Params, image: Array, coords: Array
) -> Union[Tuple[hk.Params, Array, List[Array]], Tuple[
hk.Params, Array, List[Array], List[Array]]]:
"""Performs MAML inner loop and returns all PSNRs.
Args:
params: ModulatedSiren model params.
image: Image to be fitted. Shape (height, width, out_channels).
coords: Shape (height, width, 2) or (height * width, 2).
Returns:
Updated params, loss, all PSNR values.
"""
if self.config.dataset.type == 'scene':
is_nerf = True
render_config = self.render_config
else:
is_nerf = False
render_config = None
return helpers.inner_loop(
params,
self.forward,
self._opt_inner,
self.config.evaluation.inner_steps,
coords,
image,
return_all_psnrs=True,
return_all_losses=True,
is_nerf=is_nerf,
render_config=render_config,
l2_weight=self.config.model.l2_weight)
def _eval_batch(self, params: hk.Params,
val_batch_dict: Mapping[str, Array], rng: Array) -> Scalars:
"""Evaluates a batch."""
if self.config.dataset.type == 'scene':
logged_scalars = {}
novel_view_idx = (23, 55, 82, 119) # some evenly spread out view indices
subsample_num_points = self.config.training.subsample_num_points
# we use the view indices used in PixelNeRF for comparison.
for context_view_idx in [(63,), (63, 127), (0, 31, 63, 127)]:
num_context_views = len(context_view_idx)
images = val_batch_dict[
'images'][:, context_view_idx] # [bs, num_context_views, H, W, C]
poses = val_batch_dict[
'poses'][:, context_view_idx] # [bs, num_context_views, 4, 4]
focal = val_batch_dict['focal'] # [bs]
height, width = images.shape[-3], images.shape[-2]
bs = focal.shape[0]
all_rays = []
for i in range(bs):
# [2, num_context_views, H, W, 3]
rays = minimal_nerf.get_rays_batch(height, width, focal[i], poses[i])
all_rays.append(rays)
coords = jnp.stack(all_rays) # [bs, 2, num_context_views, H, W, 3]
# Flatten images and coords for subsampling
images_sub = images.reshape(bs, images.shape[1], -1, images.shape[-1])
coords_sub = coords.reshape(bs, 2, coords.shape[2], -1,
coords.shape[-1])
# Sample [H,W] indices
sample_idx = jax.random.choice(
rng, jnp.arange(images_sub.shape[2]), (subsample_num_points,))
images_sub = images_sub[:, :, sample_idx]
coords_sub = coords_sub[:, :, :, sample_idx]
out = jax.vmap(
self._eval_inner_loop,
in_axes=[None, 0, 0])(params, images_sub, coords_sub)
# Unpack outputs, which are (in order):
# - The fitted params per batch example.
# - loss per batch example.
# - psnr per batch example at each inner loop iteration.
# - loss per batch example at each inner loop iteration.
new_params = out[0] # Nested dict with values of shape [bs, ...]
loss = out[1] # Array shape [bs]
val_psnrs = out[2] # List of arrays shape [bs]
val_losses = out[3] # List of arrays shape [bs]
# Record PSNR at every step
for i in range(len(val_psnrs)):
idx = str(i).zfill(2)
logged_scalars.update({
f'{num_context_views}_views_val_psnr_{idx}': val_psnrs[i],
f'{num_context_views}_views_loss_{idx}': val_losses[i]
})
# Record validation PSNR and loss corresponding to number of inner steps
# during training.
num_steps = self.config.training.inner_steps
logged_scalars.update({
f'{num_context_views}_views_val_psnr': val_psnrs[num_steps],
f'{num_context_views}_views_loss': val_losses[num_steps]
})
# Render novel views given context
images = val_batch_dict[
'images'][:, novel_view_idx] # [bs, num_novel_views, H, W, C]
poses = val_batch_dict[
'poses'][:, novel_view_idx] # [bs, num_novel_views, 4, 4]
# [bs, num_novel_views, H, W, 3]
rgb, _ = jax.vmap(
minimal_nerf.render_pose,
in_axes=[None, 0, None, None, 0, 0,
None])(self.forward, new_params, height, width, focal,
poses, self.render_config)
loss = jnp.mean((rgb - images)**2, axis=(1, 2, 3, 4)) # [bs]
val_psnr = helpers.psnr_fn(loss) # [bs]
idx = str(self.config.evaluation.inner_steps).zfill(2)
logged_scalars.update({
f'{num_context_views}_views_novel_loss_{idx}': loss,
f'{num_context_views}_views_novel_val_psnr_{idx}': val_psnr
})
# Record modulation norms
_, mods = function_reps.partition_params(new_params)
mods_array = jax.vmap(lambda x: pytree_conversions.pytree_to_array(x)[0])(
mods) # [bs, mod_dim]
l2_norm = jnp.sqrt(jnp.sum(mods_array**2, axis=-1)) # [bs]
logged_scalars.update({'mod_l2_norm': l2_norm})
# Returned scalars will be summed and finally divided by num_samples
log_dict = {'scalars': logged_scalars}
else:
val_batch = val_batch_dict['array'] # [bs, *spatial_dims, C]
out = jax.vmap(
self._eval_inner_loop,
in_axes=[None, 0, None])(params, val_batch, self.coords)
# Unpack outputs.
new_params = out[0] # params with leading dim bs
loss = out[1] # Array shape [bs]
val_psnrs = out[2] # List of arrays shape [bs]
val_losses = out[3]
# Record modulation norms
scalars = {}
_, mods = function_reps.partition_params(new_params)
mods_array = jax.vmap(lambda x: pytree_conversions.pytree_to_array(x)[0])(
mods) # [bs, mod_dim]
l2_norm = jnp.sqrt(jnp.sum(mods_array**2, axis=-1))
scalars['mod_l2_norm'] = l2_norm # [bs]
# Record PSNR and losses at every step
for i in range(len(val_psnrs)):
scalars[f'val_psnr_{str(i).zfill(2)}'] = val_psnrs[i]
scalars[f'loss_{str(i).zfill(2)}'] = val_losses[i]
# Record validation PSNR corresponding to number of inner steps during
# training and also loss for each image at the end of inner loop.
scalars['val_psnr'] = val_psnrs[self.config.training.inner_steps]
scalars['loss'] = loss
# Returned scalars will be summed and finally divided by num_samples
log_dict = {'scalars': scalars}
return log_dict
def _build_eval_input(self) -> Generator[Array, None, None]:
if self.config.dataset.type == 'image':
shuffle_buffer_size = 10000
else:
shuffle_buffer_size = 500
return data_utils.load_dataset(
self.config.dataset.name,
'test',
batch_size=self.config.evaluation.batch_size,
shuffle=self.config.evaluation.shuffle,
num_examples=self.config.evaluation.num_examples,
shuffle_buffer_size=shuffle_buffer_size)
def _eval_epoch(self, rng: Array):
"""Evaluates an epoch."""
num_samples = 0.
summed_scalars = None
rng = rng[0]
params = utils.get_first(self._params)
for i, val_batch_dict in enumerate(self._build_eval_input()):
rng, _ = jax.random.split(rng) # use new rng for each batch.
if self.config.dataset.type == 'scene':
views = val_batch_dict['images'] # [bs, 251, H, W, C]
num_samples += views.shape[0]
log_dict = self._eval_batch(params, val_batch_dict, rng)
else:
val_batch = val_batch_dict['array']
num_samples += val_batch.shape[0]
log_dict = self._eval_batch(params, val_batch_dict, rng)
scalars = log_dict['scalars']
# Accumulate the sum of scalars for each step
scalars = jax.tree_map(lambda x: jnp.sum(x, axis=0), scalars)
if summed_scalars is None:
summed_scalars = scalars
else:
summed_scalars = jax.tree_map(jnp.add, summed_scalars, scalars)
print(f'{i} eval iterations done')
logging.info('%d eval iterations done', i)
mean_scalars = jax.tree_map(lambda x: x / num_samples, summed_scalars)
return {'scalars': mean_scalars}
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(functools.partial(platform.main, Experiment))
|
functa-main
|
experiment_meta_learning.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SIREN models with FiLM modulations."""
from typing import Any, Callable, Dict, Mapping, Optional, Tuple
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from functa import pytree_conversions
Array = jnp.ndarray
PRNGKey = Array
Batch = Mapping[str, np.ndarray]
OptState = Any
class Sine(hk.Module):
"""Applies a scaled sine transform to input: out = sin(w0 * in)."""
def __init__(self, w0: float = 1.):
"""Constructor.
Args:
w0 (float): Scale factor in sine activation (omega_0 factor from SIREN).
"""
super().__init__()
self.w0 = w0
def __call__(self, x: Array) -> Array:
return jnp.sin(self.w0 * x)
class FiLM(hk.Module):
"""Applies a FiLM modulation: out = scale * in + shift.
Notes:
We currently initialize FiLM layers as the identity. However, this may not
be optimal. In pi-GAN for example they initialize the layer with a random
normal.
"""
def __init__(self,
f_in: int,
modulate_scale: bool = True,
modulate_shift: bool = True):
"""Constructor.
Args:
f_in: Number of input features.
modulate_scale: If True, modulates scales.
modulate_shift: If True, modulates shifts.
"""
super().__init__()
# Must modulate at least one of shift and scale
assert modulate_scale or modulate_shift
self.f_in = f_in
# Initialize FiLM layers as identity
self.scale = 1.
self.shift = 0.
if modulate_scale:
self.scale = hk.get_parameter('scale', [self.f_in], init=jnp.ones)
if modulate_shift:
self.shift = hk.get_parameter('shift', [self.f_in], init=jnp.zeros)
def __call__(self, x: Array) -> Array:
return self.scale * x + self.shift
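# Hypothetical demo (not in the original repo): at initialization FiLM is the
# identity map (scale = 1, shift = 0), so a freshly initialized module returns
# its input unchanged.
def _example_film_identity():
  forward = hk.transform(lambda x: FiLM(f_in=4)(x))
  x = jnp.ones((2, 4))
  params = forward.init(jax.random.PRNGKey(0), x)
  return forward.apply(params, None, x)  # == x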
class ModulatedSirenLayer(hk.Module):
"""Applies a linear layer followed by a modulation and sine activation."""
def __init__(self,
f_in: int,
f_out: int,
w0: float = 1.,
is_first: bool = False,
is_last: bool = False,
modulate_scale: bool = True,
modulate_shift: bool = True,
apply_activation: bool = True):
"""Constructor.
Args:
f_in (int): Number of input features.
f_out (int): Number of output features.
w0 (float): Scale factor in sine activation.
is_first (bool): Whether this is first layer of model.
is_last (bool): Whether this is last layer of model.
modulate_scale: If True, modulates scales.
modulate_shift: If True, modulates shifts.
apply_activation: If True, applies sine activation.
"""
super().__init__()
self.f_in = f_in
self.f_out = f_out
self.w0 = w0
self.is_first = is_first
self.is_last = is_last
self.modulate_scale = modulate_scale
self.modulate_shift = modulate_shift
self.apply_activation = apply_activation
# Follow initialization scheme from SIREN
self.init_range = 1 / f_in if is_first else jnp.sqrt(6 / f_in) / w0
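    # Worked example (illustrative): a first layer with f_in=2 gets
    # init_range = 1 / 2 = 0.5, while a hidden layer with f_in=256 and w0=30
    # gets init_range = sqrt(6 / 256) / 30 ≈ 0.0051.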
def __call__(self, x: Array) -> Array:
# Shape (n, f_in) -> (n, f_out)
x = hk.Linear(
output_size=self.f_out,
w_init=hk.initializers.RandomUniform(-self.init_range,
self.init_range))(x)
# Apply non-linearities
if self.is_last:
# We assume target data (e.g. RGB values of pixels) lies in [0, 1]. To
# learn zero-centered features we therefore shift output by .5
return x + .5
else:
# Optionally apply modulation
if self.modulate_scale or self.modulate_shift:
x = FiLM(
self.f_out,
modulate_scale=self.modulate_scale,
modulate_shift=self.modulate_shift)(x)
# Optionally apply activation
if self.apply_activation:
x = Sine(self.w0)(x)
return x
class MetaSGDLrs(hk.Module):
"""Module storing learning rates for meta-SGD.
Notes:
This module does not apply any transformation but simply stores the learning
rates. Since we also learn the learning rates we treat them the same as
model params.
"""
def __init__(self,
num_lrs: int,
lrs_init_range: Tuple[float, float] = (0.005, 0.1),
lrs_clip_range: Tuple[float, float] = (-5., 5.)):
"""Constructor.
Args:
num_lrs: Number of learning rates to learn.
lrs_init_range: Range from which initial learning rates will be
uniformly sampled.
lrs_clip_range: Range at which to clip learning rates. Default value will
effectively avoid any clipping, but typically learning rates should
be positive and small.
"""
super().__init__()
self.num_lrs = num_lrs
self.lrs_init_range = lrs_init_range
self.lrs_clip_range = lrs_clip_range
# Initialize learning rates
self.meta_sgd_lrs = hk.get_parameter(
'meta_sgd_lrs', [self.num_lrs],
init=hk.initializers.RandomUniform(*self.lrs_init_range))
def __call__(self) -> Array:
# Clip learning rate values
return jax.tree_map(lambda x: jnp.clip(x, *self.lrs_clip_range),
self.meta_sgd_lrs)
class ModulatedSiren(hk.Module):
"""SIREN model with FiLM modulations as in pi-GAN."""
def __init__(self,
width: int = 256,
depth: int = 5,
out_channels: int = 3,
w0: float = 1.,
modulate_scale: bool = True,
modulate_shift: bool = True,
use_meta_sgd: bool = False,
meta_sgd_init_range: Tuple[float, float] = (0.005, 0.1),
meta_sgd_clip_range: Tuple[float, float] = (-5., 5.),
name: Optional[str] = None):
"""Constructor.
Args:
width (int): Width of each hidden layer in MLP.
depth (int): Number of layers in MLP.
out_channels (int): Number of output channels.
w0 (float): Scale factor in sine activation in first layer.
modulate_scale: If True, modulates scales.
modulate_shift: If True, modulates shifts.
use_meta_sgd: Whether to use meta-SGD.
meta_sgd_init_range: Range from which initial meta_sgd learning rates will
be uniformly sampled.
meta_sgd_clip_range: Range at which to clip learning rates.
name: name.
"""
super().__init__(name=name)
self.width = width
self.depth = depth
self.out_channels = out_channels
self.w0 = w0
self.modulate_scale = modulate_scale
self.modulate_shift = modulate_shift
self.use_meta_sgd = use_meta_sgd
self.meta_sgd_init_range = meta_sgd_init_range
self.meta_sgd_clip_range = meta_sgd_clip_range
# Initialize meta-SGD learning rates
if self.use_meta_sgd:
# Compute total number of modulations in network
self.modulations_per_unit = int(modulate_scale) + int(modulate_shift)
self.num_modulations = width * (depth - 1) * self.modulations_per_unit
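      # Worked example (illustrative): with width=512, depth=15 and shift-only
      # modulation, num_modulations = 512 * (15 - 1) * 1 = 7168.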
self.meta_sgd_lrs = MetaSGDLrs(self.num_modulations,
self.meta_sgd_init_range,
self.meta_sgd_clip_range)
def __call__(self, coords: Array) -> Array:
"""Evaluates model at a batch of coordinates.
Args:
coords (Array): Array of coordinates. Should have shape (height, width, 2)
for images and (depth/time, height, width, 3) for 3D shapes/videos.
Returns:
Output features at coords.
"""
# Flatten coordinates
x = jnp.reshape(coords, (-1, coords.shape[-1]))
# Initial layer
x = ModulatedSirenLayer(
f_in=x.shape[-1],
f_out=self.width,
is_first=True,
w0=self.w0,
modulate_scale=self.modulate_scale,
modulate_shift=self.modulate_shift)(x)
# Hidden layers
for _ in range(1, self.depth - 1):
# Add ModulatedSirenLayers
x = ModulatedSirenLayer(
f_in=x.shape[-1],
f_out=self.width,
w0=self.w0,
modulate_scale=self.modulate_scale,
modulate_shift=self.modulate_shift)(x)
# Final layer
out = ModulatedSirenLayer(
f_in=x.shape[-1],
f_out=self.out_channels,
is_last=True,
w0=self.w0,
modulate_scale=self.modulate_scale,
modulate_shift=self.modulate_shift)(x)
# Unflatten output. E.g. for images this corresponds to
# (num_pixels, out_channels) -> (height, width, out_channels)
return jnp.reshape(out, list(coords.shape[:-1]) + [self.out_channels])
class LatentVector(hk.Module):
"""Module that holds a latent vector.
Notes:
This module does not apply any transformation but simply stores a latent
vector. This is to make sure that all data necessary to represent an image
(or a NeRF scene or a video) is present in the model params. This also makes
it easier to use the partition_params function.
"""
def __init__(self, latent_dim: int, latent_init_scale: float = 0.0):
"""Constructor.
Args:
latent_dim: Dimension of latent vector.
latent_init_scale: Scale at which to randomly initialize latent vector.
"""
super().__init__()
self.latent_dim = latent_dim
self.latent_init_scale = latent_init_scale
# Initialize latent vector
self.latent_vector = hk.get_parameter(
'latent_vector', [latent_dim],
init=hk.initializers.RandomUniform(-latent_init_scale,
latent_init_scale))
def __call__(self) -> Array:
return self.latent_vector
class LatentToModulation(hk.Module):
"""Function mapping latent vector to a set of modulations."""
def __init__(self,
latent_dim: int,
layer_sizes: Tuple[int, ...],
width: int,
num_modulation_layers: int,
modulate_scale: bool = True,
modulate_shift: bool = True,
activation: Callable[[Array], Array] = jax.nn.relu):
"""Constructor.
Args:
latent_dim: Dimension of latent vector (input of LatentToModulation
network).
layer_sizes: List of hidden layer sizes for MLP parameterizing the map
from latent to modulations. Input dimension is inferred from latent_dim
and output dimension is inferred from number of modulations.
width: Width of each hidden layer in MLP of function rep.
num_modulation_layers: Number of layers in MLP that contain modulations.
modulate_scale: If True, returns scale modulations.
modulate_shift: If True, returns shift modulations.
activation: Activation function to use in MLP.
"""
super().__init__()
# Must modulate at least one of shift and scale
assert modulate_scale or modulate_shift
self.latent_dim = latent_dim
    self.layer_sizes = tuple(layer_sizes)  # XManager converts tuples to lists
self.width = width
self.num_modulation_layers = num_modulation_layers
self.modulate_scale = modulate_scale
self.modulate_shift = modulate_shift
# MLP outputs all modulations. We apply modulations on every hidden unit
# (i.e on width number of units) at every modulation layer.
# At each of these we apply either a scale or a shift or both,
# hence total output size is given by following formula
self.modulations_per_unit = int(modulate_scale) + int(modulate_shift)
self.modulations_per_layer = width * self.modulations_per_unit
self.output_size = num_modulation_layers * self.modulations_per_layer
self.forward = hk.nets.MLP(
self.layer_sizes + (self.output_size,), activation=activation)
def __call__(self, latent_vector: Array) -> Dict[int, Dict[str, Array]]:
modulations = self.forward(latent_vector)
# Partition modulations into scales and shifts at every layer
outputs = {}
for i in range(self.num_modulation_layers):
single_layer_modulations = {}
# Note that we add 1 to scales so that outputs of MLP will be centered
# (since scale = 1 corresponds to identity function)
if self.modulate_scale and self.modulate_shift:
start = 2 * self.width * i
single_layer_modulations['scale'] = modulations[start:start +
self.width] + 1
single_layer_modulations['shift'] = modulations[start +
self.width:start +
2 * self.width]
elif self.modulate_scale:
start = self.width * i
single_layer_modulations['scale'] = modulations[start:start +
self.width] + 1
elif self.modulate_shift:
start = self.width * i
single_layer_modulations['shift'] = modulations[start:start +
self.width]
outputs[i] = single_layer_modulations
return outputs
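# Worked example (illustrative, not part of the original module): with
# width=256, num_modulation_layers=4 and both scale and shift modulations,
# output_size = 4 * 256 * 2 = 2048, so layer_sizes=(256, 512) would give an
# MLP with layer sizes (256, 512, 2048).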
class LatentModulatedSiren(hk.Module):
"""SIREN model with FiLM modulations generated from a latent vector."""
def __init__(self,
width: int = 256,
depth: int = 5,
out_channels: int = 3,
latent_dim: int = 64,
layer_sizes: Tuple[int, ...] = (256, 512),
w0: float = 1.,
modulate_scale: bool = True,
modulate_shift: bool = True,
latent_init_scale: float = 0.01,
use_meta_sgd: bool = False,
meta_sgd_init_range: Tuple[float, float] = (0.005, 0.1),
meta_sgd_clip_range: Tuple[float, float] = (-5., 5.)):
"""Constructor.
Args:
width (int): Width of each hidden layer in MLP.
depth (int): Number of layers in MLP.
out_channels (int): Number of output channels.
latent_dim: Dimension of latent vector (input of LatentToModulation
network).
layer_sizes: List of hidden layer sizes for MLP parameterizing the map
from latent to modulations. Input dimension is inferred from latent_dim
and output dimension is inferred from number of modulations.
w0 (float): Scale factor in sine activation in first layer.
modulate_scale: If True, modulates scales.
modulate_shift: If True, modulates shifts.
latent_init_scale: Scale at which to randomly initialize latent vector.
use_meta_sgd: Whether to use meta-SGD.
meta_sgd_init_range: Range from which initial meta_sgd learning rates will
be uniformly sampled.
      meta_sgd_clip_range: Range to which learning rates are clipped.
"""
super().__init__()
self.width = width
self.depth = depth
self.out_channels = out_channels
self.latent_dim = latent_dim
self.layer_sizes = layer_sizes
self.w0 = w0
self.modulate_scale = modulate_scale
self.modulate_shift = modulate_shift
self.latent_init_scale = latent_init_scale
self.use_meta_sgd = use_meta_sgd
self.meta_sgd_init_range = meta_sgd_init_range
self.meta_sgd_clip_range = meta_sgd_clip_range
# Initialize meta-SGD learning rates
if self.use_meta_sgd:
self.meta_sgd_lrs = MetaSGDLrs(self.latent_dim,
self.meta_sgd_init_range,
self.meta_sgd_clip_range)
# Initialize latent vector and map from latents to modulations
self.latent = LatentVector(latent_dim, latent_init_scale)
self.latent_to_modulation = LatentToModulation(
latent_dim=latent_dim,
layer_sizes=layer_sizes,
width=width,
num_modulation_layers=depth-1,
modulate_scale=modulate_scale,
modulate_shift=modulate_shift)
def modulate(self, x: Array, modulations: Dict[str, Array]) -> Array:
"""Modulates input according to modulations.
Args:
x: Hidden features of MLP.
modulations: Dict with keys 'scale' and 'shift' (or only one of them)
containing modulations.
Returns:
Modulated vector.
"""
if 'scale' in modulations:
x = modulations['scale'] * x
if 'shift' in modulations:
x = x + modulations['shift']
return x
def __call__(self, coords: Array) -> Array:
"""Evaluates model at a batch of coordinates.
Args:
coords (Array): Array of coordinates. Should have shape (height, width, 2)
for images and (depth/time, height, width, 3) for 3D shapes/videos.
Returns:
Output features at coords.
"""
# Compute modulations based on latent vector
latent_vector = self.latent()
modulations = self.latent_to_modulation(latent_vector)
# Flatten coordinates
x = jnp.reshape(coords, (-1, coords.shape[-1]))
# Initial layer (note all modulations are set to False here, since we
# directly apply modulations from latent_to_modulations output).
x = ModulatedSirenLayer(
f_in=x.shape[-1],
f_out=self.width,
is_first=True,
w0=self.w0,
modulate_scale=False,
modulate_shift=False,
apply_activation=False)(x)
x = self.modulate(x, modulations[0])
x = Sine(self.w0)(x)
# Hidden layers
for i in range(1, self.depth - 1):
x = ModulatedSirenLayer(
f_in=x.shape[-1],
f_out=self.width,
w0=self.w0,
modulate_scale=False,
modulate_shift=False,
apply_activation=False)(x)
x = self.modulate(x, modulations[i])
x = Sine(self.w0)(x)
# Final layer
out = ModulatedSirenLayer(
f_in=x.shape[-1],
f_out=self.out_channels,
is_last=True,
w0=self.w0,
modulate_scale=False,
modulate_shift=False)(x)
# Unflatten output
return jnp.reshape(out, list(coords.shape[:-1]) + [self.out_channels])
# Helper functions
def get_num_weights_and_modulations(params: hk.Params) -> Tuple[int, int]:
"""Returns the number of weights and modulations of ModulatedSiren model.
Args:
params (hk.Params): Parameters from ModulatedSiren model.
Returns:
Number of weights and modulations.
Notes:
This relies on the partition_params function which assumes all modulations
are stored in FiLM layers. If we change this in the future, this function
will break.
"""
weights, modulations = partition_params(params)
return hk.data_structures.tree_size(weights), hk.data_structures.tree_size(
modulations)
def partition_params(params: hk.Params) -> Tuple[hk.Params, hk.Params]:
"""Partitions ModulatedSiren parameters into weights and modulations.
Args:
params (hk.Params): Parameters of ModulatedSiren or LatentModulatedSiren
model.
Returns:
Weights and modulations of network.
"""
# If predicate is True, module contains FiLM parameters or a latent vector
# mapping to FiLM parameters
  predicate = lambda module_name, name, value: (
      'fi_lm' in module_name or 'latent_vector' in module_name)
modulations, weights = hk.data_structures.partition(predicate, params)
return weights, modulations
def partition_shared_params(
shared_params: hk.Params) -> Tuple[hk.Params, hk.Params]:
"""Partitions shared params parameters into weights and learning rates.
Args:
shared_params (hk.Params): Shared parameters of ModulatedSiren or
LatentModulatedSiren model, i.e. parameters that are not updated in inner
loop and are shared across datapoints.
Returns:
Weights and learning rates of network.
"""
predicate = lambda module_name, name, value: 'meta_sgd_lrs' in module_name
lrs, weights = hk.data_structures.partition(predicate, shared_params)
return weights, lrs
def merge_params(weights: hk.Params, modulations: hk.Params) -> hk.Params:
"""Merges weights and modulations into a single set of parameters.
Args:
    weights (hk.Params): Shared weight parameters.
    modulations (hk.Params): Modulation parameters.
Returns:
Parameters of ModulatedSiren model.
"""
return hk.data_structures.merge(modulations, weights)
def update_params(params: hk.Params, modulation: Array) -> hk.Params:
"""Update ModulatedSiren parameters by only updating modulations.
Args:
params (hk.Params): Parameters of ModulatedSiren or LatentModulatedSiren
model.
modulation (Array): Array representation of modulations, shape (mod_dim,).
Returns:
Updated params.
"""
# extract non-modulation weights from params and tree structure for mods
weights, init_modulation = partition_params(params)
_, concat_idx, tree_def = pytree_conversions.pytree_to_array(init_modulation)
# update modulations and merge with non-modulation weights
modulation_tree = pytree_conversions.array_to_pytree(
modulation, concat_idx, tree_def)
modulated_params = merge_params(weights, modulation_tree)
return modulated_params
def get_coordinate_grid(res: int, centered: bool = True) -> Array:
"""Returns a normalized coordinate grid for a res by res sized image.
Args:
res (int): Resolution of image.
    centered (bool): If True, assumes coordinates lie at pixel centers. This is
      equivalent to the align_corners argument in PyTorch. This should always
      be set to True, as it keeps coordinates consistent across different
      resolutions; False is kept as an option for backwards compatibility.
Returns:
Jnp array of shape (height, width, 2).
Notes:
Output will be in [0, 1] (i.e. coordinates are normalized to lie in [0, 1]).
"""
if centered:
half_pixel = 1. / (2. * res) # Size of half a pixel in grid
coords_one_dim = jnp.linspace(half_pixel, 1. - half_pixel, res)
else:
coords_one_dim = jnp.linspace(0, 1, res)
# Array will have shape (height, width, 2)
return jnp.stack(
jnp.meshgrid(coords_one_dim, coords_one_dim, indexing='ij'), axis=-1)
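# Hedged usage sketch (illustrative, not part of the original module): wiring
# the modules above together with hk.transform. The resolution and
# hyperparameters below are assumptions chosen for the example only.
if __name__ == '__main__':
  def forward(coords):
    return LatentModulatedSiren(width=64, depth=4, latent_dim=16)(coords)
  model = hk.without_apply_rng(hk.transform(forward))
  example_coords = get_coordinate_grid(res=32)  # (32, 32, 2), values in [0, 1]
  example_params = model.init(jax.random.PRNGKey(0), example_coords)
  rgb = model.apply(example_params, example_coords)  # (32, 32, 3)
  # Split params into shared weights and per-datapoint modulations (here the
  # modulations are the latent vector held by the LatentVector module).
  example_weights, example_mods = partition_params(example_params)
  print(rgb.shape, hk.data_structures.tree_size(example_mods))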
|
functa-main
|
function_reps.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions."""
from typing import List, Optional, Tuple, Union
from absl import logging
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from functa import function_reps
from functa import pytree_conversions
from functa.minimal_nerf import render_rays
Array = jnp.ndarray
PRNGKey = chex.PRNGKey
# Helper functions to compute MSE and PSNR
mse_fn = jax.jit(lambda x, y: jnp.mean((x - y)**2))
psnr_fn = jax.jit(lambda mse: -10 * jnp.log10(mse))
inverse_psnr_fn = jax.jit(lambda psnr: jnp.exp(-psnr*jnp.log(10) / 10))
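# Worked example (illustrative): an MSE of 1e-3 gives psnr_fn(1e-3)
# = -10 * log10(1e-3) = 30 dB, and inverse_psnr_fn(30.) recovers ~1e-3.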
def loss_fn_image(modulations: hk.Params, weights: hk.Params, model,
image: Array, coords: Array, l2_weight: float) -> Array:
"""Loss function for images.
Args:
modulations: Modulation parameters.
weights: Shared weight parameters.
model: Haiku transformed model.
image: Shape (height, width, channels).
coords: Shape (height, width, 2) or (height * width, 2). Note the coords
will be flattened in model call.
l2_weight: weight for L2 regularisation of modulations.
Returns:
    Tuple of (reconstruction MSE + L2 penalty, reconstruction MSE), where the
    reconstruction MSE is between the ground truth image and the image
    reconstructed by the function rep.
"""
params = function_reps.merge_params(weights, modulations)
generated = model.apply(params, coords)
modulations_array, _, _ = pytree_conversions.pytree_to_array(modulations)
l2_loss = l2_weight * jnp.sum(modulations_array**2)
rec_loss = mse_fn(generated, image)
return rec_loss + l2_loss, rec_loss
def loss_fn_nerf(modulations: hk.Params, weights: hk.Params, model,
target: Array, rays: Array,
render_config: Tuple[int, float, float, bool],
l2_weight: float, rng: Union[int, PRNGKey] = 42,
coord_noise: bool = False):
"""Loss function for scenes.
Args:
modulations: Modulation parameters.
weights: Shared weight parameters.
model: Haiku transformed model.
target: Target pixel values for a single or a batch of images
*of the same scene*. Shape (H, W, 3) or (num_views, H, W, 3).
rays: Ray origin and direction for each target value.
Shape (2, H, W, 3) or (2, num_views, H, W, 3).
render_config: config for nerf.
l2_weight: weight for L2 regularisation of modulations.
rng: PRNG key for adding coordinate noise.
coord_noise: whether to add coordinate noise or not.
Returns:
    Tuple of (reconstruction MSE + L2 penalty, reconstruction MSE), where the
    reconstruction MSE is between the ground truth view and the view rendered
    by the function rep.
"""
params = function_reps.merge_params(weights, modulations)
rgb, _ = render_rays(model, params, rays, render_config, rng, coord_noise)
modulations_array, _, _ = pytree_conversions.pytree_to_array(modulations)
l2_loss = l2_weight * jnp.sum(modulations_array**2)
rec_loss = mse_fn(rgb, target)
return rec_loss + l2_loss, rec_loss
def inner_loop(
params: hk.Params,
model,
opt_inner: optax.GradientTransformation,
inner_steps: int,
coords: Array,
targets: Array,
return_all_psnrs: bool = False,
return_all_losses: bool = False,
is_nerf: bool = False,
render_config: Optional[Tuple[int, float, float, bool]] = None,
l2_weight: float = 0.,
noise_std: float = 0.,
rng: Union[int, PRNGKey] = 42,
coord_noise: bool = False,
) -> Union[Tuple[hk.Params, Array, Array], Tuple[
hk.Params, Array, Array, List[Array]], Tuple[hk.Params, Array, List[Array]],
Tuple[hk.Params, Array, List[Array], List[Array]]]:
"""Performs MAML (Finn et al.'17) inner loop: fits modulations to target data.
This function takes `inner_steps` SGD steps in the inner loop to fit
modulations to image, while keeping weights fixed. This function is applied
to a single target (e.g. image, video or 3d scene).
Args:
params: ModulatedSiren model params.
model: Haiku transformed model.
opt_inner: Optax optimizer (typically SGD).
inner_steps: Number of SGD steps to take to fit modulations to image.
coords: Coordinates at which function rep will be evaluated.
targets: Data to be fitted. Not batched. For example, a single image of
shape (height, width, 3).
return_all_psnrs: If True, returns a list of PSNRs at every step during
fitting, otherwise returns only final PSNR.
return_all_losses: If True, returns a list of losses at every step during
fitting. Only comes into effect when return_all_psnrs=True.
is_nerf: If True, uses nerf inner loop.
render_config: config for nerf.
l2_weight: weight for L2 regularisation of modulations.
noise_std: standard deviation of Gaussian noise applied to modulations.
    rng: PRNG key (or int seed) used for modulation noise and NeRF rendering.
coord_noise: whether to add coordinate noise or not. Only used if
`is_nerf=True`.
Returns:
    Fitted params, loss, and either the final PSNR or all PSNR values (plus all
    loss values when return_all_losses=True).
"""
if isinstance(rng, int):
rng = jax.random.PRNGKey(rng)
# Partition params into trainable modulations and non-trainable weights
weights, modulations = function_reps.partition_params(params)
# Check if 'meta_sgd_lrs' is inside a key in weights. If it is, use meta-SGD
# to fit the data
use_meta_sgd = False
for key in weights:
if 'meta_sgd_lrs' in key:
use_meta_sgd = True
if use_meta_sgd:
# Extract learning rates
_, lrs = function_reps.partition_shared_params(weights)
# Flatten lrs so they can easily be multiplied with modulations when
# performing meta-SGD update
flat_lrs, _, _ = pytree_conversions.pytree_to_array(lrs)
  # The inner optimizer should have no memory of its state: every inner loop
  # optimization solves a new problem from scratch, so the optimizer is
  # reinitialized each time. As only the modulations are updated with
  # opt_inner, it is initialized with the modulations rather than all params.
  # The optimizer is only used when not using meta-SGD (where learning rates
  # are learned per parameter).
if not use_meta_sgd:
opt_inner_state = opt_inner.init(modulations)
# Optionally store PSNR at every step
if return_all_psnrs:
psnr_vals = []
if return_all_losses:
loss_vals = []
# Only update modulations in inner loop
for _ in range(inner_steps):
# jax.grad takes gradient with respect to first positional argument only
if is_nerf:
(loss, rec_loss), modulations_grad = jax.value_and_grad(
loss_fn_nerf, has_aux=True)(modulations, weights, model, targets,
coords, render_config, l2_weight,
rng, coord_noise)
else:
(loss, rec_loss), modulations_grad = jax.value_and_grad(
loss_fn_image, has_aux=True)(modulations, weights, model, targets,
coords, l2_weight)
# Update modulations
if use_meta_sgd:
# modulations_grad is a pytree with the same keys as modulations. lrs is
# a pytree containing all learning rates as a single array in a single
# leaf. Flatten both to multiply them together and then reconstruct tree
# Note, learning rate flattening operation is done above, and we therefore
# apply flat_lrs here
# Note, the following two lines are awkward, but are required to satisfy
# linter (line-too-long).
out = pytree_conversions.pytree_to_array(modulations_grad)
flat_modulations_grads, concat_idx, tree_def = out
flat_modulations_updates = -flat_lrs * flat_modulations_grads
modulation_updates = pytree_conversions.array_to_pytree(
flat_modulations_updates, concat_idx, tree_def)
else:
modulation_updates, opt_inner_state = opt_inner.update(
modulations_grad, opt_inner_state)
# Apply gradient update
modulations = optax.apply_updates(modulations, modulation_updates)
# Optionally calculate PSNR value
if return_all_psnrs:
psnr_vals.append(psnr_fn(rec_loss))
if return_all_losses:
loss_vals.append(loss)
# Optionally add noise to fitted modulations, to make downstream task less
# sensitive to exact value of modulations.
if noise_std > 0.:
modulations_array, concat_idx, tree_def = pytree_conversions.pytree_to_array(
modulations)
modulations_array += noise_std * jax.random.normal(
rng, shape=modulations_array.shape)
modulations = pytree_conversions.array_to_pytree(modulations_array,
concat_idx, tree_def)
# Compute final loss using updated modulations
if is_nerf:
loss, rec_loss = loss_fn_nerf(modulations, weights, model, targets, coords,
render_config, l2_weight, rng, coord_noise)
else:
loss, rec_loss = loss_fn_image(modulations, weights, model, targets, coords,
l2_weight)
total_loss = loss
if return_all_psnrs:
psnr_vals.append(psnr_fn(rec_loss))
if return_all_losses:
loss_vals.append(loss)
# Merge weights and modulations and return
params = function_reps.merge_params(weights, modulations)
if return_all_psnrs and not return_all_losses:
return params, total_loss, psnr_vals
elif return_all_psnrs and return_all_losses:
return params, total_loss, psnr_vals, loss_vals
else:
return params, total_loss, psnr_fn(rec_loss)
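# Hedged usage sketch (illustrative, not part of the original module): fitting
# modulations to a single square image with a plain SGD inner loop. The
# learning rate and step count below are assumptions chosen for the example.
def _example_fit_single_image(params, model, image):
  """Fits modulations of `model` to `image`; returns (fitted params, PSNR)."""
  coords = function_reps.get_coordinate_grid(image.shape[0])
  opt_inner = optax.sgd(learning_rate=1e-2)
  fitted_params, _, psnr = inner_loop(
      params, model, opt_inner, inner_steps=3, coords=coords, targets=image)
  return fitted_params, psnr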
def image_grid_from_batch(images: Array) -> Array:
"""Simple helper to generate a single image from a mini batch.
Args:
images: Batch of images of shape (batch_size, height, width, channels)
Returns:
A single image of shape (img_grid_height, img_grid_width, channels).
"""
batch_size = images.shape[0]
grid_size = int(np.floor(np.sqrt(batch_size)))
img_iter = iter(images[0:grid_size**2])
return jnp.squeeze(
jnp.vstack([
jnp.hstack([next(img_iter)
for _ in range(grid_size)][::-1])
for _ in range(grid_size)
]))
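# Worked example (illustrative): a batch of shape (9, 32, 32, 3) gives
# grid_size = 3 and an image grid of shape (96, 96, 3).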
def log_params_info(params):
"""Log information about parameters."""
logging.info('Parameter shapes')
logging.info(jax.tree_map(jnp.shape, params))
num_params = hk.data_structures.tree_size(params)
byte_size = hk.data_structures.tree_bytes(params)
logging.info('%d params, size: %.2f MB', num_params, byte_size / 1e6)
# print each parameter and its shape
for mod, name, value in hk.data_structures.traverse(params):
logging.info('%s/%s: %s', mod, name, value.shape)
|
functa-main
|
helpers.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensorflow dataset (tfds) for srn_cars."""
import imageio
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
_DESCRIPTION = """
Convert srn_cars dataset into a tensorflow dataset (tfds).
It should also contain any processing which has been applied (if any),
(e.g. corrupted example skipped, images cropped,...):
"""
_CITATION = """
@article{dupont2020equivariant,
title={Equivariant Neural Rendering},
author={Dupont, Emilien and Miguel Angel, Bautista and Colburn, Alex and Sankar, Aditya and Guestrin, Carlos and Susskind, Josh and Shan, Qi},
journal={arXiv preprint arXiv:2006.07630},
year={2020}
}
"""
class SrnCars(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for srn_cars dataset."""
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
}
def _info(self) -> tfds.core.DatasetInfo:
"""Returns the dataset metadata."""
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'images':
tfds.features.Tensor(shape=(None, 128, 128, 3), dtype=tf.uint8),
'poses':
tfds.features.Tensor(shape=(None, 4, 4), dtype=tf.float32),
'focal':
tfds.features.Scalar(dtype=tf.float32)
}),
supervised_keys=None, # Set to `None` to disable
disable_shuffling=True, # Fix ordering as scenes are already shuffled
homepage='https://github.com/apple/ml-equivariant-neural-rendering',
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
"""Returns SplitGenerators."""
path = dl_manager.download_and_extract(
'https://drive.google.com/uc?export=download&confirm=9iBg'
'&id=19yDsEJjx9zNpOKz9o6AaK-E8ED6taJWU')
return {
'train': self._generate_examples(path / 'cars_train'),
'test': self._generate_examples(path / 'cars_test'),
}
def _generate_examples(self, path):
"""Yields examples."""
# Coordinate transform matrix required to make SRN poses consistent
# with other NeRF models
coord_trans = np.diag(np.array([1., -1., -1., 1.], dtype=np.float32))
for key, scene_path in enumerate(sorted(path.iterdir())):
# Load intrinsics
intrinsics_path = scene_path / 'intrinsics.txt'
with intrinsics_path.open() as f:
lines = f.readlines()
# Extract focal length (required to obtain rays)
focal = float(lines[0].split()[0])
# Load images and their associated pose
pose_dir = scene_path / 'pose'
rgb_dir = scene_path / 'rgb'
pose_paths = sorted(list(pose_dir.iterdir()))
rgb_paths = sorted(list(rgb_dir.iterdir()))
all_imgs = []
all_poses = []
for rgb_path, pose_path in zip(rgb_paths, pose_paths):
img = imageio.imread(str(rgb_path))[..., :3]
pose = np.loadtxt(str(pose_path), dtype=np.float32).reshape(4, 4)
pose = pose @ coord_trans # Fix coordinate system
all_imgs.append(img)
all_poses.append(pose)
all_imgs = np.stack(all_imgs)
all_poses = np.stack(all_poses)
yield key, {
'images': all_imgs,
'poses': all_poses,
'focal': focal,
}
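# Hedged usage sketch (illustrative, not part of the original module): once
# this builder is importable, the dataset can be prepared and read with the
# standard tfds API. Note that download_and_prepare fetches the full archive.
if __name__ == '__main__':
  builder = SrnCars()
  builder.download_and_prepare()
  ds = builder.as_dataset(split='train')
  for example in ds.take(1):
    print(example['images'].shape, example['poses'].shape, example['focal'])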
|
functa-main
|
srn_cars/srn_cars.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""srn_cars dataset."""
from .srn_cars import SrnCars
|
functa-main
|
srn_cars/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Customized tensorflow dataset (tfds) for celeb_a_hq/128."""
import tensorflow as tf
import tensorflow_datasets as tfds
_DESCRIPTION = """
Convert celeb_a_hq dataset into a tensorflow dataset (tfds).
This is necessary due to issues with the necessary manual preparation of the
celeb_a_hq dataset at:
https://www.tensorflow.org/datasets/catalog/celeb_a_hq
reported in the open issue:
https://github.com/tensorflow/datasets/issues/1496
Note that the resulting dataset has a different ordering to the tfds version,
hence any train/test split further down the line may be different.
Also note that the celeb_a_hq dataset at the given google drive link contains
images at 128 resolution.
"""
_CITATION = """
@article{karras2017progressive,
title={Progressive growing of gans for improved quality, stability, and variation},
author={Karras, Tero and Aila, Timo and Laine, Samuli and Lehtinen, Jaakko},
journal={arXiv preprint arXiv:1710.10196},
year={2017}
}
"""
class CelebAHqCustom(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for custom celeb_a_hq dataset."""
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
}
def _info(self) -> tfds.core.DatasetInfo:
"""Returns the dataset metadata."""
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image':
tfds.features.Image(shape=(128, 128, 3), dtype=tf.uint8),
}),
supervised_keys=None, # Set to `None` to disable
homepage='https://github.com/tkarras/progressive_growing_of_gans',
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
"""Returns SplitGenerators."""
path = dl_manager.download_and_extract(
'https://drive.google.com/uc?export=download&confirm=9iBg'
'&id=107vh6Tibfs1p8pbc3gql-eVwxiqCD2o4')
# Note that CelebAHQ does not come with a train/test split.
# This is later split into 90:10 during data processing to create a
# train/test split.
path = path / 'data128x128'
return {
'train': self._generate_examples(path),
}
def _generate_examples(self, path):
"""Yields examples."""
# The path contains images at 128 resolution.
for img_path in path.glob('*.jpg'):
# Yields (key, example)
yield img_path.name, {'image': img_path}
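# Hedged usage note (not part of the original module): once registered by
# importing this package, the dataset can be read like any other tfds builder,
# e.g. tfds.load('celeb_a_hq_custom', split='train').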
|
functa-main
|
celeb_a_hq_custom/celeb_a_hq_custom.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""celeb_a_hq_custom dataset."""
from .celeb_a_hq_custom import CelebAHqCustom
|
functa-main
|
celeb_a_hq_custom/__init__.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
deepmind-research-master
|
__init__.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configurations for IODINE."""
# pylint: disable=missing-docstring, unused-variable
import math
def clevr6():
n_z = 64 # number of latent dimensions
num_components = 7 # number of components (K)
num_iters = 5
checkpoint_dir = "iodine/checkpoints/clevr6"
# For the paper we used 8 GPUs with a batch size of 4 each.
# This means a total batch size of 32, which is too large for a single GPU.
# When reducing the batch size, the learning rate should also be lowered.
batch_size = 4
learn_rate = 0.001 * math.sqrt(batch_size / 32)
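  # e.g. batch_size = 4 gives learn_rate = 0.001 * sqrt(4 / 32) ~= 3.5e-4.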
data = {
"constructor": "iodine.modules.data.CLEVR",
"batch_size": batch_size,
"path": "multi_object_datasets/clevr_with_masks_train.tfrecords",
"max_num_objects": 6,
}
model = {
"constructor": "iodine.modules.iodine.IODINE",
"n_z": n_z,
"num_components": num_components,
"num_iters": num_iters,
"iter_loss_weight": "linspace",
"coord_type": "linear",
"decoder": {
"constructor": "iodine.modules.decoder.ComponentDecoder",
"pixel_decoder": {
"constructor": "iodine.modules.networks.BroadcastConv",
"cnn_opt": {
# Final channels is irrelevant with target_output_shape
"output_channels": [64, 64, 64, 64, None],
"kernel_shapes": [3],
"strides": [1],
"activation": "elu",
},
"coord_type": "linear",
},
},
"refinement_core": {
"constructor": "iodine.modules.refinement.RefinementCore",
"encoder_net": {
"constructor": "iodine.modules.networks.CNN",
"mode": "avg_pool",
"cnn_opt": {
"output_channels": [64, 64, 64, 64],
"strides": [2],
"kernel_shapes": [3],
"activation": "elu",
},
"mlp_opt": {
"output_sizes": [256, 256],
"activation": "elu"
},
},
"recurrent_net": {
"constructor": "iodine.modules.networks.LSTM",
"hidden_sizes": [256],
},
"refinement_head": {
"constructor": "iodine.modules.refinement.ResHead"
},
},
"latent_dist": {
"constructor": "iodine.modules.distributions.LocScaleDistribution",
"dist": "normal",
"scale_act": "softplus",
"scale": "var",
"name": "latent_dist",
},
"output_dist": {
"constructor": "iodine.modules.distributions.MaskedMixture",
"num_components": num_components,
"component_dist": {
"constructor":
"iodine.modules.distributions.LocScaleDistribution",
"dist":
"logistic",
"scale":
"fixed",
"scale_val":
0.03,
"name":
"pixel_distribution",
},
},
"factor_evaluator": {
"constructor":
"iodine.modules.factor_eval.FactorRegressor",
"mapping": [
("color", 9, "categorical"),
("shape", 4, "categorical"),
("size", 3, "categorical"),
("position", 3, "scalar"),
],
},
}
optimizer = {
"constructor": "tensorflow.train.AdamOptimizer",
"learning_rate": {
"constructor": "tensorflow.train.exponential_decay",
"learning_rate": learn_rate,
"global_step": {
"constructor": "tensorflow.train.get_or_create_global_step"
},
"decay_steps": 1000000,
"decay_rate": 0.1,
},
"beta1": 0.95,
}
def multi_dsprites():
n_z = 16 # number of latent dimensions
num_components = 6 # number of components (K)
num_iters = 5
checkpoint_dir = "iodine/checkpoints/multi_dsprites"
# For the paper we used 8 GPUs with a batch size of 16 each.
# This means a total batch size of 128, which is too large for a single GPU.
# When reducing the batch size, the learning rate should also be lowered.
batch_size = 16
learn_rate = 0.0003 * math.sqrt(batch_size / 128)
data = {
"constructor":
"iodine.modules.data.MultiDSprites",
"batch_size":
batch_size,
"path":
"multi_object_datasets/multi_dsprites_colored_on_grayscale.tfrecords",
"dataset_variant":
"colored_on_grayscale",
"min_num_objs":
3,
"max_num_objs":
3,
}
model = {
"constructor": "iodine.modules.iodine.IODINE",
"n_z": n_z,
"num_components": num_components,
"num_iters": num_iters,
"iter_loss_weight": "linspace",
"coord_type": "cos",
"coord_freqs": 3,
"decoder": {
"constructor": "iodine.modules.decoder.ComponentDecoder",
"pixel_decoder": {
"constructor": "iodine.modules.networks.BroadcastConv",
"cnn_opt": {
# Final channels is irrelevant with target_output_shape
"output_channels": [32, 32, 32, 32, None],
"kernel_shapes": [5],
"strides": [1],
"activation": "elu",
},
"coord_type": "linear",
},
},
"refinement_core": {
"constructor": "iodine.modules.refinement.RefinementCore",
"encoder_net": {
"constructor": "iodine.modules.networks.CNN",
"mode": "avg_pool",
"cnn_opt": {
"output_channels": [32, 32, 32],
"strides": [2],
"kernel_shapes": [5],
"activation": "elu",
},
"mlp_opt": {
"output_sizes": [128],
"activation": "elu"
},
},
"recurrent_net": {
"constructor": "iodine.modules.networks.LSTM",
"hidden_sizes": [128],
},
"refinement_head": {
"constructor": "iodine.modules.refinement.ResHead"
},
},
"latent_dist": {
"constructor": "iodine.modules.distributions.LocScaleDistribution",
"dist": "normal",
"scale_act": "softplus",
"scale": "var",
"name": "latent_dist",
},
"output_dist": {
"constructor": "iodine.modules.distributions.MaskedMixture",
"num_components": num_components,
"component_dist": {
"constructor":
"iodine.modules.distributions.LocScaleDistribution",
"dist":
"logistic",
"scale":
"fixed",
"scale_val":
0.03,
"name":
"pixel_distribution",
},
},
"factor_evaluator": {
"constructor":
"iodine.modules.factor_eval.FactorRegressor",
"mapping": [
("color", 3, "scalar"),
("shape", 4, "categorical"),
("scale", 1, "scalar"),
("x", 1, "scalar"),
("y", 1, "scalar"),
("orientation", 2, "angle"),
],
},
}
optimizer = {
"constructor": "tensorflow.train.AdamOptimizer",
"learning_rate": {
"constructor": "tensorflow.train.exponential_decay",
"learning_rate": learn_rate,
"global_step": {
"constructor": "tensorflow.train.get_or_create_global_step"
},
"decay_steps": 1000000,
"decay_rate": 0.1,
},
"beta1": 0.95,
}
def tetrominoes():
n_z = 32 # number of latent dimensions
num_components = 4 # number of components (K)
num_iters = 5
checkpoint_dir = "iodine/checkpoints/tetrominoes"
# For the paper we used 8 GPUs with a batch size of 32 each.
# This means a total batch size of 256, which is too large for a single GPU.
# When reducing the batch size, the learning rate should also be lowered.
batch_size = 128
learn_rate = 0.0003 * math.sqrt(batch_size / 256)
data = {
"constructor": "iodine.modules.data.Tetrominoes",
"batch_size": batch_size,
"path": "iodine/multi_object_datasets/tetrominoes_train.tfrecords",
}
model = {
"constructor": "iodine.modules.iodine.IODINE",
"n_z": n_z,
"num_components": num_components,
"num_iters": num_iters,
"iter_loss_weight": "linspace",
"coord_type": "cos",
"coord_freqs": 3,
"decoder": {
"constructor": "iodine.modules.decoder.ComponentDecoder",
"pixel_decoder": {
"constructor": "iodine.modules.networks.BroadcastConv",
"cnn_opt": {
# Final channels is irrelevant with target_output_shape
"output_channels": [32, 32, 32, 32, None],
"kernel_shapes": [5],
"strides": [1],
"activation": "elu",
},
"coord_type": "linear",
"coord_freqs": 3,
},
},
"refinement_core": {
"constructor": "iodine.modules.refinement.RefinementCore",
"encoder_net": {
"constructor": "iodine.modules.networks.CNN",
"mode": "avg_pool",
"cnn_opt": {
"output_channels": [32, 32, 32],
"strides": [2],
"kernel_shapes": [5],
"activation": "elu",
},
"mlp_opt": {
"output_sizes": [128],
"activation": "elu"
},
},
"recurrent_net": {
"constructor": "iodine.modules.networks.LSTM",
"hidden_sizes": [], # No recurrent layer used for this dataset
},
"refinement_head": {
"constructor": "iodine.modules.refinement.ResHead"
},
},
"latent_dist": {
"constructor": "iodine.modules.distributions.LocScaleDistribution",
"dist": "normal",
"scale_act": "softplus",
"scale": "var",
"name": "latent_dist",
},
"output_dist": {
"constructor": "iodine.modules.distributions.MaskedMixture",
"num_components": num_components,
"component_dist": {
"constructor":
"iodine.modules.distributions.LocScaleDistribution",
"dist":
"logistic",
"scale":
"fixed",
"scale_val":
0.03,
"name":
"pixel_distribution",
},
},
"factor_evaluator": {
"constructor":
"iodine.modules.factor_eval.FactorRegressor",
"mapping": [
("position", 2, "scalar"),
("color", 3, "scalar"),
("shape", 20, "categorical"),
],
},
}
optimizer = {
"constructor": "tensorflow.train.AdamOptimizer",
"learning_rate": {
"constructor": "tensorflow.train.exponential_decay",
"learning_rate": learn_rate,
"global_step": {
"constructor": "tensorflow.train.get_or_create_global_step"
},
"decay_steps": 1000000,
"decay_rate": 0.1,
},
"beta1": 0.95,
}
|
deepmind-research-master
|
iodine/configurations.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-importing-member, g-multiple-import, g-import-not-at-top
# pylint: disable=protected-access, g-bad-import-order, missing-docstring
# pylint: disable=unused-variable, invalid-name, no-value-for-parameter
from copy import deepcopy
import os.path
import warnings
from absl import logging
import numpy as np
from sacred import Experiment, SETTINGS
# Ignore all tensorflow deprecation warnings
logging._warn_preinit_stderr = 0
warnings.filterwarnings("ignore", module=".*tensorflow.*")
import tensorflow.compat.v1 as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import sonnet as snt
from sacred.stflow import LogFileWriter
from iodine.modules import utils
from iodine import configurations
SETTINGS.CONFIG.READ_ONLY_CONFIG = False
ex = Experiment("iodine")
@ex.config
def default_config():
continue_run = False # set to continue experiment from an existing checkpoint
  # If continue_run is False, "_{run_id}" will be appended to checkpoint_dir.
  checkpoint_dir = "checkpoints/iodine"
save_summaries_steps = 10
save_checkpoint_steps = 1000
n_z = 64 # number of latent dimensions
num_components = 7 # number of components (K)
num_iters = 5
learn_rate = 0.001
batch_size = 4
stop_after_steps = int(1e6)
# Details for the dataset, model and optimizer are left empty here.
# They can be found in the configurations for individual datasets,
# which are provided in configurations.py and added as named configs.
data = {} # Dataset details will go here
model = {} # Model details will go here
optimizer = {} # Optimizer details will go here
ex.named_config(configurations.clevr6)
ex.named_config(configurations.multi_dsprites)
ex.named_config(configurations.tetrominoes)
@ex.capture
def build(identifier, _config):
config_copy = deepcopy(_config[identifier])
return utils.build(config_copy, identifier=identifier)
def get_train_step(model, dataset, optimizer):
loss, scalars, _ = model(dataset("train"))
global_step = tf.train.get_or_create_global_step()
grads = optimizer.compute_gradients(loss)
gradients, variables = zip(*grads)
global_norm = tf.global_norm(gradients)
gradients, global_norm = tf.clip_by_global_norm(
gradients, 5.0, use_norm=global_norm)
grads = zip(gradients, variables)
train_op = optimizer.apply_gradients(grads, global_step=global_step)
with tf.control_dependencies([train_op]):
overview = model.get_overview_images(dataset("summary"))
scalars["debug/global_grad_norm"] = global_norm
summaries = {
k: tf.summary.scalar(k, v) for k, v in scalars.items()
}
summaries.update(
{k: tf.summary.image(k, v) for k, v in overview.items()})
return tf.identity(global_step), scalars, train_op
@ex.capture
def get_checkpoint_dir(continue_run, checkpoint_dir, _run, _log):
if continue_run:
assert os.path.exists(checkpoint_dir)
_log.info("Continuing run from checkpoint at {}".format(checkpoint_dir))
return checkpoint_dir
run_id = _run._id
if run_id is None: # then no observer was added that provided an _id
if not _run.unobserved:
_log.warning(
"No run_id given or provided by an Observer. (Re-)using run_id=1.")
run_id = 1
checkpoint_dir = checkpoint_dir + "_{run_id}".format(run_id=run_id)
_log.info(
"Starting a new run using checkpoint dir: '{}'".format(checkpoint_dir))
return checkpoint_dir
@ex.capture
def get_session(chkp_dir, loss, stop_after_steps, save_summaries_steps,
save_checkpoint_steps):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
hooks = [
tf.train.StopAtStepHook(last_step=stop_after_steps),
tf.train.NanTensorHook(loss),
]
return tf.train.MonitoredTrainingSession(
hooks=hooks,
config=config,
checkpoint_dir=chkp_dir,
save_summaries_steps=save_summaries_steps,
save_checkpoint_steps=save_checkpoint_steps,
)
@ex.command(unobserved=True)
def load_checkpoint(use_placeholder=False, session=None):
dataset = build("data")
model = build("model")
if use_placeholder:
inputs = dataset.get_placeholders()
else:
inputs = dataset()
info = model.eval(inputs)
if session is None:
session = tf.Session()
saver = tf.train.Saver()
checkpoint_dir = get_checkpoint_dir()
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
saver.restore(session, checkpoint_file)
print('Successfully restored Checkpoint "{}"'.format(checkpoint_file))
# print variables
variables = tf.global_variables() + tf.local_variables()
for row in snt.format_variables(variables, join_lines=False):
print(row)
return {
"session": session,
"model": model,
"info": info,
"inputs": inputs,
"dataset": dataset,
}
@ex.automain
@LogFileWriter(ex)
def main(save_summaries_steps):
checkpoint_dir = get_checkpoint_dir()
dataset = build("data")
model = build("model")
optimizer = build("optimizer")
gstep, train_step_exports, train_op = get_train_step(model, dataset,
optimizer)
loss, ari = [], []
with get_session(checkpoint_dir, train_step_exports["loss/total"]) as sess:
while not sess.should_stop():
out = sess.run({
"step": gstep,
"loss": train_step_exports["loss/total"],
"ari": train_step_exports["loss/ari_nobg"],
"train": train_op,
})
loss.append(out["loss"])
ari.append(out["ari"])
step = out["step"]
if step % save_summaries_steps == 0:
mean_loss = np.mean(loss)
mean_ari = np.mean(ari)
ex.log_scalar("loss", mean_loss, step)
ex.log_scalar("ari", mean_ari, step)
print("{step:>6d} Loss: {loss: >12.2f}\t\tARI-nobg:{ari: >6.2f}".format(
step=step, loss=mean_loss, ari=mean_ari))
loss, ari = [], []
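# Hedged usage note (not part of the original file): with sacred, one of the
# named configs registered above is selected on the command line, e.g.
#   python -m iodine.main with clevr6
# (standard sacred syntax), assuming the corresponding multi-object dataset
# tfrecords are available at the paths set in configurations.py.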
|
deepmind-research-master
|
iodine/main.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoders for rendering images."""
# pylint: disable=missing-docstring
from iodine.modules.distributions import MixtureParameters
import shapeguard
import sonnet as snt
class ComponentDecoder(snt.AbstractModule):
def __init__(self, pixel_decoder, name="component_decoder"):
super().__init__(name=name)
self._pixel_decoder = pixel_decoder
self._sg = shapeguard.ShapeGuard()
def set_output_shapes(self, pixel, mask):
self._sg.guard(pixel, "K, H, W, Cp")
self._sg.guard(mask, "K, H, W, 1")
self._pixel_decoder.set_output_shapes(self._sg["H, W, 1 + Cp"])
def _build(self, z):
self._sg.guard(z, "B, K, Z")
z_flat = self._sg.reshape(z, "B*K, Z")
pixel_params = self._pixel_decoder(z_flat).params
self._sg.guard(pixel_params, "B*K, H, W, 1 + Cp")
mask_params = pixel_params[..., 0:1]
pixel_params = pixel_params[..., 1:]
output = MixtureParameters(
pixel=self._sg.reshape(pixel_params, "B, K, H, W, Cp"),
mask=self._sg.reshape(mask_params, "B, K, H, W, 1"),
)
del self._sg.B
return output
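# Illustrative note (not part of the original module): set_output_shapes binds
# the K, H, W and Cp dimensions on the ShapeGuard before _build runs, so the
# decoder can check that the pixel decoder emits "B*K, H, W, 1 + Cp" outputs
# and split them into per-component mask and pixel parameters.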
|
deepmind-research-master
|
iodine/modules/decoder.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factor Evaluation Module."""
# pylint: disable=unused-variable
import collections
import functools
from iodine.modules import utils
import shapeguard
import sonnet as snt
import tensorflow.compat.v1 as tf
Factor = collections.namedtuple("Factor", ["name", "size", "type"])
class FactorRegressor(snt.AbstractModule):
"""Assess representations by learning a linear mapping to latents."""
def __init__(self, mapping=None, name="repres_content"):
super().__init__(name=name)
if mapping is None:
self._mapping = [
Factor("color", 3, "scalar"),
Factor("shape", 4, "categorical"),
Factor("scale", 1, "scalar"),
Factor("x", 1, "scalar"),
Factor("y", 1, "scalar"),
Factor("orientation", 2, "angle"),
]
else:
self._mapping = [Factor(*m) for m in mapping]
def _build(self, z, latent, visibility, pred_mask, true_mask):
sg = shapeguard.ShapeGuard()
z = sg.guard(z, "B, K, Z")
pred_mask = sg.guard(pred_mask, "B, K, H, W, 1")
true_mask = sg.guard(true_mask, "B, L, H, W, 1")
visibility = sg.guard(visibility, "B, L")
num_visible_obj = tf.reduce_sum(visibility)
# Map z to predictions for all latents
sg.M = sum([m.size for m in self._mapping])
self.predictor = snt.Linear(sg.M, name="predict_latents")
z_flat = sg.reshape(z, "B*K, Z")
all_preds = sg.guard(self.predictor(z_flat), "B*K, M")
all_preds = sg.reshape(all_preds, "B, 1, K, M")
all_preds = tf.tile(all_preds, sg["1, L, 1, 1"])
# prepare latents
latents = {}
mean_var_tot = {}
for m in self._mapping:
with tf.name_scope(m.name):
# preprocess, reshape, and tile
lat_preprocess = self.get_preprocessing(m)
lat = sg.guard(
lat_preprocess(latent[m.name]), "B, L, {}".format(m.size))
# compute mean over latent by training a variable using mse
if m.type in {"scalar", "angle"}:
mvt = utils.OnlineMeanVarEstimator(
axis=[0, 1], ddof=1, name="{}_mean_var".format(m.name))
mean_var_tot[m.name] = mvt(lat, visibility[:, :, tf.newaxis])
lat = tf.reshape(lat, sg["B, L, 1"] + [-1])
lat = tf.tile(lat, sg["1, 1, K, 1"])
latents[m.name] = lat
# prepare predictions
idx = 0
predictions = {}
for m in self._mapping:
with tf.name_scope(m.name):
assert m.name in latent, "{} not in {}".format(m.name, latent.keys())
pred = all_preds[..., idx:idx + m.size]
predictions[m.name] = sg.guard(pred, "B, L, K, {}".format(m.size))
idx += m.size
# compute error
total_pairwise_errors = None
for m in self._mapping:
with tf.name_scope(m.name):
error_fn = self.get_error_func(m)
sg.guard(latents[m.name], "B, L, K, {}".format(m.size))
sg.guard(predictions[m.name], "B, L, K, {}".format(m.size))
err = error_fn(latents[m.name], predictions[m.name])
sg.guard(err, "B, L, K")
if total_pairwise_errors is None:
total_pairwise_errors = err
else:
total_pairwise_errors += err
# determine best assignment by comparing masks
obj_mask = true_mask[:, :, tf.newaxis]
pred_mask = pred_mask[:, tf.newaxis]
pairwise_overlap = tf.reduce_sum(obj_mask * pred_mask, axis=[3, 4, 5])
best_match = sg.guard(tf.argmax(pairwise_overlap, axis=2), "B, L")
assignment = tf.one_hot(best_match, sg.K)
assignment *= visibility[:, :, tf.newaxis] # Mask non-visible objects
# total error
total_error = (
tf.reduce_sum(assignment * total_pairwise_errors) / num_visible_obj)
# compute scalars
monitored_scalars = {}
for m in self._mapping:
with tf.name_scope(m.name):
metric = self.get_metric(m)
scalar = metric(
latents[m.name],
predictions[m.name],
assignment[:, :, :, tf.newaxis],
mean_var_tot.get(m.name),
num_visible_obj,
)
monitored_scalars[m.name] = scalar
return total_error, monitored_scalars, mean_var_tot, predictions, assignment
@snt.reuse_variables
def predict(self, z):
sg = shapeguard.ShapeGuard()
z = sg.guard(z, "B, Z")
all_preds = sg.guard(self.predictor(z), "B, M")
idx = 0
predictions = {}
for m in self._mapping:
with tf.name_scope(m.name):
pred = all_preds[:, idx:idx + m.size]
predictions[m.name] = sg.guard(pred, "B, {}".format(m.size))
idx += m.size
return predictions
@staticmethod
def get_error_func(factor):
if factor.type in {"scalar", "angle"}:
return sse
elif factor.type == "categorical":
return functools.partial(
tf.losses.softmax_cross_entropy, reduction="none")
else:
raise KeyError(factor.type)
@staticmethod
def get_metric(factor):
if factor.type in {"scalar", "angle"}:
return r2
elif factor.type == "categorical":
return accuracy
else:
raise KeyError(factor.type)
@staticmethod
def one_hot(f, nr_categories):
return tf.one_hot(tf.cast(f[..., 0], tf.int32), depth=nr_categories)
@staticmethod
def angle_to_vector(theta):
return tf.concat([tf.math.cos(theta), tf.math.sin(theta)], axis=-1)
@staticmethod
def get_preprocessing(factor):
if factor.type == "scalar":
return tf.identity
elif factor.type == "categorical":
return functools.partial(
FactorRegressor.one_hot, nr_categories=factor.size)
elif factor.type == "angle":
return FactorRegressor.angle_to_vector
else:
raise KeyError(factor.type)
def sse(true, pred):
# run our own sum squared error because we want to reduce sum over last dim
return tf.reduce_sum(tf.square(true - pred), axis=-1)
def accuracy(labels, logits, assignment, mean_var_tot, num_vis):
del mean_var_tot # unused
pred = tf.argmax(logits, axis=-1, output_type=tf.int32)
labels = tf.argmax(labels, axis=-1, output_type=tf.int32)
correct = tf.cast(tf.equal(labels, pred), tf.float32)
return tf.reduce_sum(correct * assignment[..., 0]) / num_vis
def r2(labels, pred, assignment, mean_var_tot, num_vis):
del num_vis # unused
mean, var, _ = mean_var_tot
# labels, pred: (B, L, K, n)
ss_res = tf.reduce_sum(tf.square(labels - pred) * assignment, axis=2)
ss_tot = var[tf.newaxis, tf.newaxis, :] # (1, 1, n)
return tf.reduce_mean(1.0 - ss_res / ss_tot)
|
deepmind-research-master
|
iodine/modules/factor_eval.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plotting tools for IODINE."""
# pylint: disable=unused-import, missing-docstring, unused-variable
# pylint: disable=invalid-name, unexpected-keyword-arg
import functools
from iodine.modules.utils import get_mask_plot_colors
from matplotlib.colors import hsv_to_rgb
import matplotlib.pyplot as plt
import numpy as np
__all__ = ("get_mask_plot_colors", "example_plot", "iterations_plot",
"inputs_plot")
def clean_ax(ax, color=None, lw=4.0):
ax.set_xticks([])
ax.set_yticks([])
if color is not None:
for spine in ax.spines.values():
spine.set_linewidth(lw)
spine.set_color(color)
def optional_ax(fn):
def _wrapped(*args, **kwargs):
if kwargs.get("ax", None) is None:
figsize = kwargs.pop("figsize", (4, 4))
fig, ax = plt.subplots(figsize=figsize)
kwargs["ax"] = ax
return fn(*args, **kwargs)
return _wrapped
def optional_clean_ax(fn):
def _wrapped(*args, **kwargs):
if kwargs.get("ax", None) is None:
figsize = kwargs.pop("figsize", (4, 4))
fig, ax = plt.subplots(figsize=figsize)
kwargs["ax"] = ax
color = kwargs.pop("color", None)
lw = kwargs.pop("lw", 4.0)
res = fn(*args, **kwargs)
clean_ax(kwargs["ax"], color, lw)
return res
return _wrapped
@optional_clean_ax
def show_img(img, mask=None, ax=None, norm=False):
if norm:
vmin, vmax = np.min(img), np.max(img)
img = (img - vmin) / (vmax - vmin)
if mask is not None:
img = img * mask + np.ones_like(img) * (1.0 - mask)
return ax.imshow(img.clip(0.0, 1.0), interpolation="nearest")
@optional_clean_ax
def show_mask(m, ax):
color_conv = get_mask_plot_colors(m.shape[0])
color_mask = np.dot(np.transpose(m, [1, 2, 0]), color_conv)
return ax.imshow(color_mask.clip(0.0, 1.0), interpolation="nearest")
@optional_clean_ax
def show_mat(m, ax, vmin=None, vmax=None, cmap="viridis"):
return ax.matshow(
m[..., 0], cmap=cmap, vmin=vmin, vmax=vmax, interpolation="nearest")
@optional_clean_ax
def show_coords(m, ax):
vmin, vmax = np.min(m), np.max(m)
m = (m - vmin) / (vmax - vmin)
color_conv = get_mask_plot_colors(m.shape[-1])
color_mask = np.dot(m, color_conv)
return ax.imshow(color_mask, interpolation="nearest")
def example_plot(rinfo,
b=0,
t=-1,
mask_components=False,
size=2,
column_titles=True):
image = rinfo["data"]["image"][b, 0]
recons = rinfo["outputs"]["recons"][b, t, 0]
pred_mask = rinfo["outputs"]["pred_mask"][b, t]
components = rinfo["outputs"]["components"][b, t]
K, H, W, C = components.shape
colors = get_mask_plot_colors(K)
nrows = 1
ncols = 3 + K
fig, axes = plt.subplots(ncols=ncols, figsize=(ncols * size, nrows * size))
show_img(image, ax=axes[0], color="#000000")
show_img(recons, ax=axes[1], color="#000000")
show_mask(pred_mask[..., 0], ax=axes[2], color="#000000")
for k in range(K):
mask = pred_mask[k] if mask_components else None
show_img(components[k], ax=axes[k + 3], color=colors[k], mask=mask)
if column_titles:
labels = ["Image", "Recons.", "Mask"
] + ["Component {}".format(k + 1) for k in range(K)]
for ax, title in zip(axes, labels):
ax.set_title(title)
plt.subplots_adjust(hspace=0.03, wspace=0.035)
return fig
def iterations_plot(rinfo, b=0, mask_components=False, size=2):
image = rinfo["data"]["image"][b]
true_mask = rinfo["data"]["true_mask"][b]
recons = rinfo["outputs"]["recons"][b]
pred_mask = rinfo["outputs"]["pred_mask"][b]
pred_mask_logits = rinfo["outputs"]["pred_mask_logits"][b]
components = rinfo["outputs"]["components"][b]
T, K, H, W, C = components.shape
colors = get_mask_plot_colors(K)
nrows = T + 1
ncols = 2 + K
fig, axes = plt.subplots(
nrows=nrows, ncols=ncols, figsize=(ncols * size, nrows * size))
for t in range(T):
show_img(recons[t, 0], ax=axes[t, 0])
show_mask(pred_mask[t, ..., 0], ax=axes[t, 1])
axes[t, 0].set_ylabel("iter {}".format(t))
for k in range(K):
mask = pred_mask[t, k] if mask_components else None
show_img(components[t, k], ax=axes[t, k + 2], color=colors[k], mask=mask)
axes[0, 0].set_title("Reconstruction")
axes[0, 1].set_title("Mask")
show_img(image[0], ax=axes[T, 0])
show_mask(true_mask[0, ..., 0], ax=axes[T, 1])
vmin = np.min(pred_mask_logits[T - 1])
vmax = np.max(pred_mask_logits[T - 1])
for k in range(K):
axes[0, k + 2].set_title("Component {}".format(k + 1)) # , color=colors[k])
show_mat(
pred_mask_logits[T - 1, k], ax=axes[T, k + 2], vmin=vmin, vmax=vmax)
axes[T, k + 2].set_xlabel(
"Mask Logits for\nComponent {}".format(k + 1)) # , color=colors[k])
axes[T, 0].set_xlabel("Input Image")
axes[T, 1].set_xlabel("Ground Truth Mask")
plt.subplots_adjust(wspace=0.05, hspace=0.05)
return fig
def inputs_plot(rinfo, b=0, t=0, size=2):
B, T, K, H, W, C = rinfo["outputs"]["components"].shape
colors = get_mask_plot_colors(K)
inputs = rinfo["inputs"]["spatial"]
rows = [
("image", show_img, False),
("components", show_img, False),
("dcomponents", functools.partial(show_img, norm=True), False),
("mask", show_mat, True),
("pred_mask", show_mat, True),
("dmask", functools.partial(show_mat, cmap="coolwarm"), True),
("posterior", show_mat, True),
("log_prob", show_mat, True),
("counterfactual", show_mat, True),
("coordinates", show_coords, False),
]
rows = [(n, f, mcb) for n, f, mcb in rows if n in inputs]
nrows = len(rows)
ncols = K + 1
fig, axes = plt.subplots(
nrows=nrows,
ncols=ncols,
figsize=(ncols * size - size * 0.9, nrows * size),
gridspec_kw={"width_ratios": [1] * K + [0.1]},
)
for r, (name, plot_fn, make_cbar) in enumerate(rows):
axes[r, 0].set_ylabel(name)
if make_cbar:
vmin = np.min(inputs[name][b, t])
vmax = np.max(inputs[name][b, t])
if np.abs(vmin - vmax) < 1e-6:
vmin -= 0.1
vmax += 0.1
plot_fn = functools.partial(plot_fn, vmin=vmin, vmax=vmax)
# print("range of {:<16}: [{:0.2f}, {:0.2f}]".format(name, vmin, vmax))
for k in range(K):
if inputs[name].shape[2] == 1:
m = inputs[name][b, t, 0]
color = (0.0, 0.0, 0.0)
else:
m = inputs[name][b, t, k]
color = colors[k]
mappable = plot_fn(m, ax=axes[r, k], color=color)
if make_cbar:
fig.colorbar(mappable, cax=axes[r, K])
else:
axes[r, K].set_visible(False)
for k in range(K):
axes[0, k].set_title("Component {}".format(k + 1)) # , color=colors[k])
plt.subplots_adjust(hspace=0.05, wspace=0.05)
return fig
|
deepmind-research-master
|
iodine/modules/plotting.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
deepmind-research-master
|
iodine/modules/__init__.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Iterative refinement modules."""
# pylint: disable=g-doc-bad-indent, unused-variable
from iodine.modules import utils
import shapeguard
import sonnet as snt
import tensorflow.compat.v1 as tf
class RefinementCore(snt.RNNCore):
"""Recurrent Refinement Module.
Refinement modules take as inputs:
* previous state (which could be an arbitrary nested structure)
* current inputs which include
* image-space inputs like pixel-based errors, or mask-posteriors
* latent-space inputs like the previous z_dist, or dz
They use these inputs to produce:
* output (usually a new z_dist)
* new_state
"""
def __init__(self,
encoder_net,
recurrent_net,
refinement_head,
name="refinement"):
super().__init__(name=name)
self._encoder_net = encoder_net
self._recurrent_net = recurrent_net
self._refinement_head = refinement_head
self._sg = shapeguard.ShapeGuard()
def initial_state(self, batch_size, **unused_kwargs):
return self._recurrent_net.initial_state(batch_size)
def _build(self, inputs, prev_state):
sg = self._sg
assert "spatial" in inputs, inputs.keys()
assert "flat" in inputs, inputs.keys()
assert "zp" in inputs["flat"], inputs["flat"].keys()
zp = sg.guard(inputs["flat"]["zp"], "B, K, Zp")
x = sg.guard(self.prepare_spatial_inputs(inputs["spatial"]), "B*K, H, W, C")
h1 = sg.guard(self._encoder_net(x).params, "B*K, H1")
h2 = sg.guard(self.prepare_flat_inputs(h1, inputs["flat"]), "B*K, H2")
h2_unflattened = sg.reshape(h2, "B, K, H2")
h3, next_state = self._recurrent_net(h2_unflattened, prev_state)
sg.guard(h3, "B, K, H3")
outputs = sg.guard(self._refinement_head(zp, h3), "B, K, Y")
del self._sg.B
return outputs, next_state
def prepare_spatial_inputs(self, inputs):
values = []
for name, val in sorted(inputs.items(), key=lambda it: it[0]):
if val.shape.as_list()[1] == 1:
self._sg.guard(val, "B, 1, H, W, _C")
val = tf.tile(val, self._sg["1, K, 1, 1, 1"])
else:
self._sg.guard(val, "B, K, H, W, _C")
values.append(val)
concat_inputs = self._sg.guard(tf.concat(values, axis=-1), "B, K, H, W, C")
return self._sg.reshape(concat_inputs, "B*K, H, W, C")
def prepare_flat_inputs(self, hidden, inputs):
values = [self._sg.guard(hidden, "B*K, H1")]
for name, val in sorted(inputs.items(), key=lambda it: it[0]):
self._sg.guard(val, "B, K, _")
val_flat = tf.reshape(val, self._sg["B*K"] + [-1])
values.append(val_flat)
return tf.concat(values, axis=-1)
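# Editor's sketch, not part of the original module: one refinement step. The
# encoder and recurrent nets passed in are assumptions standing in for modules
# built from the experiment config (see networks.py); `inputs` is the
# {"spatial": ..., "flat": ...} dict assembled by the IODINE model.
def _example_refinement_step(encoder_net, recurrent_net, inputs,
                             batch_size, num_components):
  refiner = RefinementCore(
      encoder_net=encoder_net,
      recurrent_net=recurrent_net,
      refinement_head=ResHead())
  state = refiner.initial_state(batch_size * num_components)
  # A single iteration returns the updated latent parameters and the RNN state.
  zp, state = refiner(inputs, state)
  return zp, state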
class ResHead(snt.AbstractModule):
"""Updates Zp using a residual mechanism."""
def __init__(self, name="residual_head"):
super().__init__(name=name)
def _build(self, zp_old, inputs):
sg = shapeguard.ShapeGuard()
sg.guard(zp_old, "B, K, Zp")
sg.guard(inputs, "B, K, H")
update = snt.Linear(sg.Zp)
flat_zp = sg.reshape(zp_old, "B*K, Zp")
flat_inputs = sg.reshape(inputs, "B*K, H")
zp = flat_zp + update(flat_inputs)
return sg.reshape(zp, "B, K, Zp")
class PredictorCorrectorHead(snt.AbstractModule):
"""This refinement head is used for sequential data.
At every step it computes a prediction from the λ of the previous timestep
and an update from the refinement network of the current timestep.
The next step λ' is computed as a gated combination of both:
λ' = g * λ_corr + (1-g) * λ_pred
"""
def __init__(
self,
hidden_sizes=(64,),
pred_gate_bias=0.0,
corrector_gate_bias=0.0,
activation=tf.nn.elu,
name="predcorr_head",
):
super().__init__(name=name)
self._hidden_sizes = hidden_sizes
self._activation = utils.get_act_func(activation)
self._pred_gate_bias = pred_gate_bias
self._corrector_gate_bias = corrector_gate_bias
def _build(self, zp_old, inputs):
sg = shapeguard.ShapeGuard()
sg.guard(zp_old, "B, K, Zp")
sg.guard(inputs, "B, K, H")
update = snt.Linear(sg.Zp)
update_gate = snt.Linear(sg.Zp)
predict = snt.nets.MLP(
output_sizes=list(self._hidden_sizes) + [sg.Zp * 2],
activation=self._activation,
)
flat_zp = sg.reshape(zp_old, "B*K, Zp")
flat_inputs = sg.reshape(inputs, "B*K, H")
g = tf.nn.sigmoid(update_gate(flat_inputs) + self._corrector_gate_bias)
u = update(flat_inputs)
# a slightly more efficient way of computing the gated update
# (1-g) * flat_zp + g * u
zp_corrected = flat_zp + g * (u - flat_zp)
predicted = predict(flat_zp)
pred_up = predicted[:, :sg.Zp]
pred_gate = tf.nn.sigmoid(predicted[:, sg.Zp:] + self._pred_gate_bias)
zp = zp_corrected + pred_gate * (pred_up - zp_corrected)
return sg.reshape(zp, "B, K, Zp")
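# Editor's note (added sketch): the residual form used in _build above,
# zp_corrected + g * (pred - zp_corrected), is algebraically the convex
# combination (1 - g) * zp_corrected + g * pred. A tiny check with toy values:
def _example_gated_combination():
  lam_corr = tf.constant([[1.0, 2.0]])
  lam_pred = tf.constant([[5.0, 6.0]])
  gate = tf.constant([[0.25, 0.75]])
  residual_form = lam_corr + gate * (lam_pred - lam_corr)
  convex_form = (1.0 - gate) * lam_corr + gate * lam_pred
  return residual_form, convex_form  # both evaluate to [[2.0, 5.0]]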
|
deepmind-research-master
|
iodine/modules/refinement.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of sonnet modules that wrap useful distributions."""
# pylint: disable=missing-docstring, g-doc-args, g-short-docstring-punctuation
# pylint: disable=g-space-before-docstring-summary
# pylint: disable=g-no-space-after-docstring-summary
import collections
from iodine.modules.utils import get_act_func
from iodine.modules.utils import get_distribution
import shapeguard
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
FlatParameters = collections.namedtuple("ParameterOut", ["params"])
MixtureParameters = collections.namedtuple("MixtureOut", ["pixel", "mask"])
class DistributionModule(snt.AbstractModule):
"""Distribution Base class supporting shape inference & default priors."""
def __init__(self, name="distribution"):
super().__init__(name=name)
self._output_shape = None
def set_output_shape(self, shape):
self._output_shape = shape
@property
def output_shape(self):
return self._output_shape
@property
def input_shapes(self):
raise NotImplementedError()
def get_default_prior(self, batch_dim=(1,)):
return self(
tf.zeros(list(batch_dim) + self.input_shapes.params, dtype=tf.float32))
class BernoulliOutput(DistributionModule):
def __init__(self, name="bernoulli_output"):
super().__init__(name=name)
@property
def input_shapes(self):
return FlatParameters(self.output_shape)
def _build(self, params):
return tfd.Independent(
tfd.Bernoulli(logits=params, dtype=tf.float32),
reinterpreted_batch_ndims=1)
class LocScaleDistribution(DistributionModule):
"""Generic IID location / scale distribution.
Input parameters are concatenation of location and scale (2*Z,)
Args:
dist: Distribution or str Kind of distribution used. Supports Normal,
Logistic, Laplace, and StudentT distributions.
dist_kwargs: dict custom keyword arguments for the distribution
scale_act: function or str or None activation function to be applied to
the scale input
scale: str
different modes for computing the scale:
* stddev: scale is computed as scale_act(s)
* var: scale is computed as sqrt(scale_act(s))
* prec: scale is computed as 1./scale_act(s)
* fixed: scale is a global variable (same for all pixels) if
scale_val==-1. then it is a trainable variable initialized to 0.1
else it is fixed to scale_val (input shape is only (Z,) in this
case)
scale_val: float determines the scale value (only used if scale=='fixed').
loc_act: function or str or None activation function to be applied to the
location input.
"""
def __init__(
self,
dist=tfd.Normal,
dist_kwargs=None,
scale_act=tf.exp,
scale="stddev",
scale_val=1.0,
loc_act=None,
name="loc_scale_dist",
):
super().__init__(name=name)
self._scale_act = get_act_func(scale_act)
self._loc_act = get_act_func(loc_act)
# supports Normal, Logistic, Laplace, StudentT
self._dist = get_distribution(dist)
self._dist_kwargs = dist_kwargs or {}
assert scale in ["stddev", "var", "prec", "fixed"], scale
self._scale = scale
self._scale_val = scale_val
@property
def input_shapes(self):
if self._scale == "fixed":
param_shape = self.output_shape
else:
param_shape = self.output_shape[:-1] + [self.output_shape[-1] * 2]
return FlatParameters(param_shape)
def _build(self, params):
if self._scale == "fixed":
loc = params
scale = None # set later
else:
n_channels = params.get_shape().as_list()[-1]
assert n_channels % 2 == 0
assert n_channels // 2 == self.output_shape[-1]
loc = params[..., :n_channels // 2]
scale = params[..., n_channels // 2:]
# apply activation functions
if self._scale != "fixed":
scale = self._scale_act(scale)
loc = self._loc_act(loc)
# apply the correct parametrization
if self._scale == "var":
scale = tf.sqrt(scale)
elif self._scale == "prec":
scale = tf.reciprocal(scale)
elif self._scale == "fixed":
if self._scale_val == -1.0:
scale_val = tf.get_variable(
"scale", initializer=tf.constant(0.1, dtype=tf.float32))
else:
scale_val = self._scale_val
scale = tf.ones_like(loc) * scale_val
# else 'stddev'
dist = self._dist(loc=loc, scale=scale, **self._dist_kwargs)
return tfd.Independent(dist, reinterpreted_batch_ndims=1)
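# Editor's sketch (shapes are assumptions, not from the original file): turning
# a flat parameter vector into an IID Normal with softplus-parametrised stddev.
def _example_loc_scale_usage():
  dist_module = LocScaleDistribution(scale_act=tf.nn.softplus, scale="stddev")
  dist_module.set_output_shape([4])       # event size Z = 4
  params = tf.zeros([2, 8])               # (batch, 2 * Z): loc then raw scale
  return dist_module(params)              # tfd.Independent over the last axis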
class MaskedMixture(DistributionModule):
def __init__(
self,
num_components,
component_dist,
mask_activation=None,
name="masked_mixture",
):
"""
Spatial Mixture Model composed of a categorical masking distribution and
a custom pixel-wise component distribution (usually logistic or
gaussian).
Args:
num_components: int Number of mixture components >= 2
component_dist: the distribution to use for the individual components
mask_activation: str or function or None activation function that
should be applied to the mask before the softmax.
name: str
"""
super().__init__(name=name)
self._num_components = num_components
self._dist = component_dist
self._mask_activation = get_act_func(mask_activation)
def set_output_shape(self, shape):
super().set_output_shape(shape)
self._dist.set_output_shape(shape)
def _build(self, pixel, mask):
sg = shapeguard.ShapeGuard()
# MASKING
sg.guard(mask, "B, K, H, W, 1")
mask = tf.transpose(mask, perm=[0, 2, 3, 4, 1])
mask = sg.reshape(mask, "B, H, W, K")
mask = self._mask_activation(mask)
mask = mask[:, tf.newaxis] # add K=1 axis since K is removed by mixture
mix_dist = tfd.Categorical(logits=mask)
# COMPONENTS
sg.guard(pixel, "B, K, H, W, Cp")
params = tf.transpose(pixel, perm=[0, 2, 3, 1, 4])
params = params[:, tf.newaxis] # add K=1 axis since K is removed by mixture
dist = self._dist(params)
return tfd.MixtureSameFamily(
mixture_distribution=mix_dist, components_distribution=dist)
@property
def input_shapes(self):
pixel = [self._num_components] + self._dist.input_shapes.params
mask = pixel[:-1] + [1]
return MixtureParameters(pixel, mask)
def get_default_prior(self, batch_dim=(1,)):
pixel = tf.zeros(
list(batch_dim) + self.input_shapes.pixel, dtype=tf.float32)
mask = tf.zeros(list(batch_dim) + self.input_shapes.mask, dtype=tf.float32)
return self(pixel, mask)
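# Editor's sketch (shapes are assumptions): a spatial mixture over K = 4 slots
# with a fixed-scale Normal pixel distribution, queried via its default prior.
def _example_masked_mixture_prior():
  pixel_dist = LocScaleDistribution(scale="fixed", scale_val=0.1)
  mixture = MaskedMixture(num_components=4, component_dist=pixel_dist)
  mixture.set_output_shape([8, 8, 3])   # H, W, C of the modelled image
  # Zero parameters give uniform mixing weights and zero-mean components.
  return mixture.get_default_prior(batch_dim=(2,))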
|
deepmind-research-master
|
iodine/modules/distributions.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network modules."""
# pylint: disable=g-multiple-import, g-doc-args, g-short-docstring-punctuation
# pylint: disable=g-no-space-after-docstring-summary
from iodine.modules.distributions import FlatParameters
from iodine.modules.utils import flatten_all_but_last, get_act_func
import numpy as np
import shapeguard
import sonnet as snt
import tensorflow.compat.v1 as tf
class CNN(snt.AbstractModule):
"""ConvNet2D followed by an MLP.
This is a typical encoder architecture for VAEs, and has been found to work
well. One small improvement is to append coordinate channels on the input,
though for most datasets the improvement obtained is negligible.
"""
def __init__(self, cnn_opt, mlp_opt, mode="flatten", name="cnn"):
"""Constructor.
Args:
cnn_opt: Dictionary. Kwargs for the cnn. See vae_lib.ConvNet2D for
details.
mlp_opt: Dictionary. Kwargs for the mlp. See vae_lib.MLP for details.
name: String. Optional name.
"""
super().__init__(name=name)
if "activation" in cnn_opt:
cnn_opt["activation"] = get_act_func(cnn_opt["activation"])
self._cnn_opt = cnn_opt
if "activation" in mlp_opt:
mlp_opt["activation"] = get_act_func(mlp_opt["activation"])
self._mlp_opt = mlp_opt
self._mode = mode
def set_output_shapes(self, shape):
# assert self._mlp_opt['output_sizes'][-1] is None, self._mlp_opt
sg = shapeguard.ShapeGuard()
sg.guard(shape, "1, Y")
self._mlp_opt["output_sizes"][-1] = sg.Y
def _build(self, image):
"""Connect model to TensorFlow graph."""
assert self._mlp_opt["output_sizes"][-1] is not None, "set output_shapes"
sg = shapeguard.ShapeGuard()
flat_image, unflatten = flatten_all_but_last(image, n_dims=3)
sg.guard(flat_image, "B, H, W, C")
cnn = snt.nets.ConvNet2D(
activate_final=True,
paddings=("SAME",),
normalize_final=False,
**self._cnn_opt)
mlp = snt.nets.MLP(**self._mlp_opt)
# run CNN
net = cnn(flat_image)
if self._mode == "flatten":
# flatten
net_shape = net.get_shape().as_list()
flat_shape = net_shape[:-3] + [np.prod(net_shape[-3:])]
net = tf.reshape(net, flat_shape)
elif self._mode == "avg_pool":
net = tf.reduce_mean(net, axis=[1, 2])
else:
raise KeyError('Unknown mode "{}"'.format(self._mode))
# run MLP
output = sg.guard(mlp(net), "B, Y")
return FlatParameters(unflatten(output))
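# Editor's sketch, not part of the original file: a typical configuration for
# the CNN encoder. The concrete layer sizes are placeholders, not the values
# used in the published experiments.
def _example_cnn_encoder():
  encoder = CNN(
      cnn_opt={"output_channels": [32, 32],
               "kernel_shapes": [3],
               "strides": [2],
               "activation": "elu"},
      mlp_opt={"output_sizes": [128, None]},   # last size is filled in below
      mode="avg_pool")
  encoder.set_output_shapes([1, 64])           # output parameter size Y = 64
  images = tf.zeros([2, 5, 16, 16, 3])         # (B, K, H, W, C)
  return encoder(images).params                # shape (2, 5, 64)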
class MLP(snt.AbstractModule):
"""MLP."""
def __init__(self, name="mlp", **mlp_opt):
super().__init__(name=name)
if "activation" in mlp_opt:
mlp_opt["activation"] = get_act_func(mlp_opt["activation"])
self._mlp_opt = mlp_opt
assert mlp_opt["output_sizes"][-1] is None, mlp_opt
def set_output_shapes(self, shape):
sg = shapeguard.ShapeGuard()
sg.guard(shape, "1, Y")
self._mlp_opt["output_sizes"][-1] = sg.Y
def _build(self, data):
"""Connect model to TensorFlow graph."""
assert self._mlp_opt["output_sizes"][-1] is not None, "set output_shapes"
sg = shapeguard.ShapeGuard()
flat_data, unflatten = flatten_all_but_last(data)
sg.guard(flat_data, "B, N")
mlp = snt.nets.MLP(**self._mlp_opt)
# run MLP
output = sg.guard(mlp(flat_data), "B, Y")
return FlatParameters(unflatten(output))
class DeConv(snt.AbstractModule):
"""MLP followed by Deconv net.
This decoder is commonly used by vanilla VAE models. However, in practice
BroadcastConv (see below) seems to disentangle slightly better.
"""
def __init__(self, mlp_opt, cnn_opt, name="deconv"):
"""Constructor.
Args:
mlp_opt: Dictionary. Kwargs for vae_lib.MLP.
cnn_opt: Dictionary. Kwargs for vae_lib.ConvNet2D for the CNN.
name: Optional name.
"""
super().__init__(name=name)
assert cnn_opt["output_channels"][-1] is None, cnn_opt
if "activation" in cnn_opt:
cnn_opt["activation"] = get_act_func(cnn_opt["activation"])
self._cnn_opt = cnn_opt
if mlp_opt and "activation" in mlp_opt:
mlp_opt["activation"] = get_act_func(mlp_opt["activation"])
self._mlp_opt = mlp_opt
self._target_out_shape = None
def set_output_shapes(self, shape):
self._target_out_shape = shape
self._cnn_opt["output_channels"][-1] = self._target_out_shape[-1]
def _build(self, z):
"""Connect model to TensorFlow graph."""
sg = shapeguard.ShapeGuard()
flat_z, unflatten = flatten_all_but_last(z)
sg.guard(flat_z, "B, Z")
sg.guard(self._target_out_shape, "H, W, C")
mlp = snt.nets.MLP(**self._mlp_opt)
cnn = snt.nets.ConvNet2DTranspose(
paddings=("SAME",), normalize_final=False, **self._cnn_opt)
net = mlp(flat_z)
output = sg.guard(cnn(net), "B, H, W, C")
return FlatParameters(unflatten(output))
class BroadcastConv(snt.AbstractModule):
"""MLP followed by a broadcast convolution.
This decoder takes a latent vector z, (optionally) applies an MLP to it,
then tiles the resulting vector across space to have dimension [B, H, W, C]
i.e. tiles across H and W. Then coordinate channels are appended and a
convolutional layer is applied.
"""
def __init__(
self,
cnn_opt,
mlp_opt=None,
coord_type="linear",
coord_freqs=3,
name="broadcast_conv",
):
"""Args:
cnn_opt: dict Kwargs for vae_lib.ConvNet2D for the CNN.
mlp_opt: None or dict If dictionary, then kwargs for snt.nets.MLP. If
None, then the model will not process the latent vector by an mlp.
coord_type: ["linear", "cos", None] type of coordinate channels to
add.
None: add no coordinate channels.
linear: two channels with values linearly spaced from -1. to 1. in
the H and W dimension respectively.
cos: coord_freqs^2 many channels containing cosine basis functions.
coord_freqs: int number of frequencies used to construct the cosine
basis functions (only for coord_type=="cos")
name: Optional name.
"""
super().__init__(name=name)
assert cnn_opt["output_channels"][-1] is None, cnn_opt
if "activation" in cnn_opt:
cnn_opt["activation"] = get_act_func(cnn_opt["activation"])
self._cnn_opt = cnn_opt
if mlp_opt and "activation" in mlp_opt:
mlp_opt["activation"] = get_act_func(mlp_opt["activation"])
self._mlp_opt = mlp_opt
self._target_out_shape = None
self._coord_type = coord_type
self._coord_freqs = coord_freqs
def set_output_shapes(self, shape):
self._target_out_shape = shape
self._cnn_opt["output_channels"][-1] = self._target_out_shape[-1]
def _build(self, z):
"""Connect model to TensorFlow graph."""
assert self._target_out_shape is not None, "Call set_output_shape"
# reshape components into batch dimension before processing them
sg = shapeguard.ShapeGuard()
flat_z, unflatten = flatten_all_but_last(z)
sg.guard(flat_z, "B, Z")
sg.guard(self._target_out_shape, "H, W, C")
if self._mlp_opt is None:
mlp = tf.identity
else:
mlp = snt.nets.MLP(activate_final=True, **self._mlp_opt)
mlp_output = sg.guard(mlp(flat_z), "B, hidden")
# tile MLP output spatially and append coordinate channels
broadcast_mlp_output = tf.tile(
mlp_output[:, tf.newaxis, tf.newaxis],
multiples=tf.constant(sg["1, H, W, 1"]),
) # B, H, W, Z
dec_cnn_inputs = self.append_coordinate_channels(broadcast_mlp_output)
cnn = snt.nets.ConvNet2D(
paddings=("SAME",), normalize_final=False, **self._cnn_opt)
cnn_outputs = cnn(dec_cnn_inputs)
sg.guard(cnn_outputs, "B, H, W, C")
return FlatParameters(unflatten(cnn_outputs))
def append_coordinate_channels(self, output):
sg = shapeguard.ShapeGuard()
sg.guard(output, "B, H, W, C")
if self._coord_type is None:
return output
if self._coord_type == "linear":
w_coords = tf.linspace(-1.0, 1.0, sg.W)[None, None, :, None]
h_coords = tf.linspace(-1.0, 1.0, sg.H)[None, :, None, None]
w_coords = tf.tile(w_coords, sg["B, H, 1, 1"])
h_coords = tf.tile(h_coords, sg["B, 1, W, 1"])
return tf.concat([output, h_coords, w_coords], axis=-1)
elif self._coord_type == "cos":
freqs = sg.guard(tf.range(0.0, self._coord_freqs), "F")
valx = tf.linspace(0.0, np.pi, sg.W)[None, None, :, None, None]
valy = tf.linspace(0.0, np.pi, sg.H)[None, :, None, None, None]
x_basis = tf.cos(valx * freqs[None, None, None, :, None])
y_basis = tf.cos(valy * freqs[None, None, None, None, :])
xy_basis = tf.reshape(x_basis * y_basis, sg["1, H, W, F*F"])
coords = tf.tile(xy_basis, sg["B, 1, 1, 1"])[..., 1:]
return tf.concat([output, coords], axis=-1)
else:
raise KeyError('Unknown coord_type: "{}"'.format(self._coord_type))
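# Editor's sketch (assumptions only): decoding per-object latents with the
# broadcast decoder; layer sizes below are placeholders, not the published
# configuration.
def _example_broadcast_decoder():
  decoder = BroadcastConv(
      cnn_opt={"output_channels": [32, 32, None],   # last entry filled in below
               "kernel_shapes": [3],
               "strides": [1],
               "activation": "elu"},
      mlp_opt={"output_sizes": [64]},
      coord_type="linear")
  decoder.set_output_shapes([16, 16, 4])    # H, W, C of the decoded parameters
  z = tf.zeros([2, 5, 32])                  # (B, K, Z)
  return decoder(z).params                  # shape (2, 5, 16, 16, 4)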
class LSTM(snt.RNNCore):
"""Wrapper around snt.LSTM that supports multi-layers and runs K components in
parallel.
Expects input data of shape (B, K, H) and outputs data of shape (B, K, Y)
"""
def __init__(self, hidden_sizes, name="lstm"):
super().__init__(name=name)
self._hidden_sizes = hidden_sizes
with self._enter_variable_scope():
self._lstm_layers = [snt.LSTM(hidden_size=h) for h in self._hidden_sizes]
def initial_state(self, batch_size, **kwargs):
return [
lstm.initial_state(batch_size, **kwargs) for lstm in self._lstm_layers
]
def _build(self, data, prev_states):
assert not self._hidden_sizes or self._hidden_sizes[-1] is not None
assert len(prev_states) == len(self._hidden_sizes)
sg = shapeguard.ShapeGuard()
sg.guard(data, "B, K, H")
data = sg.reshape(data, "B*K, H")
out = data
new_states = []
for lstm, pstate in zip(self._lstm_layers, prev_states):
out, nstate = lstm(out, pstate)
new_states.append(nstate)
sg.guard(out, "B*K, Y")
out = sg.reshape(out, "B, K, Y")
return out, new_states
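# Editor's sketch: running the K-parallel LSTM wrapper for one step; the
# hidden sizes and tensor shapes below are illustrative assumptions.
def _example_parallel_lstm():
  core = LSTM(hidden_sizes=[64, 64])
  data = tf.zeros([2, 5, 32])                      # (B, K, H)
  state = core.initial_state(batch_size=2 * 5)     # one state per (B * K) slot
  out, state = core(data, state)                   # out: (2, 5, 64)
  return out, state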
|
deepmind-research-master
|
iodine/modules/networks.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for IODINE."""
# pylint: disable=g-doc-bad-indent, g-doc-return-or-yield, g-doc-args
# pylint: disable=missing-docstring
import importlib
import math
from absl import logging
from matplotlib.colors import hsv_to_rgb
import numpy as np
import shapeguard
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
ACT_FUNCS = {
"identity": tf.identity,
"sigmoid": tf.nn.sigmoid,
"tanh": tf.nn.tanh,
"relu": tf.nn.relu,
"elu": tf.nn.elu,
"selu": tf.nn.selu,
"softplus": tf.nn.softplus,
"exp": tf.exp,
"softmax": tf.nn.softmax,
}
def get_act_func(name_or_func):
if name_or_func is None:
return tf.identity
if callable(name_or_func):
return name_or_func
elif isinstance(name_or_func, str):
return ACT_FUNCS[name_or_func.lower()]
else:
raise KeyError(
'Unknown activation function "{}" of type {}'.format(
name_or_func, type(name_or_func)
)
)
DISTS = {
"normal": tfd.Normal,
"log_normal": tfd.LogNormal,
"laplace": tfd.Laplace,
"logistic": tfd.Logistic,
}
def get_distribution(name_or_dist):
if isinstance(name_or_dist, type(tfd.Normal)):
return name_or_dist
elif isinstance(name_or_dist, str):
return DISTS[name_or_dist.lower()]
raise KeyError(
'Unknown distribution "{}" of type {}'.format(name_or_dist,
type(name_or_dist)))
def get_mask_plot_colors(nr_colors):
"""Get nr_colors uniformly spaced hues to plot mask values."""
hsv_colors = np.ones((nr_colors, 3), dtype=np.float32)
hsv_colors[:, 0] = np.linspace(0, 1, nr_colors, endpoint=False)
color_conv = hsv_to_rgb(hsv_colors)
return color_conv
def color_transform(masks):
with tf.name_scope("color_transform"):
n_components = masks.shape.as_list()[-1]
colors = tf.constant(get_mask_plot_colors(n_components), name="mask_colors")
return tf.tensordot(masks, colors, axes=1)
def construct_diagnostic_image(
images,
recons,
masks,
components,
border_width=2,
nr_images=6,
clip=True,
mask_components=False,
):
"""Construct a single image containing image, recons., mask, and components.
Args:
images: (B, H, W, C)
recons: (B, H, W, C)
masks: (B, H, W, K)
components: (B, H, W, K, C)
border_width: int. width of the border in pixels. (default=2)
nr_images: int. Number of images to include. (default=6)
clip: bool. Whether to clip the final image to range [0, 1].
Returns:
diag_images: (nr, H+border_width*2, (W+border_width*2) * (K+3), 3)
"""
with tf.name_scope("diagnostic_image"):
# transform the masks into RGB images
recolored_masks = color_transform(masks[:nr_images])
if images.get_shape().as_list()[-1] == 1:
# deal with grayscale images
images = tf.tile(images[:nr_images], [1, 1, 1, 3])
recons = tf.tile(recons[:nr_images], [1, 1, 1, 3])
components = tf.tile(components[:nr_images], [1, 1, 1, 1, 3])
if mask_components:
components *= masks[:nr_images, ..., tf.newaxis]
# Pad everything
no_pad, pad = (0, 0), (border_width, border_width)
paddings = tf.constant([no_pad, pad, pad, no_pad])
paddings_components = tf.constant([no_pad, pad, pad, no_pad, no_pad])
pad_images = tf.pad(images[:nr_images], paddings, constant_values=0.5)
pad_recons = tf.pad(recons[:nr_images], paddings, constant_values=0.5)
pad_masks = tf.pad(recolored_masks, paddings, constant_values=1.0)
pad_components = tf.pad(
components[:nr_images], paddings_components, constant_values=0.5
)
# reshape components into single wide image
pad_components = tf.transpose(pad_components, [0, 1, 3, 2, 4])
pc_shape = pad_components.shape.as_list()
pc_shape[2] = pc_shape[2] * pc_shape.pop(3)
pad_components = tf.reshape(pad_components, pc_shape)
# concatenate all parts along width
diag_imgs = tf.concat(
[pad_images, pad_recons, pad_masks, pad_components], axis=2
)
# concatenate all images along height
diag_shape = diag_imgs.shape.as_list()
final_img = tf.reshape(diag_imgs, [1, -1, diag_shape[2], diag_shape[3]])
if clip:
final_img = tf.clip_by_value(final_img, 0.0, 1.0)
return final_img
def construct_reconstr_image(images, recons, border_width=2,
nr_images=6, clip=True):
"""Construct a single image containing image, and recons.
Args:
images: (B, H, W, C)
recons: (B, H, W, C)
border_width: int. width of the border in pixels. (default=2)
nr_images: int. Number of images to include. (default=6)
clip: bool. Whether to clip the final image to range [0, 1].
Returns:
rec_images: (nr, H+border_width*2, (W+border_width*2) * 2, 3)
"""
with tf.name_scope("diagnostic_image"):
# Pad everything
no_pad, pad = (0, 0), (border_width, border_width)
paddings = tf.constant([no_pad, pad, pad, no_pad])
pad_images = tf.pad(images[:nr_images], paddings, constant_values=0.5)
pad_recons = tf.pad(recons[:nr_images], paddings, constant_values=0.5)
# concatenate all parts along width
diag_imgs = tf.concat([pad_images, pad_recons], axis=2)
# concatenate all images along height
diag_shape = diag_imgs.shape.as_list()
final_img = tf.reshape(diag_imgs, [1, -1, diag_shape[2], diag_shape[3]])
if clip:
final_img = tf.clip_by_value(final_img, 0.0, 1.0)
return final_img
def construct_iterations_image(
images, recons, masks, border_width=2, nr_seqs=2, clip=True
):
"""Construct a single image containing image, and recons.
Args:
images: (B, T, 1, H, W, C)
recons: (B, T, 1, H, W, C)
masks: (B, T, K, H, W, 1)
border_width: int. width of the border in pixels. (default=2)
nr_seqs: int. Number of sequences to include. (default=2)
clip: bool. Whether to clip the final image to range [0, 1].
Returns:
rec_images: (nr, H+border_width*2, (W+border_width*2) * 2, 3)
"""
sg = shapeguard.ShapeGuard()
sg.guard(recons, "B, T, 1, H, W, C")
if images.get_shape().as_list()[1] == 1:
images = tf.tile(images, sg["1, T, 1, 1, 1, 1"])
sg.guard(images, "B, T, 1, H, W, C")
sg.guard(masks, " B, T, K, H, W, 1")
if sg.C == 1: # deal with grayscale
images = tf.tile(images, [1, 1, 1, 1, 1, 3])
recons = tf.tile(recons, [1, 1, 1, 1, 1, 3])
sg.S = min(nr_seqs, sg.B)
with tf.name_scope("diagnostic_image"):
# convert masks to rgb
masks_trans = tf.transpose(masks[:nr_seqs], [0, 1, 5, 3, 4, 2])
recolored_masks = color_transform(masks_trans)
# Pad everything
no_pad, pad = (0, 0), (border_width, border_width)
paddings = tf.constant([no_pad, no_pad, no_pad, pad, pad, no_pad])
pad_images = tf.pad(images[:nr_seqs], paddings, constant_values=0.5)
pad_recons = tf.pad(recons[:nr_seqs], paddings, constant_values=0.5)
pad_masks = tf.pad(recolored_masks, paddings, constant_values=0.5)
# concatenate all parts along width
triples = tf.concat([pad_images, pad_recons, pad_masks], axis=3)
triples = sg.guard(triples[:, :, 0], "S, T, 3*Hp, Wp, 3")
# concatenate iterations along width and sequences along height
final = tf.reshape(
tf.transpose(triples, [0, 2, 1, 3, 4]), sg["1, S*3*Hp, Wp*T, 3"]
)
if clip:
final = tf.clip_by_value(final, 0.0, 1.0)
return final
def get_overview_image(image, output_dist, mask_components=False):
recons = output_dist.mean()[:, 0]
image = image[:, 0]
if hasattr(output_dist, "mixture_distribution") and hasattr(
output_dist, "components_distribution"
):
mask = output_dist.mixture_distribution.probs[:, 0]
components = output_dist.components_distribution.mean()[:, 0]
return construct_diagnostic_image(
image, recons, mask, components, mask_components=mask_components
)
else:
return construct_reconstr_image(image, recons)
class OnlineMeanVarEstimator(snt.AbstractModule):
"""Online estimator for mean and variance using Welford's algorithm."""
def __init__(self, axis=None, ddof=0.0, name="online_mean_var"):
super().__init__(name=name)
self._axis = axis
self._ddof = ddof
def _build(self, x, weights=None):
if weights is None:
weights = tf.ones_like(x)
if weights.get_shape().as_list() != x.get_shape().as_list():
weights = tf.broadcast_to(weights, x.get_shape().as_list())
sum_weights = tf.reduce_sum(weights, axis=self._axis)
shape = sum_weights.get_shape().as_list()
total = tf.get_variable(
"total",
shape=shape,
dtype=weights.dtype,
initializer=tf.zeros_initializer(),
trainable=False,
)
mean = tf.get_variable(
"mean",
shape=shape,
dtype=x.dtype,
initializer=tf.zeros_initializer(),
trainable=False,
)
m2 = tf.get_variable(
"M2",
shape=shape,
dtype=x.dtype,
initializer=tf.zeros_initializer(),
trainable=False,
)
total_update = tf.assign_add(total, sum_weights)
with tf.control_dependencies([total_update]):
delta = (x - mean) * weights
mean_update = tf.assign_add(
mean, tf.reduce_sum(delta, axis=self._axis) / total
)
with tf.control_dependencies([mean_update]):
delta2 = x - mean
m2_update = tf.assign_add(
m2, tf.reduce_sum(delta * delta2, axis=self._axis)
)
with tf.control_dependencies([m2_update]):
return tf.identity(mean), m2 / (total - self._ddof), tf.identity(total)
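# Editor's note: a plain-Python rendition of the same Welford update (for
# ddof = 0), handy for sanity-checking the TF variables above. It is a hedged
# sketch and is not used by the training code.
def _welford_reference(samples):
  """Returns (mean, variance) of `samples` via one-pass Welford updates."""
  count, mean, m2 = 0, 0.0, 0.0
  for x in samples:
    count += 1
    delta = x - mean
    mean += delta / count
    m2 += delta * (x - mean)   # uses the *updated* mean, as in the TF code
  return mean, m2 / count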
def print_shapes(name, value, indent=""):
if isinstance(value, dict):
print("{}{}:".format(indent, name))
for k, v in sorted(value.items(),
key=lambda x: (isinstance(x[1], dict), x[0])):
print_shapes(k, v, indent + " ")
elif isinstance(value, list):
print(
"{}{}[{}]: {} @ {}".format(
indent, name, len(value), value[0].shape, value[0].dtype
)
)
elif isinstance(value, np.ndarray):
print("{}{}: {} @ {}".format(indent, name, value.shape, value.dtype))
elif isinstance(value, tf.Tensor):
print(
"{}{}: {} @ {}".format(
indent, name, value.get_shape().as_list(), value.dtype
)
)
elif np.isscalar(value):
print("{}{}: {}".format(indent, name, value))
else:
print("{}{}.type: {}".format(indent, name, type(value)))
def _pad_images(images, image_border_value=0.5, border_width=2):
"""Pad images to create gray borders.
Args:
images: Tensor of shape [B, H], [B, H, W], or [B, H, W, C].
image_border_value: Scalar value of the greyscale border for images.
border_width: Int. Border width in pixels.
Raises:
ValueError: if the image provided is not {2,3,4} dimensional.
Returns:
Tensor of same shape as images, except H and W become H + 2 * border_width
and W + 2 * border_width.
"""
image_rank = len(images.get_shape())
border_paddings = (border_width, border_width)
if image_rank == 2: # [B, H]
paddings = [(0, 0), border_paddings]
elif image_rank == 3: # [B, H, W]
paddings = [(0, 0), border_paddings, border_paddings]
elif image_rank == 4: # [B, H, W, C]
paddings = [(0, 0), border_paddings, border_paddings, (0, 0)]
else:
raise ValueError("expected image to be 2D, 3D or 4D, got %d" % image_rank)
paddings = tf.constant(paddings)
return tf.pad(images, paddings, "CONSTANT",
constant_values=image_border_value)
def images_to_grid(
images,
grid_height=None,
grid_width=4,
max_grid_height=4,
max_grid_width=4,
image_border_value=0.5,
):
"""Combine images and arrange them in a grid.
Args:
images: Tensor of shape [B, H], [B, H, W], or [B, H, W, C].
grid_height: Height of the grid of images to output, or None. Either
`grid_width` or `grid_height` must be set to an integer value.
If None, `grid_height` is set to ceil(B/`grid_width`), and capped at
`max_grid_height` when provided.
grid_width: Width of the grid of images to output, or None. Either
`grid_width` or `grid_height` must be set to an integer value.
If None, `grid_width` is set to ceil(B/`grid_height`), and capped at
`max_grid_width` when provided.
max_grid_height: Maximum allowable height of the grid of images to
output or None. Only used when `grid_height` is None.
max_grid_width: Maximum allowable width of the grid of images to output,
or None. Only used when `grid_width` is None.
image_border_value: None or scalar value of the greyscale border for images.
If None, then no border is rendered.
Raises:
ValueError: if neither of grid_width or grid_height are set to a positive
integer.
Returns:
images: Tensor of shape [height*H, width*W, C].
C will be set to 1 if the input was provided with no channels.
Contains all input images in a grid.
"""
# If only one dimension is set, infer how big the other one should be.
if grid_height is None:
if not isinstance(grid_width, int) or grid_width <= 0:
raise ValueError(
"if `grid_height` is None, `grid_width` must be " "a positive integer"
)
grid_height = int(math.ceil(images.get_shape()[0].value / grid_width))
if max_grid_height is not None:
grid_height = min(max_grid_height, grid_height)
if grid_width is None:
if not isinstance(grid_height, int) or grid_height <= 0:
raise ValueError(
"if `grid_width` is None, `grid_height` must be " "a positive integer"
)
grid_width = int(math.ceil(images.get_shape()[0].value / grid_height))
if max_grid_width is not None:
grid_width = min(max_grid_width, grid_width)
images = images[: grid_height * grid_width, ...]
# Pad with extra blank frames if grid_height x grid_width is less than the
# number of frames provided.
pre_images_shape = images.get_shape().as_list()
if pre_images_shape[0] < grid_height * grid_width:
pre_images_shape[0] = grid_height * grid_width - pre_images_shape[0]
if image_border_value is not None:
dummy_frames = image_border_value * tf.ones(
shape=pre_images_shape, dtype=images.dtype
)
else:
dummy_frames = tf.zeros(shape=pre_images_shape, dtype=images.dtype)
images = tf.concat([images, dummy_frames], axis=0)
if image_border_value:
images = _pad_images(images, image_border_value=image_border_value)
images_shape = images.get_shape().as_list()
images = tf.reshape(images, [grid_height, grid_width] + images_shape[1:])
if len(images_shape) == 2:
images = tf.expand_dims(images, -1)
if len(images_shape) <= 3:
images = tf.expand_dims(images, -1)
image_height, image_width, channels = images.get_shape().as_list()[2:]
images = tf.transpose(images, perm=[0, 2, 1, 3, 4])
images = tf.reshape(
images, [grid_height * image_height, grid_width * image_width, channels]
)
return images
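# Editor's sketch: tiling a small batch into a 2x4 grid with grey borders;
# the image tensor is a placeholder, not data from the input pipeline.
def _example_images_to_grid():
  frames = tf.zeros([8, 32, 32, 3])
  grid = images_to_grid(frames, grid_height=2, grid_width=4,
                        image_border_value=0.5)
  return grid   # shape: (2 * (32 + 4), 4 * (32 + 4), 3)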
def flatten_all_but_last(tensor, n_dims=1):
shape = tensor.shape.as_list()
batch_dims = shape[:-n_dims]
flat_tensor = tf.reshape(tensor, [np.prod(batch_dims)] + shape[-n_dims:])
def unflatten(other_tensor):
other_shape = other_tensor.shape.as_list()
return tf.reshape(other_tensor, batch_dims + other_shape[1:])
return flat_tensor, unflatten
def ensure_3d(tensor):
if tensor.shape.ndims == 2:
return tensor[..., None]
assert tensor.shape.ndims == 3
return tensor
built_element_cache = {
"none": None,
"global_step": tf.train.get_or_create_global_step(),
}
def build(plan, identifier):
logging.debug("building %s", identifier)
if identifier in built_element_cache:
logging.debug("%s is already built, returning", identifier)
return built_element_cache[identifier]
elif not isinstance(plan, dict):
return plan
elif "constructor" in plan:
ctor = _resolve_constructor(plan)
kwargs = {
k: build(v, identifier=k) for k, v in plan.items() if k != "constructor"
}
with tf.variable_scope(identifier):
built_element_cache[identifier] = ctor(**kwargs)
return built_element_cache[identifier]
else:
return {k: build(v, identifier=k) for k, v in plan.items()}
def _resolve_constructor(plan_subsection):
assert "constructor" in plan_subsection, plan_subsection
if isinstance(plan_subsection["constructor"], str):
module, _, ctor = plan_subsection["constructor"].rpartition(".")
mod = importlib.import_module(module)
return getattr(mod, ctor)
else:
return plan_subsection["constructor"]
|
deepmind-research-master
|
iodine/modules/utils.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stochastic Variational inference Auto-Encoder."""
# pylint: disable=unused-variable, g-bad-todo
import collections
from iodine.modules import utils
from multi_object_datasets.segmentation_metrics import adjusted_rand_index
import numpy as np
import shapeguard
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
logging = tf.logging
DEFAULT_INPUTS = (
"image",
"zp",
"mask",
"components",
"dmask",
"dzp",
"dcomponents",
"posterior",
"log_prob",
"pred_mask",
"capacity",
"flat_capacity",
"coordinates",
"counterfactual",
)
DEFAULT_PREPROCESSING = [
"dcomponents", "dmask", "dzp", "log_prob", "counterfactual"
]
DEFAULT_STOP_GRADIENT = ("dzp", "dmask", "dcomponents", "log_prob",
"counterfactual")
class IODINE(snt.AbstractModule):
"""Iterative Amortized Variational Autoencoder.
Args:
decoder (decoders.ComponentDecoder): The decoder.
refinement_core (refinement.RefinementCore): The recurrent (refinement)
encoder.
latent_dist (distributions.Distribution): The distribution of the latent
z variables.
output_dist (distributions.MaskedMixture): The pixel-wise output
distribution (a spatial mixture).
n_z (int): Dimensionality of the per-object latents z_k.
num_components (int): Number of available object slots (K).
num_iters (int): Number of refinement iterations.
sequential (bool): Whether the input data is sequential.
factor_evaluator (factor_eval.FactorRegressor): The factor evaluation
model that is trained to predict the true factors from the inferred
latents.
stop_gradients (List[str]): For which refinement inputs to stop
gradients from backpropagating through the iterations. (see inputs for
valid values)
Default is: ["dcomponents", "dmask", "dzp", "log_prob",
"counterfactual"]
iter_loss_weight ("linspace" | float | List[float]): How to weigh the
loss terms for each timestep.
Can be:
"linspace": Linearly increasing weights from 0 to 1.0.
float: A fixed value for all steps.
List[float]: Manually specify all weights.
inputs (List[str]): list of inputs to use for the refinement network.
Can include the following (default is to use all): ["image", "zp",
"mask", "components", "dmask", "dzp", "dcomponents", "posterior",
"log_prob", "pred_mask", "capacity", "flat_capacity",
"coordinates", "counterfactual"]
preprocess (List[str]): Specifies the subset of inputs that should be
preprocessed with layernorm.
Default is: ["dcomponents", "dmask", "dzp", "log_prob",
"counterfactual"]
coord_type (str): Type of coordinate channels to append to the
refinement inputs. Can be "linear" (default) or "cos".
coord_freqs (int): If using cos based coordinate channels, then this
specifies the number of frequencies used.
name (str): Name of the module (within the tensorflow graph).
"""
def __init__(
self,
decoder,
refinement_core,
latent_dist,
output_dist,
n_z,
num_components,
num_iters,
sequential=False,
factor_evaluator=None,
stop_gradients=DEFAULT_STOP_GRADIENT,
iter_loss_weight="linspace",
inputs=DEFAULT_INPUTS,
preprocess=None,
coord_type="linear",
coord_freqs=3,
name="iodine",
):
super().__init__(name=name)
self._sg = shapeguard.ShapeGuard(dims={"K": num_components})
self.decoder = decoder
self.refinement_core = refinement_core
self.latent_dist = latent_dist
self.output_dist = output_dist
self.n_z = n_z
self.num_components = num_components
self.num_iters = num_iters
self.sequential = sequential
self.iter_loss_weights = self._parse_iter_loss_weights(iter_loss_weight)
self.factor_evaluator = factor_evaluator
self.stop_gradients = stop_gradients
self.inputs = inputs
self.preprocess = DEFAULT_PREPROCESSING if preprocess is None else preprocess
self.coord_type = coord_type
self.coord_freqs = coord_freqs
with self._enter_variable_scope():
self.latent_dist.set_output_shape([self.n_z])
logging.info("VAE: z shape: %s", [self.n_z])
with tf.name_scope("prior"):
self.prior = self.latent_dist.get_default_prior((self.num_components,))
self._sg.guard(self.prior, "K, Z")
with tf.variable_scope("preprocess"):
self._layernorms = {
name: snt.LayerNorm(name="layer_norm_" + name)
for name in self.preprocess
}
def _build(self, data):
data["image"] = data["image"][:, :self.num_iters + 1]
if "mask" in data:
data["mask"] = data["mask"][:, :self.num_iters + 1]
x = self._sg.guard(data["image"], "B, T, H, W, C")
self._propagate_shape_info(x.get_shape().as_list())
# run iterative encoder
iterations = self.encode(x)
z_dist = self._sg.guard(iterations["z_dist"][-1], "B, K, Z")
z = self._sg.guard(iterations["z"][-1], "B, K, Z")
# decode
x_params, x_dist = self.decode(z)
iterations["x_dist"].append(self._sg.guard(x_dist, "B, 1, H, W, C"))
# compute loss
kl = self._sg.guard(self._raw_kl(z_dist), "B, K")
img = self._get_image_for_iter(x, self.num_iters)
re = self._sg.guard(self._reconstruction_error(x_dist, img), "B")
iterations["kl"].append(kl)
iterations["re"].append(re)
iterations["recons_loss"] = [tf.reduce_mean(re) for re in iterations["re"]]
total_rec_loss = sum([
w * re
for w, re in zip(self.iter_loss_weights, iterations["recons_loss"])
])
total_kl_loss = sum([
w * tf.reduce_mean(tf.reduce_sum(kl, axis=1))
for w, kl in zip(self.iter_loss_weights, iterations["kl"])
])
total_loss = total_rec_loss + total_kl_loss
scalars = {
"loss/kl":
sum([
tf.reduce_mean(tf.reduce_sum(kl, axis=1))
for kl in iterations["kl"]
]),
"loss/recons":
total_rec_loss,
}
if self.factor_evaluator:
pred_mask = self._sg.guard(x_dist.mixture_distribution.probs,
"B, 1, H, W, K")
pred_mask = tf.transpose(pred_mask, [0, 4, 2, 3, 1])
mask_true = self._sg.guard(data["mask"], "B, T, L, H, W, 1")
mask_true = self._get_image_for_iter(mask_true, self.num_iters)
mask_true = mask_true[:, 0]
factor_loss, factor_scalars, _, _, _ = self.factor_evaluator(
tf.stop_gradient(z),
data["factors"],
data["visibility"],
tf.stop_gradient(pred_mask),
mask_true,
)
total_loss += factor_loss
scalars["factor/loss"] = factor_loss
scalars.update({"factor/" + k: v for k, v in factor_scalars.items()})
scalars["loss/total"] = total_loss
scalars.update(self._get_monitored_scalars(x_dist, data))
logging.info(self._sg.dims)
return total_loss, scalars, iterations
@snt.reuse_variables
def encode(self, images):
sg = self._sg
sg.guard(images, "B, T, H, W, C")
zp, z_dist, z = self._get_initial_z()
iterations = {
"z": [z],
"zp": [zp],
"z_dist": [z_dist],
"x_dist": [],
"inputs": [],
"kl": [],
"re": [],
}
state = self.refinement_core.initial_state(sg["B*K"][0])
for t in range(self.num_iters):
img = sg.guard(self._get_image_for_iter(images, t), "B, 1, H, W, C")
x_params, x_dist = self.decode(z)
# compute loss
kl = self._sg.guard(self._raw_kl(z_dist), "B, K")
re = self._sg.guard(self._reconstruction_error(x_dist, img), "B")
loss = tf.reduce_mean(re) + tf.reduce_mean(tf.reduce_sum(kl, axis=1))
inputs = self._get_inputs_for(x_params, x_dist, img, z_dist, zp, loss)
zp, state = self.refinement_core(inputs, state)
sg.guard(zp, "B, K, Zp")
z_dist = sg.guard(self.latent_dist(zp), "B, K, Z")
z = z_dist.sample()
# append local variables to iteration collections
for v, name in zip(
[z, zp, z_dist, x_dist, inputs, kl, re],
["z", "zp", "z_dist", "x_dist", "inputs", "kl", "re"],
):
iterations[name].append(v)
return iterations
@snt.reuse_variables
def decode(self, z):
sg = shapeguard.ShapeGuard()
sg.guard(z, "B, K, Z")
# legacy
z = tf.concat([z, 5.0 * tf.ones(sg["B, K, 1"], dtype=tf.float32)], axis=2)
params = self.decoder(z)
out_dist = self.output_dist(*params)
return params, out_dist
@snt.reuse_variables
def eval(self, data):
total_loss, scalars, iterations = self._build(data)
sg = shapeguard.ShapeGuard()
def get_components(dist):
return tf.transpose(dist.components_distribution.mean()[:, 0, :, :, :, :],
[0, 3, 1, 2, 4])
def get_mask(dist):
return tf.transpose(dist.mixture_distribution.probs[:, :, :, :, :],
[0, 4, 2, 3, 1])
def get_mask_logits(dist):
return tf.transpose(dist.mixture_distribution.logits[:, :, :, :, :],
[0, 4, 2, 3, 1])
def stack_iters(list_of_variables, pad_zero=False):
if pad_zero:
list_of_variables.insert(0, tf.zeros_like(list_of_variables[0]))
return tf.stack(list_of_variables, axis=1)
# data
image = sg.guard(data["image"], "B, 1, H, W, C")
true_mask = sg.guard(data["mask"], "B, 1, L, H, W, 1")
visibility = sg.guard(data["visibility"], "B, L")
factors = data["factors"]
# inputs
inputs_flat = {
k: stack_iters([inp["flat"][k] for inp in iterations["inputs"]],
pad_zero=True)
for k in iterations["inputs"][0]["flat"].keys()
}
inputs_spatial = {
k: stack_iters([inp["spatial"][k] for inp in iterations["inputs"]],
pad_zero=True)
for k in iterations["inputs"][0]["spatial"].keys()
}
# latent
z = sg.guard(stack_iters(iterations["z"]), "B, T, K, Z")
z_mean = stack_iters([zd.mean() for zd in iterations["z_dist"]])
z_std = stack_iters([zd.stddev() for zd in iterations["z_dist"]])
# outputs
recons = stack_iters([xd.mean() for xd in iterations["x_dist"]])
pred_mask = stack_iters([get_mask(xd) for xd in iterations["x_dist"]])
pred_mask_logits = stack_iters(
[get_mask_logits(xd) for xd in iterations["x_dist"]])
components = stack_iters(
[get_components(xd) for xd in iterations["x_dist"]])
# metrics
tm = tf.transpose(true_mask[..., 0], [0, 1, 3, 4, 2])
tm = tf.reshape(tf.tile(tm, sg["1, T, 1, 1, 1"]), sg["B * T, H * W, L"])
pm = tf.transpose(pred_mask[..., 0], [0, 1, 3, 4, 2])
pm = tf.reshape(pm, sg["B * T, H * W, K"])
ari = tf.reshape(adjusted_rand_index(tm, pm), sg["B, T"])
ari_nobg = tf.reshape(adjusted_rand_index(tm[..., 1:], pm), sg["B, T"])
mse = tf.reduce_mean(tf.square(recons - image[:, None]), axis=[2, 3, 4, 5])
# losses
loss_recons = stack_iters(iterations["re"])
kl = stack_iters(iterations["kl"])
info = {
"data": {
"image": sg.guard(image, "B, 1, H, W, C"),
"true_mask": sg.guard(true_mask, "B, 1, L, H, W, 1"),
"visibility": sg.guard(visibility, "B, L"),
"factors": factors,
},
"inputs": {
"flat": inputs_flat,
"spatial": inputs_spatial
},
"latent": {
"z": sg.guard(z, "B, T, K, Z"),
"z_mean": sg.guard(z_mean, "B, T, K, Z"),
"z_std": sg.guard(z_std, "B, T, K, Z"),
},
"outputs": {
"recons": sg.guard(recons, "B, T, 1, H, W, C"),
"pred_mask": sg.guard(pred_mask, "B, T, K, H, W, 1"),
"pred_mask_logits": sg.guard(pred_mask_logits, "B, T, K, H, W, 1"),
"components": sg.guard(components, "B, T, K, H, W, C"),
},
"losses": {
"total": total_loss,
"recons": sg.guard(loss_recons, "B, T"),
"kl": sg.guard(kl, "B, T, K"),
},
"metrics": {
"ari": ari,
"ari_nobg": ari_nobg,
"mse": mse
},
}
if self.factor_evaluator:
# factor evaluation information
factor_info = {
"loss": [],
"metrics": collections.defaultdict(list),
"predictions": collections.defaultdict(list),
"assignment": [],
}
for t in range(z.get_shape().as_list()[1]):
floss, fscalars, _, fpred, fass = self.factor_evaluator(
z[:, t], factors, visibility, pred_mask[:, t], true_mask[:, 0])
factor_info["loss"].append(floss)
factor_info["assignment"].append(fass)
for k in fpred:
factor_info["predictions"][k].append(
tf.reduce_sum(fpred[k] * fass[..., None], axis=2))
factor_info["metrics"][k].append(fscalars[k])
info["losses"]["factor"] = sg.guard(tf.stack(factor_info["loss"]), "T")
info["factor_regressor"] = {
"assignment":
sg.guard(stack_iters(factor_info["assignment"]), "B, T, L, K"),
"metrics": {
k: tf.stack(factor_info["metrics"][k], axis=0)
for k in factor_info["metrics"]
},
"predictions": {
k: stack_iters(factor_info["predictions"][k])
for k in factor_info["predictions"]
},
}
return info
@snt.reuse_variables
def get_sample_images(self, nr_samples=16):
with tf.name_scope("prior_samples"):
prior_z = self.prior.sample(nr_samples)
_, prior_out = self.decode(prior_z)
prior_out = tf.clip_by_value(prior_out.mean(), 0.0, 1.0)
return utils.images_to_grid(prior_out[:, 0])[tf.newaxis]
@snt.reuse_variables
def get_overview_images(self, data, nr_images=4, mask_components=False):
x = data["image"][:nr_images, :self.num_iters + 1]
old_b, self._sg.B = self._sg.B, x.get_shape().as_list()[0]
iterations = self.encode(x)
z = iterations["z"][-1]
_, x_dist = self.decode(z)
self._sg.B = old_b
t = min(self.num_iters, x.get_shape().as_list()[1]) - 1
# iterations view
recons = tf.stack([x_dist.mean() for x_dist in iterations["x_dist"]],
axis=1)
masks = tf.stack(
[
tf.transpose(x_dist.mixture_distribution.probs, [0, 4, 2, 3, 1])
for x_dist in iterations["x_dist"]
],
axis=1,
)
return {
"overview":
utils.get_overview_image(
x[:, t:t + 1], x_dist, mask_components=mask_components),
"sequence":
utils.construct_iterations_image(x[:, :t + 1, tf.newaxis], recons,
masks),
"samples":
self.get_sample_images(),
}
def _get_initial_z(self):
# Initial z distribution
zp_init = tf.get_variable(
"initial_sample_distribution",
shape=self.latent_dist.input_shapes.params,
dtype=tf.float32,
)
zp = tf.tile(zp_init[tf.newaxis, tf.newaxis], self._sg["B, K, 1"])
z_dist = self.latent_dist(zp)
z = z_dist.sample()
self._sg.guard(zp, "B, K, Zp")
self._sg.guard(z_dist, "B, K, Z")
self._sg.guard(z, "B, K, Z")
return zp, z_dist, z
def _parse_iter_loss_weights(self, iter_loss_weight):
if iter_loss_weight == "linspace":
iter_weights = np.linspace(0.0, 1.0, self.num_iters + 1).tolist()
elif isinstance(iter_loss_weight, (float, int)):
iter_weights = [float(iter_loss_weight)] * (self.num_iters + 1)
elif isinstance(iter_loss_weight, (tuple, list)):
iter_weights = [float(w) for w in iter_loss_weight]
else:
raise ValueError("Unknown iter_loss_weight type {}.".format(
repr(iter_loss_weight)))
assert len(iter_weights) == (self.num_iters + 1), iter_loss_weight
return iter_weights
def _propagate_shape_info(self, image_shape):
image_shape = image_shape[-3:] # ignore batch dims
logging.info("VAE: image shape: %s", image_shape)
z_param_shape = self._sg.guard(self.latent_dist.input_shapes.params, "Zp")
logging.info("VAE: z parameter shape: %s", z_param_shape)
self.output_dist.set_output_shape(image_shape)
out_param_shapes = self.output_dist.input_shapes
logging.info("VAE: output parameter shapes: %s", out_param_shapes)
self.decoder.set_output_shapes(*out_param_shapes)
def _get_image_for_iter(self, images, t):
"""Return current frame or first image."""
if self.sequential:
return images[:, t:t + 1]
else:
return images[:, :1]
@staticmethod
def _get_mask_posterior(out_dist, img):
p_comp = out_dist.components_distribution.prob(img[..., tf.newaxis, :])
posterior = p_comp / (tf.reduce_sum(p_comp, axis=-1, keepdims=True) + 1e-6)
return tf.transpose(posterior, [0, 4, 2, 3, 1])
def _get_inputs_for(self, out_params, out_dist, img, z_dist, zp, loss):
sg = self._sg
# gradients of loss wrt z, components and mask
dzp, dxp, dmp = tf.gradients(loss, [zp, out_params.pixel, out_params.mask])
log_prob = sg.guard(
out_dist.log_prob(img)[..., tf.newaxis], "B, 1, H, W, 1")
counterfactual_log_probs = []
for k in range(0, self.num_components):
mask = tf.concat([out_params.mask[:, :k], out_params.mask[:, k + 1:]],
axis=1)
pixel = tf.concat([out_params.pixel[:, :k], out_params.pixel[:, k + 1:]],
axis=1)
out_dist_k = self.output_dist(pixel, mask)
log_prob_k = out_dist_k.log_prob(img)[..., tf.newaxis]
counterfactual_log_probs.append(log_prob_k)
counterfactual = log_prob - tf.concat(counterfactual_log_probs, axis=1)
pred_mask = tf.transpose(out_dist.mixture_distribution.probs,
[0, 4, 2, 3, 1])
potential_inputs = {
# spatial
"image":
sg.guard(img, "B, 1, H, W, C"),
"log_prob":
sg.guard(log_prob, "B, 1, H, W, 1"),
"mask":
sg.guard(out_params.mask, "B, K, H, W, 1"),
"pred_mask":
sg.guard(pred_mask, "B, K, H, W, 1"),
"components":
sg.guard(out_params.pixel, "B, K, H, W, Cp"),
"dmask":
sg.guard(dmp, "B, K, H, W, Mp"),
"dcomponents":
sg.guard(dxp, "B, K, H, W, Cp"),
"posterior":
sg.guard(self._get_mask_posterior(out_dist, img), "B, K, H, W, 1"),
"capacity":
0.5 *
tf.ones(sg["B, K, H, W, 1"], dtype=tf.float32), # TODO: legacy
"coordinates":
self._get_coord_channels(),
"counterfactual":
self._sg.guard(counterfactual, "B, K, H, W, 1"),
# flat
"zp":
sg.guard(zp, "B, K, Zp"),
"dzp":
sg.guard(dzp, "B, K, Zp"),
"flat_capacity":
0.5 * tf.ones(sg["B, K, 1"], dtype=tf.float32), # TODO: legacy
}
# collect used inputs, stop gradients and preprocess where needed
final_inputs = {"spatial": {}, "flat": {}}
for k, v in potential_inputs.items():
# skip unused inputs
if k not in self.inputs:
continue
# stop gradients
if k in self.stop_gradients:
v = tf.stop_gradient(v)
# preprocess
v = self._apply_preprocessing(k, v)
# sort into flat / spatial according to their shape
structure = "flat" if len(v.get_shape().as_list()) == 3 else "spatial"
final_inputs[structure][k] = v
return final_inputs
def _apply_preprocessing(self, name, val):
if name in self.preprocess:
if self._sg.matches(val, "B, K, _z"):
flat_val = tf.reshape(val, self._sg["B*K"] + [-1])
elif self._sg.matches(val, "B, 1, _z"):
flat_val = val[:, 0, :]
elif self._sg.matches(val, "B, K, H, W, _c"):
flat_val = tf.reshape(val, self._sg["B*K, H*W"] + [-1])
elif self._sg.matches(val, "B, 1, H, W, _c"):
flat_val = tf.reshape(val, self._sg["B, H*W"] + [-1])
else:
raise ValueError("Cannot handle shape {}".format(
val.get_shape().as_list()))
ln = self._layernorms[name]
norm_val = ln(flat_val)
return tf.reshape(norm_val, val.shape.as_list())
else:
return val
def _get_coord_channels(self):
if self.coord_type == "linear":
x_coords = tf.linspace(-1.0, 1.0, self._sg.W)[None, None, None, :, None]
y_coords = tf.linspace(-1.0, 1.0, self._sg.H)[None, None, :, None, None]
x_coords = tf.tile(x_coords, self._sg["B, 1, H, 1, 1"])
y_coords = tf.tile(y_coords, self._sg["B, 1, 1, W, 1"])
return tf.concat([x_coords, y_coords], axis=-1)
elif self.coord_type == "cos":
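      # Cosine positional basis: the outer product of F cosine frequencies
      # along each spatial axis yields F*F channels per pixel; the constant
      # first channel is dropped below via [..., 1:].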
freqs = self._sg.guard(tf.range(0.0, self.coord_freqs), "F")
valx = tf.linspace(0.0, np.pi, self._sg.W)[None, None, None, :, None,
None]
valy = tf.linspace(0.0, np.pi, self._sg.H)[None, None, :, None, None,
None]
x_basis = tf.cos(valx * freqs[None, None, None, None, :, None])
y_basis = tf.cos(valy * freqs[None, None, None, None, None, :])
xy_basis = tf.reshape(x_basis * y_basis, self._sg["1, 1, H, W, F*F"])
coords = tf.tile(xy_basis, self._sg["B, 1, 1, 1, 1"])[..., 1:]
return coords
else:
raise KeyError('Unknown coord_type: "{}"'.format(self.coord_type))
def _raw_kl(self, z_dist):
return tfd.kl_divergence(z_dist, self.prior)
def _reconstruction_error(self, x_dist, img):
log_prob = self._sg.guard(x_dist.log_prob(img), "B, 1, H, W")
return -tf.reduce_sum(log_prob, axis=[1, 2, 3])
def _get_monitored_scalars(self, out_dist, data):
self._sg.guard(out_dist, "B, 1, H, W, C")
img = self._get_image_for_iter(data["image"], self.num_iters)
scalars = {}
with tf.name_scope("monitored_scalars"):
# ######### Loss Monitoring #########
scalars["loss/mse"] = tf.losses.mean_squared_error(
img, out_dist.mean())
# ########## Mask Monitoring #######
if "mask" in data:
true_mask = self._sg.guard(data["mask"], "B, T, L, H, W, 1")
true_mask = tf.transpose(true_mask[:, -1, ..., 0], [0, 2, 3, 1])
true_mask = self._sg.reshape(true_mask, "B, H*W, L")
else:
true_mask = None
pred_mask = self._sg.guard(out_dist.mixture_distribution.probs,
"B, 1, H, W, K")
pred_mask = self._sg.reshape(pred_mask, "B, H*W, K")
if pred_mask is not None and true_mask is not None:
self._sg.guard(pred_mask, "B, H*W, K")
self._sg.guard(true_mask, "B, H*W, L")
scalars["loss/ari"] = tf.reduce_mean(
adjusted_rand_index(true_mask, pred_mask))
scalars["loss/ari_nobg"] = tf.reduce_mean(
adjusted_rand_index(true_mask[..., 1:], pred_mask))
return scalars
|
deepmind-research-master
|
iodine/modules/iodine.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loading functionality for IODINE."""
# pylint: disable=g-multiple-import, missing-docstring, unused-import
import os.path
from iodine.modules.utils import flatten_all_but_last, ensure_3d
from multi_object_datasets import (
clevr_with_masks,
multi_dsprites,
tetrominoes,
objects_room,
)
from shapeguard import ShapeGuard
import sonnet as snt
import tensorflow.compat.v1 as tf
class IODINEDataset(snt.AbstractModule):
num_true_objects = 1
num_channels = 3
factors = {}
def __init__(
self,
path,
batch_size,
image_dim,
crop_region=None,
shuffle_buffer=1000,
max_num_objects=None,
min_num_objects=None,
grayscale=False,
name="dataset",
**kwargs,
):
super().__init__(name=name)
self.path = os.path.abspath(os.path.expanduser(path))
self.batch_size = batch_size
self.crop_region = crop_region
self.image_dim = image_dim
self.shuffle_buffer = shuffle_buffer
self.max_num_objects = max_num_objects
self.min_num_objects = min_num_objects
self.grayscale = grayscale
self.dataset = None
def _build(self, subset="train"):
dataset = self.dataset
# filter by number of objects
if self.max_num_objects is not None or self.min_num_objects is not None:
dataset = self.dataset.filter(self.filter_by_num_objects)
if subset == "train":
# normal mode returns a shuffled dataset iterator
if self.shuffle_buffer is not None:
dataset = dataset.shuffle(self.shuffle_buffer)
elif subset == "summary":
# for generating summaries and overview images
# returns a single fixed batch
dataset = dataset.take(self.batch_size)
# repeat and batch
dataset = dataset.repeat().batch(self.batch_size, drop_remainder=True)
iterator = dataset.make_one_shot_iterator()
data = iterator.get_next()
# preprocess the data to ensure correct format, scale images etc.
data = self.preprocess(data)
return data
def filter_by_num_objects(self, d):
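    """Keeps only scenes whose number of visible objects is within bounds."""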
if "visibility" not in d:
return tf.constant(True)
    min_num_objects = self.min_num_objects or 0
max_num_objects = self.max_num_objects or 6
min_predicate = tf.greater_equal(
tf.reduce_sum(d["visibility"]),
tf.constant(min_num_objects - 1e-5, dtype=tf.float32),
)
max_predicate = tf.less_equal(
tf.reduce_sum(d["visibility"]),
tf.constant(max_num_objects + 1e-5, dtype=tf.float32),
)
return tf.logical_and(min_predicate, max_predicate)
def preprocess(self, data):
sg = ShapeGuard(dims={
"B": self.batch_size,
"H": self.image_dim[0],
"W": self.image_dim[1]
})
image = sg.guard(data["image"], "B, h, w, C")
mask = sg.guard(data["mask"], "B, L, h, w, 1")
# to float
image = tf.cast(image, tf.float32) / 255.0
mask = tf.cast(mask, tf.float32) / 255.0
# crop
if self.crop_region is not None:
height_slice = slice(self.crop_region[0][0], self.crop_region[0][1])
width_slice = slice(self.crop_region[1][0], self.crop_region[1][1])
image = image[:, height_slice, width_slice, :]
mask = mask[:, :, height_slice, width_slice, :]
flat_mask, unflatten = flatten_all_but_last(mask, n_dims=3)
# rescale
size = tf.constant(
self.image_dim, dtype=tf.int32, shape=[2], verify_shape=True)
image = tf.image.resize_images(
image, size, method=tf.image.ResizeMethod.BILINEAR)
mask = tf.image.resize_images(
flat_mask, size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
if self.grayscale:
image = tf.reduce_mean(image, axis=-1, keepdims=True)
output = {
"image": sg.guard(image[:, None], "B, T, H, W, C"),
"mask": sg.guard(unflatten(mask)[:, None], "B, T, L, H, W, 1"),
"factors": self.preprocess_factors(data, sg),
}
if "visibility" in data:
output["visibility"] = sg.guard(data["visibility"], "B, L")
else:
output["visibility"] = tf.ones(sg["B, L"], dtype=tf.float32)
return output
def preprocess_factors(self, data, sg):
return {
name: sg.guard(ensure_3d(data[name]), "B, L, *")
for name in self.factors
}
def get_placeholders(self, batch_size=None):
batch_size = batch_size or self.batch_size
sg = ShapeGuard(
dims={
"B": batch_size,
"H": self.image_dim[0],
"W": self.image_dim[1],
"L": self.num_true_objects,
"C": 3,
"T": 1,
})
return {
"image": tf.placeholder(dtype=tf.float32, shape=sg["B, T, H, W, C"]),
"mask": tf.placeholder(dtype=tf.float32, shape=sg["B, T, L, H, W, 1"]),
"visibility": tf.placeholder(dtype=tf.float32, shape=sg["B, L"]),
"factors": {
name:
tf.placeholder(dtype=dtype, shape=sg["B, L, {}".format(size)])
            for name, (dtype, size) in self.factors.items()
},
}
class CLEVR(IODINEDataset):
num_true_objects = 11
num_channels = 3
factors = {
"color": (tf.uint8, 1),
"shape": (tf.uint8, 1),
"size": (tf.uint8, 1),
"position": (tf.float32, 3),
"rotation": (tf.float32, 1),
}
def __init__(
self,
path,
crop_region=((29, 221), (64, 256)),
image_dim=(128, 128),
name="clevr",
**kwargs,
):
super().__init__(
path=path,
crop_region=crop_region,
image_dim=image_dim,
name=name,
**kwargs)
self.dataset = clevr_with_masks.dataset(self.path)
def preprocess_factors(self, data, sg):
return {
"color": sg.guard(ensure_3d(data["color"]), "B, L, 1"),
"shape": sg.guard(ensure_3d(data["shape"]), "B, L, 1"),
"size": sg.guard(ensure_3d(data["color"]), "B, L, 1"),
"position": sg.guard(ensure_3d(data["pixel_coords"]), "B, L, 3"),
"rotation": sg.guard(ensure_3d(data["rotation"]), "B, L, 1"),
}
class MultiDSprites(IODINEDataset):
num_true_objects = 6
num_channels = 3
factors = {
"color": (tf.float32, 3),
"shape": (tf.uint8, 1),
"scale": (tf.float32, 1),
"x": (tf.float32, 1),
"y": (tf.float32, 1),
"orientation": (tf.float32, 1),
}
def __init__(
self,
path,
# variant from ['binarized', 'colored_on_grayscale', 'colored_on_colored']
dataset_variant="colored_on_grayscale",
image_dim=(64, 64),
name="multi_dsprites",
**kwargs,
):
super().__init__(path=path, name=name, image_dim=image_dim, **kwargs)
self.dataset_variant = dataset_variant
self.dataset = multi_dsprites.dataset(self.path, self.dataset_variant)
class Tetrominoes(IODINEDataset):
num_true_objects = 6
num_channels = 3
factors = {
"color": (tf.uint8, 3),
"shape": (tf.uint8, 1),
"position": (tf.float32, 2),
}
def __init__(self, path, image_dim=(35, 35), name="tetrominoes", **kwargs):
super().__init__(path=path, name=name, image_dim=image_dim, **kwargs)
self.dataset = tetrominoes.dataset(self.path)
def preprocess_factors(self, data, sg):
pos_x = ensure_3d(data["x"])
pos_y = ensure_3d(data["y"])
position = tf.concat([pos_x, pos_y], axis=2)
return {
"color": sg.guard(ensure_3d(data["color"]), "B, L, 3"),
"shape": sg.guard(ensure_3d(data["shape"]), "B, L, 1"),
"position": sg.guard(ensure_3d(position), "B, L, 2"),
}
|
deepmind-research-master
|
iodine/modules/data.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration parameters for MMV."""
def get_model_config(ckpt_path):
"""Returns the model configuration to be used with each checkpoint."""
config = {
'audio_backbone': 'resnet50',
'audio_model_kwargs': {
'bn_config': {
'create_offset': True,
'create_scale': True,
'decay_rate': 0.9,
'eps': 1.0e-5
}
},
'bn_config_proj': {
'create_offset': True,
'create_scale': True,
'decay_rate': 0.9,
'eps': 1.0e-5
},
'config_audio_text': {
'embedding_dim': 512,
'toaud_bn_after_proj': False,
'toaud_head_mode': 'linear',
'totxt_bn_after_proj': False,
'totxt_head_mode': 'linear'
},
'config_video_audio': {
'embedding_dim': 512,
'toaud_bn_after_proj': True,
'toaud_head_mode': 'mlp@512',
'tovid_bn_after_proj': False,
'tovid_head_mode': 'linear'
},
'config_video_text': {
'embedding_dim': 256,
'totxt_bn_after_proj': True,
'totxt_head_mode': 'linear',
'tovid_bn_after_proj': False,
'tovid_head_mode': 'linear'
},
'mm_embedding_graph': 'fac_relu',
'name': 'text_audio_video',
'sentence_dim': 2048,
'use_xreplica_bn': True,
'vision_model_kwargs': {
'bn_config': {
'create_offset': True,
'create_scale': True,
'decay_rate': 0.9,
'eps': 1.0e-5
},
'n_frames': 32,
'width_mult': 1,
},
}
if 's3d' in ckpt_path:
config['visual_backbone'] = 's3d'
if 'tsm_resnet_x1' in ckpt_path:
config['visual_backbone'] = 'resnet50tsm'
if 'tsm_resnet_x2' in ckpt_path:
config['visual_backbone'] = 'resnet50tsm'
config['vision_model_kwargs']['width_mult'] = 2
return config
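# Illustrative usage (editor's sketch, not part of the original module). The
# checkpoint paths below are hypothetical; the visual backbone is inferred
# from the checkpoint filename as implemented above.
if __name__ == '__main__':
  for _path in ('/tmp/mmv_s3d.pkl', '/tmp/mmv_tsm_resnet_x2.pkl'):
    _cfg = get_model_config(_path)
    print(_path, _cfg['visual_backbone'],
          _cfg['vision_model_kwargs']['width_mult'])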
|
deepmind-research-master
|
mmv/config.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UCF101 linear evaluation."""
import functools
from typing import Any, Dict, Optional
from absl import app
from absl import flags
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import sklearn
from sklearn import preprocessing
import sklearn.linear_model
import sklearn.svm
import tensorflow as tf
import tensorflow_datasets as tfds
from mmv import config
from mmv.models import mm_embeddings
from mmv.utils import checkpoint
from mmv.utils import ucf101_dataset
flags.DEFINE_string('checkpoint_path', '~/tmp/mmv_s3d.pkl',
'The directory to load pre-trained weights from.')
flags.DEFINE_string('dataset_folder', '/tmp/ucf101',
'The directory with the ucf101 dataset.')
flags.DEFINE_integer('eval_batch_size', 1,
'The batch size for evaluation.')
flags.DEFINE_integer('train_batch_size', 16,
'The batch size for training.')
flags.DEFINE_integer('num_train_epochs', 10,
'How many epochs to collect features during training.')
flags.DEFINE_integer('num_test_windows', 10,
'How many windows to average on during test.')
flags.DEFINE_integer('min_resize', 224,
'Min value to resize images to during preprocessing.')
flags.DEFINE_integer('crop_size', 224,
'Value to resize images to during preprocessing.')
flags.DEFINE_integer('num_frames', 32,
'Number of video frames.')
flags.DEFINE_integer('stride', 2,
'Stride for video frames.')
flags.DEFINE_integer('ucf101_split', 1,
'Which split of ucf101 to use.')
FLAGS = flags.FLAGS
def get_sampling_offset(sequence: tf.Tensor,
num_steps: Optional[int],
is_training: bool,
stride: int = 1,
seed: Optional[int] = None) -> tf.Tensor:
"""Calculates the initial offset for a sequence where all steps will fit.
Args:
sequence: any tensor where the first dimension is timesteps.
num_steps: The number of timesteps we will output. If None,
deterministically start at the first frame.
    is_training: A boolean indicating whether the graph is for training or not.
      If False, the starting frame is always the first frame.
stride: distance to sample between timesteps.
seed: a deterministic seed to use when sampling.
Returns:
The first index to begin sampling from. A best effort is made to provide a
starting index such that all requested steps fit within the sequence (i.e.
offset + 1 + (num_steps - 1) * stride < len(sequence)). If this is not
satisfied, the starting index is chosen randomly from the full sequence.
"""
if num_steps is None or not is_training:
return tf.constant(0)
sequence_length = tf.shape(sequence)[0]
max_offset = tf.cond(
tf.greater(sequence_length, (num_steps - 1) * stride),
lambda: sequence_length - (num_steps - 1) * stride,
lambda: sequence_length)
offset = tf.random.uniform(
(),
maxval=tf.cast(max_offset, tf.int32),
dtype=tf.int32,
seed=seed)
return offset
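# Example (editor's note) for get_sampling_offset: with a 100-frame sequence,
# num_steps=32 and stride=2, max_offset = 100 - 31 * 2 = 38, so during
# training the offset is drawn uniformly from [0, 38).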
def sample_or_pad_sequence_indices(sequence: tf.Tensor,
num_steps: Optional[int],
is_training: bool,
repeat_sequence: bool = True,
stride: int = 1,
offset: Optional[int] = None) -> tf.Tensor:
"""Returns indices to take for sampling or padding a sequence to fixed size.
Samples num_steps from the sequence. If the sequence is shorter than
num_steps, the sequence loops. If the sequence is longer than num_steps and
is_training is True, then we seek to a random offset before sampling. If
offset is provided, we use that deterministic offset.
This method is appropriate for sampling from a tensor where you want every
timestep between a start and end time. See sample_stacked_sequence_indices for
more flexibility.
Args:
sequence: any tensor where the first dimension is timesteps.
num_steps: how many steps (e.g. frames) to take. If None, all steps from
start to end are considered and `is_training` has no effect.
    is_training: A boolean indicating whether the graph is for training or not.
      If False, the starting frame is deterministic.
    repeat_sequence: A boolean indicating whether the sequence is repeated to
      have enough steps for sampling. If False, a runtime error is thrown if
      num_steps * stride is longer than the sequence length.
stride: distance to sample between timesteps.
offset: a deterministic offset to use regardless of the is_training value.
Returns:
Indices to gather from the sequence Tensor to get a fixed size sequence.
"""
sequence_length = tf.shape(sequence)[0]
sel_idx = tf.range(sequence_length)
if num_steps:
if offset is None:
offset = get_sampling_offset(sequence, num_steps, is_training, stride)
if repeat_sequence:
# Repeats sequence until num_steps are available in total.
num_repeats = tf.cast(
tf.math.ceil(
tf.math.divide(
tf.cast(num_steps * stride + offset, tf.float32),
tf.cast(sequence_length, tf.float32)
)), tf.int32)
sel_idx = tf.tile(sel_idx, [num_repeats])
steps = tf.range(offset, offset + num_steps * stride, stride)
else:
steps = tf.range(0, sequence_length, stride)
return tf.gather(sel_idx, steps)
def random_sample_sequence(sequence: tf.Tensor,
num_steps: int,
stride: int = 1) -> tf.Tensor:
"""Randomly sample a segment of size num_steps from a given sequence."""
indices = sample_or_pad_sequence_indices(
sequence=sequence,
num_steps=num_steps,
is_training=True, # Random sample.
      repeat_sequence=True,  # Repeats the sequence if more steps are requested.
stride=stride,
offset=None)
indices.set_shape((num_steps,))
output = tf.gather(sequence, indices)
return output
def sample_linspace_sequence(sequence: tf.Tensor,
num_windows: int,
num_steps: int,
stride: int = 1) -> tf.Tensor:
"""Samples num_windows segments from sequence with linearly spaced offsets.
  The samples are concatenated into a single Tensor so that every timestep has
  the same format (e.g. a single frame). If num_steps * stride is bigger than
  the number of timesteps, the sequence is repeated. This function can be used
  in evaluation to extract enough segments to span the entire sequence.
Args:
sequence: Any tensor where the first dimension is timesteps.
num_windows: Number of windows retrieved from the sequence.
num_steps: Number of steps (e.g. frames) to take.
stride: Distance to sample between timesteps.
Returns:
    A single Tensor with first dimension num_windows * num_steps. The Tensor
    contains the concatenation of num_windows sampled windows whose offsets
    are linearly spaced across the input sequence.
"""
sequence_length = tf.shape(sequence)[0]
max_offset = tf.maximum(0, sequence_length - num_steps * stride)
offsets = tf.linspace(0.0, tf.cast(max_offset, tf.float32), num_windows)
offsets = tf.cast(offsets, tf.int32)
all_indices = []
for i in range(num_windows):
all_indices.append(
sample_or_pad_sequence_indices(
sequence=sequence,
num_steps=num_steps,
is_training=False,
            repeat_sequence=True,  # Repeats the sequence if more steps are requested.
stride=stride,
offset=offsets[i]))
indices = tf.concat(all_indices, axis=0)
indices.set_shape((num_windows * num_steps,))
output = tf.gather(sequence, indices)
return output
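# Example (editor's note) for sample_linspace_sequence: with a 300-frame video,
# num_windows=10, num_steps=32 and stride=2, max_offset = 300 - 64 = 236, the
# ten window offsets are linearly spaced over [0, 236], and the output holds
# 10 * 32 = 320 frames.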
def resize_smallest(frames: tf.Tensor, min_resize: int) -> tf.Tensor:
"""Resizes frames so that min(height, width) is equal to min_resize.
  This function is a no-op if min(height, width) is already equal to
  min_resize, which saves compute time.
Args:
frames: A Tensor of dimension [timesteps, input_h, input_w, channels].
min_resize: Minimum size of the final image dimensions.
Returns:
A Tensor of shape [timesteps, output_h, output_w, channels] of type
frames.dtype where min(output_h, output_w) = min_resize.
"""
shape = tf.shape(frames)
input_h = shape[1]
input_w = shape[2]
output_h = tf.maximum(min_resize, (input_h * min_resize) // input_w)
output_w = tf.maximum(min_resize, (input_w * min_resize) // input_h)
def resize_fn():
frames_resized = tf.image.resize(frames, (output_h, output_w))
return tf.cast(frames_resized, frames.dtype)
should_resize = tf.math.logical_or(tf.not_equal(input_w, output_w),
tf.not_equal(input_h, output_h))
frames = tf.cond(should_resize, resize_fn, lambda: frames)
return frames
def process_samples(features_dict, num_frames=32, stride=1, is_training=True,
min_resize=224, crop_size=224, num_windows=1):
"""Process video frames."""
video = features_dict['video']
if is_training:
assert num_windows == 1
video = random_sample_sequence(video, num_frames, stride)
is_flipped = tf.random.uniform((), minval=0, maxval=2, dtype=tf.int32)
video = tf.cond(tf.equal(is_flipped, 1),
true_fn=lambda: tf.image.flip_left_right(video),
false_fn=lambda: video)
else:
video = sample_linspace_sequence(video, num_windows, num_frames, stride)
# Resize smallest side.
video = resize_smallest(video, min_resize)
if is_training:
# Random crop.
video = tf.image.random_crop(video, [num_frames, crop_size, crop_size, 3])
else:
# Central crop.
video = tf.image.resize_with_crop_or_pad(video, crop_size, crop_size)
video = tf.cast(video, tf.float32)
video /= 255.0 # Set between [0, 1].
features_dict['video'] = video
return features_dict
def space_to_depth_batch(features_dict):
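  # Space-to-depth over 2x2x2 spatio-temporal blocks: halves the temporal,
  # height and width dimensions and multiplies the channel dimension by 8.
  # main() applies this transform only when the visual backbone is 's3d'.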
images = features_dict['video']
_, l, h, w, c = images.shape
images = tf.reshape(images, [-1, l // 2, 2, h // 2, 2, w // 2, 2, c])
images = tf.transpose(images, [0, 1, 3, 5, 2, 4, 6, 7])
images = tf.reshape(images, [-1, l // 2, h // 2, w // 2, 8 * c])
features_dict['video'] = images
return features_dict
def reshape_windows(features_dict, num_frames):
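  # Splits the num_windows * num_frames frames stacked along axis 1 back into
  # separate windows, so each batch row holds a single num_frames clip.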
x = features_dict['video']
x = tf.reshape(x, (-1, num_frames, x.shape[2], x.shape[3], x.shape[4]))
features_dict['video'] = x
return features_dict
def compute_accuracy_metrics(pred, gt, prefix=''):
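  # pred: (N, num_classes) decision scores; gt: (N, 1) integer labels. Top-1
  # and top-5 accuracies are computed from the argsort of the scores.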
order_pred = np.argsort(pred, axis=1)
assert len(gt.shape) == len(order_pred.shape) == 2
top1_pred = order_pred[:, -1:]
top5_pred = order_pred[:, -5:]
top1_acc = np.mean(top1_pred == gt)
top5_acc = np.mean(np.max(top5_pred == gt, 1))
return {prefix + 'top1': top1_acc,
prefix + 'top5': top5_acc}
def forward_fn(images: jnp.ndarray,
audio_spectrogram: jnp.ndarray,
word_ids: jnp.ndarray,
is_training: bool,
model_config: Dict[str, Any]):
"""Forward pass of the model."""
# This should contain the pre-trained weights. We set it to zero because it
# will be loaded from the checkpoint.
language_model_vocab_size = 65536
word_embedding_dim = 300
dummy_embedding_matrix = jnp.zeros(shape=(language_model_vocab_size,
word_embedding_dim))
module = mm_embeddings.AudioTextVideoEmbedding(
**model_config,
word_embedding_matrix=dummy_embedding_matrix)
return module(images=images,
audio_spectrogram=audio_spectrogram,
word_ids=word_ids,
is_training=is_training)['vid_repr']
def main(argv):
del argv
sklearn_reg = 0.001
model_config = config.get_model_config(FLAGS.checkpoint_path)
forward = hk.without_apply_rng(hk.transform_with_state(forward_fn))
forward_apply = jax.jit(functools.partial(forward.apply,
is_training=False,
model_config=model_config))
# Get the UCF101 config.
dset_config = tfds.video.ucf101.Ucf101.BUILDER_CONFIGS[FLAGS.ucf101_split]
builder = ucf101_dataset.ModUcf101(
data_dir=FLAGS.dataset_folder,
config=dset_config)
# Create the tfrecord files (no-op if already exists)
dl_config = tfds.download.DownloadConfig(verify_ssl=False)
builder.download_and_prepare(download_config=dl_config)
# Generate the training dataset.
train_ds = builder.as_dataset(split='train', shuffle_files=False)
train_ds = train_ds.map(lambda x: process_samples( # pylint: disable=g-long-lambda
x, num_frames=FLAGS.num_frames, stride=FLAGS.stride, is_training=True,
min_resize=FLAGS.min_resize, crop_size=FLAGS.crop_size))
train_ds = train_ds.batch(batch_size=FLAGS.train_batch_size)
if model_config['visual_backbone'] == 's3d':
train_ds = train_ds.map(space_to_depth_batch)
train_ds = train_ds.repeat(FLAGS.num_train_epochs)
# Generate the test dataset.
test_ds = builder.as_dataset(split='test', shuffle_files=False)
test_ds = test_ds.map(lambda x: process_samples( # pylint: disable=g-long-lambda
x, num_frames=FLAGS.num_frames, stride=FLAGS.stride, is_training=False,
min_resize=FLAGS.min_resize, crop_size=FLAGS.crop_size,
num_windows=FLAGS.num_test_windows))
test_ds = test_ds.batch(batch_size=FLAGS.eval_batch_size)
test_ds = test_ds.map(lambda x: reshape_windows( # pylint: disable=g-long-lambda
x, num_frames=FLAGS.num_frames))
if model_config['visual_backbone'] == 's3d':
test_ds = test_ds.map(space_to_depth_batch)
test_ds = test_ds.repeat(1)
pretrained_weights = checkpoint.load_checkpoint(FLAGS.checkpoint_path)
params = pretrained_weights['params']
state = pretrained_weights['state']
# Collect training samples.
audio_frames = 96
mel_filters = 40
num_tokens = 16
dummy_audio = jnp.zeros(
shape=(FLAGS.train_batch_size, audio_frames, mel_filters, 1))
dummy_word_ids = jnp.zeros(
shape=(FLAGS.train_batch_size, num_tokens), dtype=jnp.int32)
train_features = []
train_labels = []
print('Computing features on train')
training_examples = iter(tfds.as_numpy(train_ds))
for train_ex in training_examples:
vid_representation, _ = forward_apply(params=params,
state=state,
images=train_ex['video'],
audio_spectrogram=dummy_audio,
word_ids=dummy_word_ids)
train_labels.append(train_ex['label'])
train_features.append(vid_representation)
if len(train_labels) % 50 == 0:
print(f'Processed {len(train_labels)} examples.')
train_labels = np.concatenate(train_labels, axis=0)
train_features = np.concatenate(train_features, axis=0)
  print(f'Finished collecting train features of shape {train_features.shape}')
# Collect test samples.
dummy_audio = jnp.zeros(
shape=(FLAGS.eval_batch_size, audio_frames, mel_filters, 1))
dummy_word_ids = jnp.zeros(
shape=(FLAGS.eval_batch_size, num_tokens), dtype=jnp.int32)
test_features = []
test_labels = []
print('Computing features on test')
test_examples = iter(tfds.as_numpy(test_ds))
for test_ex in test_examples:
vid_representation_test, _ = forward_apply(params=params,
state=state,
images=test_ex['video'],
audio_spectrogram=dummy_audio,
word_ids=dummy_word_ids)
test_labels.append(test_ex['label'])
test_features.append(vid_representation_test)
if len(test_labels) % 50 == 0:
print(f'Processed {len(test_labels)} examples.')
test_features = np.concatenate(test_features, axis=0)
test_labels = np.concatenate(test_labels, axis=0)
  print(f'Finished collecting test features of shape {test_features.shape}')
# Train classifier
print('Training linear classifier!')
classifier = sklearn.svm.LinearSVC(C=sklearn_reg)
scaler = preprocessing.StandardScaler().fit(train_features)
train_features = scaler.transform(train_features)
classifier.fit(train_features, train_labels.ravel())
  print('Training done!')
# Evaluation.
test_features = scaler.transform(test_features)
print('Running inference on train')
pred_train = classifier.decision_function(train_features)
print('Running inference on test')
pred_test = classifier.decision_function(test_features)
if FLAGS.num_test_windows > 1:
pred_test = np.reshape(
pred_test, (test_labels.shape[0], -1, pred_test.shape[1]))
pred_test = pred_test.mean(axis=1)
# Compute accuracies.
metrics = compute_accuracy_metrics(pred_train, train_labels[:, None],
prefix='train_')
metrics.update(
compute_accuracy_metrics(pred_test, test_labels[:, None], prefix='test_'))
print(metrics)
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
mmv/eval_ucf101.py
|