repo_name
stringlengths
1
62
dataset
stringclasses
1 value
lang
stringclasses
11 values
pr_id
int64
1
20.1k
owner
stringlengths
2
34
reviewer
stringlengths
2
39
diff_hunk
stringlengths
15
262k
code_review_comment
stringlengths
1
99.6k
axlearn
github_2023
python
568
apple
markblee
@@ -202,6 +202,11 @@ class Config(BaseLayer.Config): ) lconv: LConvLayer.Config = LConvLayer.default_config() norm: LayerNorm.Config = LayerNorm.default_config() + # Layer order. If None, default to "mhsa_before_conv", i.e., conformer layer order as + # secified in https://arxiv.org/abs/2005.08100. + # If not None, only "lconv_before_ff" "lconv_before_mhsa" "mhsa_before_lconv" is allowed.
```suggestion # If not None, only "lconv_before_ff" "lconv_before_mhsa" "mhsa_before_lconv" are allowed. ``` You can also consider a Literal[...] typing.
axlearn
github_2023
python
568
apple
markblee
@@ -253,6 +258,12 @@ def __init__(self, cfg: Config, *, parent: Module): f"cfg.right_context must be greater or equal to 0, get {cfg.right_context}." ) + if cfg.layer_order is not None: + supperted_layer_order = ["lconv_before_ff", "lconv_before_mhsa", "mhsa_before_lconv"] + if cfg.layer_order not in supperted_layer_order: + msg = f"Only {supperted_layer_order} is allowed, got {cfg.layer_order}" + raise ValueError(msg)
```suggestion if cfg.layer_order is not None: supported_layer_order = ["lconv_before_ff", "lconv_before_mhsa", "mhsa_before_lconv"] if cfg.layer_order not in supported_layer_order: raise ValueError(f"Layer order must be one of {supported_layer_order}, got {cfg.layer_order}.") ```
axlearn
github_2023
python
568
apple
markblee
@@ -253,6 +259,11 @@ def __init__(self, cfg: Config, *, parent: Module): f"cfg.right_context must be greater or equal to 0, get {cfg.right_context}." ) + if cfg.layer_order is not None: + supperted_layer_order = ["lconv_before_ff", "lconv_before_mhsa", "mhsa_before_lconv"]
```suggestion supported_layer_order = ["lconv_before_ff", "lconv_before_mhsa", "mhsa_before_lconv"] ``` This doesn't look fixed?
axlearn
github_2023
python
568
apple
markblee
@@ -253,6 +259,11 @@ def __init__(self, cfg: Config, *, parent: Module): f"cfg.right_context must be greater or equal to 0, get {cfg.right_context}." ) + if cfg.layer_order is not None: + supperted_layer_order = ["lconv_before_ff", "lconv_before_mhsa", "mhsa_before_lconv"] + if cfg.layer_order not in supperted_layer_order: + raise ValueError("Only {supperted_layer_order} is allowed, got {cfg.layer_order}")
```suggestion raise ValueError(f"Only {supperted_layer_order} is allowed, got {cfg.layer_order}") ``` This should be a fstring
axlearn
github_2023
others
568
apple
markblee
@@ -90,6 +90,19 @@ ENV PIP_FIND_LINKS=https://storage.googleapis.com/jax-releases/libtpu_releases.h RUN pip install .[tpu] COPY . . +################################################################################
Can you rebase the files?
axlearn
github_2023
others
517
apple
markblee
@@ -90,6 +90,25 @@ ENV PIP_FIND_LINKS=https://storage.googleapis.com/jax-releases/libtpu_releases.h RUN pip install .[tpu] COPY . . +################################################################################ +# GPU container spec. # +################################################################################ + +FROM base AS gpu + + +RUN apt-get install -y google-perftools + +RUN JAX_VERSION=$(grep "jax==" pyproject.toml | sed -E 's/"jax==([0-9.]+)",/\1/') && \ + pip install --no-cache-dir --upgrade \ + "jax[cuda12_pip]==${JAX_VERSION}" \ + -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
Could this be in pyproject too, since we already specify `PIP_FIND_LINKS` below?
axlearn
github_2023
python
517
apple
markblee
@@ -639,6 +639,327 @@ def _execute(self) -> Any: ) +class GPUGKEJob(GKEJob): + """A GPU job represented as a k8s JobSet. + + See also `gke_runner` as an example. + """ + + @config_class + class Config(GKEJob.Config): + """Configures GPUGKEJob. + + Attributes: + accelerator: GPU configuration. + """ + + accelerator: AcceleratorConfig = AcceleratorConfig() + + @classmethod + def define_flags(cls, fv: flags.FlagValues): + super().define_flags(fv) + common_kwargs = dict(flag_values=fv, allow_override=True) + accelerator_flags(**common_kwargs) + + @classmethod + def from_flags(cls, fv: flags.FlagValues, **kwargs) -> Config: + cfg: GPUGKEJob.Config = super().from_flags(fv, **kwargs) + cfg.accelerator.set(instance_type=fv.instance_type, num_replicas=fv.num_replicas) + return cfg + + def __init__(self, cfg: Config): + bundler_cfg = cfg.bundler + bundler_cfg = getattr(bundler_cfg, "inner", bundler_cfg) + if bundler_cfg is None or not issubclass(bundler_cfg.klass, BaseDockerBundler): + raise NotImplementedError(f"Only docker bundler supported, got: {bundler_cfg}") + super().__init__(cfg) + + def _build_sidecar_container(self) -> Nested[Any]: + cfg: GPUGKEJob.Config = self.config + + if cfg.accelerator.instance_type.startswith("a3-highgpu"): + volume_mounts = [
Add a reference/comment on what this sidecar is doing and why we only do this for a3-highgpu?
axlearn
github_2023
python
517
apple
markblee
@@ -639,6 +639,327 @@ def _execute(self) -> Any: ) +class GPUGKEJob(GKEJob): + """A GPU job represented as a k8s JobSet. + + See also `gke_runner` as an example. + """ + + @config_class + class Config(GKEJob.Config): + """Configures GPUGKEJob. + + Attributes: + accelerator: GPU configuration. + """ + + accelerator: AcceleratorConfig = AcceleratorConfig() + + @classmethod + def define_flags(cls, fv: flags.FlagValues): + super().define_flags(fv) + common_kwargs = dict(flag_values=fv, allow_override=True) + accelerator_flags(**common_kwargs) + + @classmethod + def from_flags(cls, fv: flags.FlagValues, **kwargs) -> Config: + cfg: GPUGKEJob.Config = super().from_flags(fv, **kwargs) + cfg.accelerator.set(instance_type=fv.instance_type, num_replicas=fv.num_replicas) + return cfg + + def __init__(self, cfg: Config): + bundler_cfg = cfg.bundler + bundler_cfg = getattr(bundler_cfg, "inner", bundler_cfg) + if bundler_cfg is None or not issubclass(bundler_cfg.klass, BaseDockerBundler): + raise NotImplementedError(f"Only docker bundler supported, got: {bundler_cfg}") + super().__init__(cfg) + + def _build_sidecar_container(self) -> Nested[Any]: + cfg: GPUGKEJob.Config = self.config + + if cfg.accelerator.instance_type.startswith("a3-highgpu"): + volume_mounts = [ + { + "name": "nvidia-install-dir-host", + "mountPath": "/usr/local/nvidia/lib64", + }, + { + "name": "tcpx-socket", + "mountPath": "/run/tcpx", + }, + ] + + command = [ + "bash", + "-c", + """ + /tcpgpudmarxd/build/app/tcpgpudmarxd --gpu_nic_preset a3vm \ + --gpu_shmem_type fd --uds_path /run/tcpx" \ + --setup_param "--verbose 128 2 0" & + while [ ! 
-f /run/tcpx/terminated ]; do sleep 10; done; " + """, + ] + + return dict( + name="tcpx-daemon", + image="us-docker.pkg.dev/gce-ai-infra/gpudirect-tcpx/tcpgpudmarxd-dev:v2.0.11", + securityContext={"privileged": True}, + command=command, + env=[{"name": "LD_LIBRARY_PATH", "value": "/usr/local/nvidia/lib64"}], + volumeMounts=volume_mounts, + ) + + return {} + + def _build_main_container(self) -> Nested[Any]: + """Builds the config for the container running the job. + + Returns: + A nested dict corresponding to a k8s Container config. + """ + cfg: GPUGKEJob.Config = self.config + volume_mounts = [] + if cfg.accelerator.instance_type.startswith("a3-highgpu"):
nit suggestion: construct the default volume mounts, env vars, etc. upfront, and then group all of the `a3-highgpu` specific changes under one branch.
axlearn
github_2023
python
517
apple
markblee
@@ -639,6 +639,327 @@ def _execute(self) -> Any: ) +class GPUGKEJob(GKEJob): + """A GPU job represented as a k8s JobSet. + + See also `gke_runner` as an example. + """ + + @config_class + class Config(GKEJob.Config): + """Configures GPUGKEJob. + + Attributes: + accelerator: GPU configuration. + """ + + accelerator: AcceleratorConfig = AcceleratorConfig() + + @classmethod + def define_flags(cls, fv: flags.FlagValues): + super().define_flags(fv) + common_kwargs = dict(flag_values=fv, allow_override=True) + accelerator_flags(**common_kwargs) + + @classmethod + def from_flags(cls, fv: flags.FlagValues, **kwargs) -> Config: + cfg: GPUGKEJob.Config = super().from_flags(fv, **kwargs) + cfg.accelerator.set(instance_type=fv.instance_type, num_replicas=fv.num_replicas) + return cfg + + def __init__(self, cfg: Config): + bundler_cfg = cfg.bundler + bundler_cfg = getattr(bundler_cfg, "inner", bundler_cfg) + if bundler_cfg is None or not issubclass(bundler_cfg.klass, BaseDockerBundler): + raise NotImplementedError(f"Only docker bundler supported, got: {bundler_cfg}") + super().__init__(cfg) + + def _build_sidecar_container(self) -> Nested[Any]: + cfg: GPUGKEJob.Config = self.config + + if cfg.accelerator.instance_type.startswith("a3-highgpu"): + volume_mounts = [ + { + "name": "nvidia-install-dir-host", + "mountPath": "/usr/local/nvidia/lib64", + }, + { + "name": "tcpx-socket", + "mountPath": "/run/tcpx", + }, + ] + + command = [ + "bash", + "-c", + """ + /tcpgpudmarxd/build/app/tcpgpudmarxd --gpu_nic_preset a3vm \ + --gpu_shmem_type fd --uds_path /run/tcpx" \ + --setup_param "--verbose 128 2 0" & + while [ ! 
-f /run/tcpx/terminated ]; do sleep 10; done; " + """, + ] + + return dict( + name="tcpx-daemon", + image="us-docker.pkg.dev/gce-ai-infra/gpudirect-tcpx/tcpgpudmarxd-dev:v2.0.11", + securityContext={"privileged": True}, + command=command, + env=[{"name": "LD_LIBRARY_PATH", "value": "/usr/local/nvidia/lib64"}], + volumeMounts=volume_mounts, + ) + + return {} + + def _build_main_container(self) -> Nested[Any]: + """Builds the config for the container running the job. + + Returns: + A nested dict corresponding to a k8s Container config. + """ + cfg: GPUGKEJob.Config = self.config + volume_mounts = [] + if cfg.accelerator.instance_type.startswith("a3-highgpu"): + volume_mounts.extend( + [ + {"name": "tcpx-socket", "mountPath": "/run/tcpx"}, + {"name": "shared-memory", "mountPath": "/dev/shm"}, + {"name": "nvidia-install-dir-host", "mountPath": "/usr/local/nvidia/lib64"}, + {"name": "tcpx-nccl-plugin-volume", "mountPath": "/usr/local/tcpx"}, + ] + ) + + env_vars: Dict[str, str] = {} + env_vars["DISTRIBUTED_COORDINATOR"] = f"{cfg.name}-job-0-0.{cfg.name}:8080" + env_vars["NUM_PROCESSES"] = f"{cfg.accelerator.num_replicas}" + if cfg.accelerator.instance_type.startswith("a3-highgpu"): + env_vars["LD_LIBRARY_PATH"] = "/usr/local/tcpx/lib64:/usr/local/nvidia/lib64"
nit -- `env_vars.update({ ... })` may be slightly easier to read.
axlearn
github_2023
python
517
apple
markblee
@@ -639,6 +639,327 @@ def _execute(self) -> Any: ) +class GPUGKEJob(GKEJob): + """A GPU job represented as a k8s JobSet. + + See also `gke_runner` as an example. + """ + + @config_class + class Config(GKEJob.Config): + """Configures GPUGKEJob. + + Attributes: + accelerator: GPU configuration. + """ + + accelerator: AcceleratorConfig = AcceleratorConfig() + + @classmethod + def define_flags(cls, fv: flags.FlagValues): + super().define_flags(fv) + common_kwargs = dict(flag_values=fv, allow_override=True) + accelerator_flags(**common_kwargs) + + @classmethod + def from_flags(cls, fv: flags.FlagValues, **kwargs) -> Config: + cfg: GPUGKEJob.Config = super().from_flags(fv, **kwargs) + cfg.accelerator.set(instance_type=fv.instance_type, num_replicas=fv.num_replicas) + return cfg + + def __init__(self, cfg: Config): + bundler_cfg = cfg.bundler + bundler_cfg = getattr(bundler_cfg, "inner", bundler_cfg) + if bundler_cfg is None or not issubclass(bundler_cfg.klass, BaseDockerBundler): + raise NotImplementedError(f"Only docker bundler supported, got: {bundler_cfg}") + super().__init__(cfg) + + def _build_sidecar_container(self) -> Nested[Any]: + cfg: GPUGKEJob.Config = self.config + + if cfg.accelerator.instance_type.startswith("a3-highgpu"): + volume_mounts = [ + { + "name": "nvidia-install-dir-host", + "mountPath": "/usr/local/nvidia/lib64", + }, + { + "name": "tcpx-socket", + "mountPath": "/run/tcpx", + }, + ] + + command = [ + "bash", + "-c", + """ + /tcpgpudmarxd/build/app/tcpgpudmarxd --gpu_nic_preset a3vm \ + --gpu_shmem_type fd --uds_path /run/tcpx" \ + --setup_param "--verbose 128 2 0" & + while [ ! 
-f /run/tcpx/terminated ]; do sleep 10; done; " + """, + ] + + return dict( + name="tcpx-daemon", + image="us-docker.pkg.dev/gce-ai-infra/gpudirect-tcpx/tcpgpudmarxd-dev:v2.0.11", + securityContext={"privileged": True}, + command=command, + env=[{"name": "LD_LIBRARY_PATH", "value": "/usr/local/nvidia/lib64"}], + volumeMounts=volume_mounts, + ) + + return {} + + def _build_main_container(self) -> Nested[Any]: + """Builds the config for the container running the job. + + Returns: + A nested dict corresponding to a k8s Container config. + """ + cfg: GPUGKEJob.Config = self.config + volume_mounts = [] + if cfg.accelerator.instance_type.startswith("a3-highgpu"): + volume_mounts.extend( + [ + {"name": "tcpx-socket", "mountPath": "/run/tcpx"}, + {"name": "shared-memory", "mountPath": "/dev/shm"}, + {"name": "nvidia-install-dir-host", "mountPath": "/usr/local/nvidia/lib64"}, + {"name": "tcpx-nccl-plugin-volume", "mountPath": "/usr/local/tcpx"}, + ] + ) + + env_vars: Dict[str, str] = {} + env_vars["DISTRIBUTED_COORDINATOR"] = f"{cfg.name}-job-0-0.{cfg.name}:8080" + env_vars["NUM_PROCESSES"] = f"{cfg.accelerator.num_replicas}" + if cfg.accelerator.instance_type.startswith("a3-highgpu"): + env_vars["LD_LIBRARY_PATH"] = "/usr/local/tcpx/lib64:/usr/local/nvidia/lib64" + env_vars["NCCL_CROSS_NIC"] = "0" + env_vars["NCCL_ALGO"] = "Ring" + env_vars["NCCL_PROTO"] = "Simple" + env_vars["NCCL_DEBUG"] = "WARN" + env_vars["NCCL_DEBUG_SUBSYS"] = "INIT,GRAPH,ENV,TUNING,NET,VERSION" + env_vars["NCCL_NET_GDR_LEVEL"] = "PIX" + env_vars["NCCL_P2P_PXN_LEVEL"] = "0" + env_vars["NCCL_GPUDIRECTTCPX_FORCE_ACK"] = "0" + env_vars["NCCL_GPUDIRECTTCPX_TX_COMPLETION_NANOSLEEP"] = "1000" + env_vars["NCCL_GPUDIRECTTCPX_PROGRAM_FLOW_STEERING_WAIT_MICROS"] = "1000000" + env_vars[ + "NCCL_GPUDIRECTTCPX_TX_BINDINGS" + ] = "eth1:8-21,112-125;eth2:8-21,112-125;eth3:60-73,164-177;eth4:60-73,164-177" + env_vars[ + "NCCL_GPUDIRECTTCPX_RX_BINDINGS" + ] = 
"eth1:22-35,124-139;eth2:22-35,124-139;eth3:74-87,178-191;eth4:74-87,178-191" + env_vars["NCCL_GPUDIRECTTCPX_SOCKET_IFNAME"] = "eth1,eth2,eth3,eth4" + env_vars["NCCL_GPUDIRECTTCPX_CTRL_DEV"] = "eth0" + env_vars["NCCL_GPUDIRECTTCPX_UNIX_CLIENT_PREFIX"] = "/run/tcpx" + env_vars["NCCL_DYNAMIC_CHUNK_SIZE"] = "524288" + env_vars["NCCL_P2P_NET_CHUNKSIZE"] = "524288" + env_vars["NCCL_P2P_PCI_CHUNKSIZE"] = "524288" + env_vars["NCCL_P2P_NVL_CHUNKSIZE"] = "1048576" + env_vars["NCCL_NSOCKS_PERTHREAD"] = "4" + env_vars["NCCL_SOCKET_NTHREADS"] = "1" + env_vars["NCCL_SOCKET_IFNAME"] = "eth0" + env_vars["NCCL_NVLS_ENABLE"] = "0" + + env_vars.update(cfg.env_vars) + # K8s expects each env variable to be a dict + k8s_env_vars = [{"name": name, "value": value} for name, value in env_vars.items()] + k8s_env_vars.append( + { + "name": "PROCESS_ID", + "valueFrom": { + "fieldRef": { + "fieldPath": ( + "metadata.annotations['batch.kubernetes.io/" "job-completion-index']" + ), + } + }, + }, + ) + + command = ["bash", "-c", cfg.command] + if cfg.accelerator.instance_type.startswith("a3-highgpu"): + command.append("touch /run/tcpx/terminated")
What is this file used for? Is it read by the sidecar?
axlearn
github_2023
python
517
apple
markblee
@@ -70,7 +70,7 @@ flags.DEFINE_string("jax_backend", None, "Specifies the XLA backend to use.", required=True) flags.DEFINE_string( "distributed_coordinator", - None, + os.environ.get("DISTRIBUTED_COORDINATOR", None),
Do we need this change for the GPU runner, or can we just supply the flags for now?
axlearn
github_2023
python
517
apple
markblee
@@ -446,9 +446,18 @@ def from_flags(cls, fv: flags.FlagValues, **kwargs): return cfg +class GPUGKERunnerJob(GKERunnerJob): + """A GKERunnerJob that uses GPUGKEJob.""" + + inner = GPUGKEJob + pre_provisioner = TPUNodePoolProvisioner
A reminder to change `TPUNodePoolProvisioner` when ready. Maybe we can just use the default implementation that raises NotImplementedError.
axlearn
github_2023
python
517
apple
markblee
@@ -639,6 +639,327 @@ def _execute(self) -> Any: ) +class GPUGKEJob(GKEJob): + """A GPU job represented as a k8s JobSet. + + See also `gke_runner` as an example. + """ + + @config_class + class Config(GKEJob.Config): + """Configures GPUGKEJob. + + Attributes: + accelerator: GPU configuration. + """ + + accelerator: AcceleratorConfig = AcceleratorConfig() + + @classmethod + def define_flags(cls, fv: flags.FlagValues): + super().define_flags(fv) + common_kwargs = dict(flag_values=fv, allow_override=True) + accelerator_flags(**common_kwargs) + + @classmethod + def from_flags(cls, fv: flags.FlagValues, **kwargs) -> Config: + cfg: GPUGKEJob.Config = super().from_flags(fv, **kwargs) + cfg.accelerator.set(instance_type=fv.instance_type, num_replicas=fv.num_replicas) + return cfg + + def __init__(self, cfg: Config): + bundler_cfg = cfg.bundler + bundler_cfg = getattr(bundler_cfg, "inner", bundler_cfg) + if bundler_cfg is None or not issubclass(bundler_cfg.klass, BaseDockerBundler): + raise NotImplementedError(f"Only docker bundler supported, got: {bundler_cfg}") + super().__init__(cfg) + + def _build_sidecar_container(self) -> Nested[Any]: + cfg: GPUGKEJob.Config = self.config + + if cfg.accelerator.instance_type.startswith("a3-highgpu"): + volume_mounts = [ + { + "name": "nvidia-install-dir-host", + "mountPath": "/usr/local/nvidia/lib64", + }, + { + "name": "tcpx-socket", + "mountPath": "/run/tcpx", + }, + ] + + command = [ + "bash", + "-c", + """ + /tcpgpudmarxd/build/app/tcpgpudmarxd --gpu_nic_preset a3vm \ + --gpu_shmem_type fd --uds_path /run/tcpx" \ + --setup_param "--verbose 128 2 0" & + while [ ! 
-f /run/tcpx/terminated ]; do sleep 10; done; " + """, + ] + + return dict( + name="tcpx-daemon", + image="us-docker.pkg.dev/gce-ai-infra/gpudirect-tcpx/tcpgpudmarxd-dev:v2.0.11", + securityContext={"privileged": True}, + command=command, + env=[{"name": "LD_LIBRARY_PATH", "value": "/usr/local/nvidia/lib64"}], + volumeMounts=volume_mounts, + ) + + return {} + + def _build_main_container(self) -> Nested[Any]: + """Builds the config for the container running the job. + + Returns: + A nested dict corresponding to a k8s Container config. + """ + cfg: GPUGKEJob.Config = self.config + volume_mounts = [] + if cfg.accelerator.instance_type.startswith("a3-highgpu"): + volume_mounts.extend( + [ + {"name": "tcpx-socket", "mountPath": "/run/tcpx"}, + {"name": "shared-memory", "mountPath": "/dev/shm"}, + {"name": "nvidia-install-dir-host", "mountPath": "/usr/local/nvidia/lib64"}, + {"name": "tcpx-nccl-plugin-volume", "mountPath": "/usr/local/tcpx"}, + ] + ) + + env_vars: Dict[str, str] = {} + env_vars["DISTRIBUTED_COORDINATOR"] = f"{cfg.name}-job-0-0.{cfg.name}:8080" + env_vars["NUM_PROCESSES"] = f"{cfg.accelerator.num_replicas}" + if cfg.accelerator.instance_type.startswith("a3-highgpu"): + env_vars["LD_LIBRARY_PATH"] = "/usr/local/tcpx/lib64:/usr/local/nvidia/lib64" + env_vars["NCCL_CROSS_NIC"] = "0" + env_vars["NCCL_ALGO"] = "Ring" + env_vars["NCCL_PROTO"] = "Simple" + env_vars["NCCL_DEBUG"] = "WARN" + env_vars["NCCL_DEBUG_SUBSYS"] = "INIT,GRAPH,ENV,TUNING,NET,VERSION" + env_vars["NCCL_NET_GDR_LEVEL"] = "PIX" + env_vars["NCCL_P2P_PXN_LEVEL"] = "0" + env_vars["NCCL_GPUDIRECTTCPX_FORCE_ACK"] = "0" + env_vars["NCCL_GPUDIRECTTCPX_TX_COMPLETION_NANOSLEEP"] = "1000" + env_vars["NCCL_GPUDIRECTTCPX_PROGRAM_FLOW_STEERING_WAIT_MICROS"] = "1000000" + env_vars[ + "NCCL_GPUDIRECTTCPX_TX_BINDINGS" + ] = "eth1:8-21,112-125;eth2:8-21,112-125;eth3:60-73,164-177;eth4:60-73,164-177" + env_vars[ + "NCCL_GPUDIRECTTCPX_RX_BINDINGS" + ] = 
"eth1:22-35,124-139;eth2:22-35,124-139;eth3:74-87,178-191;eth4:74-87,178-191" + env_vars["NCCL_GPUDIRECTTCPX_SOCKET_IFNAME"] = "eth1,eth2,eth3,eth4" + env_vars["NCCL_GPUDIRECTTCPX_CTRL_DEV"] = "eth0" + env_vars["NCCL_GPUDIRECTTCPX_UNIX_CLIENT_PREFIX"] = "/run/tcpx" + env_vars["NCCL_DYNAMIC_CHUNK_SIZE"] = "524288" + env_vars["NCCL_P2P_NET_CHUNKSIZE"] = "524288" + env_vars["NCCL_P2P_PCI_CHUNKSIZE"] = "524288" + env_vars["NCCL_P2P_NVL_CHUNKSIZE"] = "1048576" + env_vars["NCCL_NSOCKS_PERTHREAD"] = "4" + env_vars["NCCL_SOCKET_NTHREADS"] = "1" + env_vars["NCCL_SOCKET_IFNAME"] = "eth0" + env_vars["NCCL_NVLS_ENABLE"] = "0" + + env_vars.update(cfg.env_vars) + # K8s expects each env variable to be a dict + k8s_env_vars = [{"name": name, "value": value} for name, value in env_vars.items()] + k8s_env_vars.append( + { + "name": "PROCESS_ID", + "valueFrom": { + "fieldRef": { + "fieldPath": ( + "metadata.annotations['batch.kubernetes.io/" "job-completion-index']" + ), + } + }, + }, + ) + + command = ["bash", "-c", cfg.command] + if cfg.accelerator.instance_type.startswith("a3-highgpu"): + command.append("touch /run/tcpx/terminated") + + return dict( + name=cfg.name, + image=self._bundler.id(cfg.name), + ports=[ + dict(containerPort=8080), # Port for MXLA coordinator. + ], + securityContext=dict(privileged=True), + # TODO(markblee): Improve SIGTERM behavior for command. + command=command, + resources=dict(limits={"nvidia.com/gpu": "8"}), + env=k8s_env_vars, + volumeMounts=volume_mounts, + ) + + def _build_init_container(self) -> Nested[Any]: + """Builds a config for a single container.""" + cfg: GPUGKEJob.Config = self.config + if cfg.accelerator.instance_type.startswith("a3-highgpu"):
I noticed that we have this check in a lot of places. What are the other instance types that we intend to support?
axlearn
github_2023
python
517
apple
markblee
@@ -639,6 +639,359 @@ def _execute(self) -> Any: ) +class GPUGKEJob(GKEJob): + """A GPU job represented as a k8s JobSet. + + See also `gke_runner` as an example. + """ + + @config_class + class Config(GKEJob.Config): + """Configures GPUGKEJob. + + Attributes: + accelerator: GPU configuration.
Document `queue` and the behavior when `None`?
axlearn
github_2023
python
517
apple
markblee
@@ -639,6 +639,359 @@ def _execute(self) -> Any: ) +class GPUGKEJob(GKEJob): + """A GPU job represented as a k8s JobSet. + + See also `gke_runner` as an example. + """ + + @config_class + class Config(GKEJob.Config): + """Configures GPUGKEJob. + + Attributes: + accelerator: GPU configuration. + """ + + accelerator: AcceleratorConfig = AcceleratorConfig() + queue: Optional[str] = None + + @classmethod + def define_flags(cls, fv: flags.FlagValues): + super().define_flags(fv) + common_kwargs = dict(flag_values=fv, allow_override=True) + accelerator_flags(**common_kwargs) + flags.DEFINE_string( + "queue", + None, + "The name of the Kueue LocalQueue to use.", + **common_kwargs, + ) + + @classmethod + def from_flags(cls, fv: flags.FlagValues, **kwargs) -> Config: + cfg: GPUGKEJob.Config = super().from_flags(fv, **kwargs) + cfg.accelerator.set(instance_type=fv.instance_type, num_replicas=fv.num_replicas) + return cfg + + def __init__(self, cfg: Config): + bundler_cfg = cfg.bundler + bundler_cfg = getattr(bundler_cfg, "inner", bundler_cfg) + if bundler_cfg is None or not issubclass(bundler_cfg.klass, BaseDockerBundler): + raise NotImplementedError(f"Only docker bundler supported, got: {bundler_cfg}") + super().__init__(cfg) + + def _build_sidecar_container(self) -> Nested[Any]: + """Builds a sidecar container which is required by A3 and A3 Mega + for GPU to GPU RDMA like networking. + + Returns: + A nested dict on machine types that require a sidecar. Otherwise, + returns {}. + """ + cfg: GPUGKEJob.Config = self.config + + # Different machine types require different sidecar containers + # for example A3 requires a tcpx socket but A3 Mega does not
nit -- comments should be full sentences with punctuation. (Please also fix below.) ```suggestion # Different machine types require different sidecar containers. # For example A3 requires a tcpx socket but A3 Mega does not. ``` Also, should we add a pointer to e.g. https://cloud.google.com/kubernetes-engine/docs/how-to/gpu-bandwidth-gpudirect-tcpx#add-gpudirect-manifests?
axlearn
github_2023
python
517
apple
markblee
@@ -639,6 +639,359 @@ def _execute(self) -> Any: ) +class GPUGKEJob(GKEJob): + """A GPU job represented as a k8s JobSet. + + See also `gke_runner` as an example. + """ + + @config_class + class Config(GKEJob.Config): + """Configures GPUGKEJob. + + Attributes: + accelerator: GPU configuration. + """ + + accelerator: AcceleratorConfig = AcceleratorConfig() + queue: Optional[str] = None + + @classmethod + def define_flags(cls, fv: flags.FlagValues): + super().define_flags(fv) + common_kwargs = dict(flag_values=fv, allow_override=True) + accelerator_flags(**common_kwargs) + flags.DEFINE_string( + "queue", + None, + "The name of the Kueue LocalQueue to use.", + **common_kwargs, + ) + + @classmethod + def from_flags(cls, fv: flags.FlagValues, **kwargs) -> Config: + cfg: GPUGKEJob.Config = super().from_flags(fv, **kwargs) + cfg.accelerator.set(instance_type=fv.instance_type, num_replicas=fv.num_replicas) + return cfg + + def __init__(self, cfg: Config): + bundler_cfg = cfg.bundler + bundler_cfg = getattr(bundler_cfg, "inner", bundler_cfg) + if bundler_cfg is None or not issubclass(bundler_cfg.klass, BaseDockerBundler): + raise NotImplementedError(f"Only docker bundler supported, got: {bundler_cfg}") + super().__init__(cfg) + + def _build_sidecar_container(self) -> Nested[Any]: + """Builds a sidecar container which is required by A3 and A3 Mega + for GPU to GPU RDMA like networking. + + Returns: + A nested dict on machine types that require a sidecar. Otherwise, + returns {}. 
+ """ + cfg: GPUGKEJob.Config = self.config + + # Different machine types require different sidecar containers + # for example A3 requires a tcpx socket but A3 Mega does not + if cfg.accelerator.instance_type.startswith("gpu-a3-highgpu"): + volume_mounts = [ + { + "name": "nvidia-install-dir-host", + "mountPath": "/usr/local/nvidia/lib64", + }, + { + "name": "tcpx-socket", + "mountPath": "/run/tcpx", + }, + ] + + command = [ + "bash", + "-c", + 'set -x; /tcpgpudmarxd/build/app/tcpgpudmarxd --gpu_nic_preset a3vm \ + --gpu_shmem_type fd --uds_path /run/tcpx \ + --setup_param "--verbose 128 2 0" & \n\ + while [ ! -f /run/tcpx/terminated ]; do sleep 10; done;', + ] + + return dict( + name="tcpx-daemon", + image="us-docker.pkg.dev/gce-ai-infra/gpudirect-tcpx/tcpgpudmarxd-dev:v2.0.11", + securityContext={"privileged": True}, + command=command, + env=[{"name": "LD_LIBRARY_PATH", "value": "/usr/local/nvidia/lib64"}], + volumeMounts=volume_mounts, + ) + + return {} + + def _build_main_container(self) -> Nested[Any]: + """Builds the config for the container running the job. + + Returns: + A nested dict corresponding to a k8s Container config. + """ + cfg: GPUGKEJob.Config = self.config + volume_mounts = [{"name": "shared-memory", "mountPath": "/dev/shm"}] + env_vars: Dict[str, str] = {} + env_vars["DISTRIBUTED_COORDINATOR"] = f"{cfg.name}-job-0-0.{cfg.name}:8080" + env_vars["NUM_PROCESSES"] = f"{cfg.accelerator.num_replicas}" + + default_xla_flags = [ + "--xla_gpu_enable_latency_hiding_scheduler=true", + "--xla_gpu_all_reduce_contiguous", + "--xla_gpu_all_reduce_combine_threshold_bytes=1073741824", + "--xla_gpu_all_gather_combine_threshold_bytes=1073741824", + "--xla_gpu_reduce_scatter_combine_threshold_bytes=1073741824",
These can be workload dependent -- should we omit and let XLA decide as the default?
axlearn
github_2023
python
517
apple
markblee
@@ -639,6 +639,359 @@ def _execute(self) -> Any: ) +class GPUGKEJob(GKEJob): + """A GPU job represented as a k8s JobSet. + + See also `gke_runner` as an example. + """ + + @config_class + class Config(GKEJob.Config): + """Configures GPUGKEJob. + + Attributes: + accelerator: GPU configuration. + """ + + accelerator: AcceleratorConfig = AcceleratorConfig() + queue: Optional[str] = None + + @classmethod + def define_flags(cls, fv: flags.FlagValues): + super().define_flags(fv) + common_kwargs = dict(flag_values=fv, allow_override=True) + accelerator_flags(**common_kwargs) + flags.DEFINE_string( + "queue", + None, + "The name of the Kueue LocalQueue to use.", + **common_kwargs, + ) + + @classmethod + def from_flags(cls, fv: flags.FlagValues, **kwargs) -> Config: + cfg: GPUGKEJob.Config = super().from_flags(fv, **kwargs) + cfg.accelerator.set(instance_type=fv.instance_type, num_replicas=fv.num_replicas) + return cfg + + def __init__(self, cfg: Config): + bundler_cfg = cfg.bundler + bundler_cfg = getattr(bundler_cfg, "inner", bundler_cfg) + if bundler_cfg is None or not issubclass(bundler_cfg.klass, BaseDockerBundler): + raise NotImplementedError(f"Only docker bundler supported, got: {bundler_cfg}") + super().__init__(cfg) + + def _build_sidecar_container(self) -> Nested[Any]: + """Builds a sidecar container which is required by A3 and A3 Mega + for GPU to GPU RDMA like networking. + + Returns: + A nested dict on machine types that require a sidecar. Otherwise, + returns {}. 
+ """ + cfg: GPUGKEJob.Config = self.config + + # Different machine types require different sidecar containers + # for example A3 requires a tcpx socket but A3 Mega does not + if cfg.accelerator.instance_type.startswith("gpu-a3-highgpu"): + volume_mounts = [ + { + "name": "nvidia-install-dir-host", + "mountPath": "/usr/local/nvidia/lib64", + }, + { + "name": "tcpx-socket", + "mountPath": "/run/tcpx", + }, + ] + + command = [ + "bash", + "-c", + 'set -x; /tcpgpudmarxd/build/app/tcpgpudmarxd --gpu_nic_preset a3vm \ + --gpu_shmem_type fd --uds_path /run/tcpx \ + --setup_param "--verbose 128 2 0" & \n\ + while [ ! -f /run/tcpx/terminated ]; do sleep 10; done;', + ] + + return dict( + name="tcpx-daemon", + image="us-docker.pkg.dev/gce-ai-infra/gpudirect-tcpx/tcpgpudmarxd-dev:v2.0.11", + securityContext={"privileged": True}, + command=command, + env=[{"name": "LD_LIBRARY_PATH", "value": "/usr/local/nvidia/lib64"}], + volumeMounts=volume_mounts, + ) + + return {} + + def _build_main_container(self) -> Nested[Any]: + """Builds the config for the container running the job. + + Returns: + A nested dict corresponding to a k8s Container config. 
+ """ + cfg: GPUGKEJob.Config = self.config + volume_mounts = [{"name": "shared-memory", "mountPath": "/dev/shm"}] + env_vars: Dict[str, str] = {} + env_vars["DISTRIBUTED_COORDINATOR"] = f"{cfg.name}-job-0-0.{cfg.name}:8080" + env_vars["NUM_PROCESSES"] = f"{cfg.accelerator.num_replicas}" + + default_xla_flags = [ + "--xla_gpu_enable_latency_hiding_scheduler=true", + "--xla_gpu_all_reduce_contiguous", + "--xla_gpu_all_reduce_combine_threshold_bytes=1073741824", + "--xla_gpu_all_gather_combine_threshold_bytes=1073741824", + "--xla_gpu_reduce_scatter_combine_threshold_bytes=1073741824", + ] + env_vars["XLA_FLAGS"] = " ".join(default_xla_flags) + + if cfg.accelerator.instance_type.startswith("gpu-a3-highgpu"): + volume_mounts.extend( + [ + {"name": "tcpx-socket", "mountPath": "/run/tcpx"}, + {"name": "nvidia-install-dir-host", "mountPath": "/usr/local/nvidia/lib64"}, + {"name": "tcpx-nccl-plugin-volume", "mountPath": "/usr/local/tcpx"}, + ] + ) + env_vars.update( + {
I think this whole section can benefit from more comments/pointers to what these are doing or how we decided on the defaults.
axlearn
github_2023
python
517
apple
markblee
@@ -639,6 +639,359 @@ def _execute(self) -> Any: ) +class GPUGKEJob(GKEJob): + """A GPU job represented as a k8s JobSet. + + See also `gke_runner` as an example. + """ + + @config_class + class Config(GKEJob.Config): + """Configures GPUGKEJob. + + Attributes: + accelerator: GPU configuration. + """ + + accelerator: AcceleratorConfig = AcceleratorConfig() + queue: Optional[str] = None + + @classmethod + def define_flags(cls, fv: flags.FlagValues): + super().define_flags(fv) + common_kwargs = dict(flag_values=fv, allow_override=True) + accelerator_flags(**common_kwargs) + flags.DEFINE_string( + "queue", + None, + "The name of the Kueue LocalQueue to use.", + **common_kwargs, + ) + + @classmethod + def from_flags(cls, fv: flags.FlagValues, **kwargs) -> Config: + cfg: GPUGKEJob.Config = super().from_flags(fv, **kwargs) + cfg.accelerator.set(instance_type=fv.instance_type, num_replicas=fv.num_replicas) + return cfg + + def __init__(self, cfg: Config): + bundler_cfg = cfg.bundler + bundler_cfg = getattr(bundler_cfg, "inner", bundler_cfg) + if bundler_cfg is None or not issubclass(bundler_cfg.klass, BaseDockerBundler): + raise NotImplementedError(f"Only docker bundler supported, got: {bundler_cfg}") + super().__init__(cfg) + + def _build_sidecar_container(self) -> Nested[Any]: + """Builds a sidecar container which is required by A3 and A3 Mega + for GPU to GPU RDMA like networking. + + Returns: + A nested dict on machine types that require a sidecar. Otherwise, + returns {}. 
+ """ + cfg: GPUGKEJob.Config = self.config + + # Different machine types require different sidecar containers + # for example A3 requires a tcpx socket but A3 Mega does not + if cfg.accelerator.instance_type.startswith("gpu-a3-highgpu"): + volume_mounts = [ + { + "name": "nvidia-install-dir-host", + "mountPath": "/usr/local/nvidia/lib64", + }, + { + "name": "tcpx-socket", + "mountPath": "/run/tcpx", + }, + ] + + command = [ + "bash", + "-c", + 'set -x; /tcpgpudmarxd/build/app/tcpgpudmarxd --gpu_nic_preset a3vm \ + --gpu_shmem_type fd --uds_path /run/tcpx \ + --setup_param "--verbose 128 2 0" & \n\ + while [ ! -f /run/tcpx/terminated ]; do sleep 10; done;', + ] + + return dict( + name="tcpx-daemon", + image="us-docker.pkg.dev/gce-ai-infra/gpudirect-tcpx/tcpgpudmarxd-dev:v2.0.11", + securityContext={"privileged": True}, + command=command, + env=[{"name": "LD_LIBRARY_PATH", "value": "/usr/local/nvidia/lib64"}], + volumeMounts=volume_mounts, + ) + + return {} + + def _build_main_container(self) -> Nested[Any]: + """Builds the config for the container running the job. + + Returns: + A nested dict corresponding to a k8s Container config. 
+ """ + cfg: GPUGKEJob.Config = self.config + volume_mounts = [{"name": "shared-memory", "mountPath": "/dev/shm"}] + env_vars: Dict[str, str] = {} + env_vars["DISTRIBUTED_COORDINATOR"] = f"{cfg.name}-job-0-0.{cfg.name}:8080" + env_vars["NUM_PROCESSES"] = f"{cfg.accelerator.num_replicas}" + + default_xla_flags = [ + "--xla_gpu_enable_latency_hiding_scheduler=true", + "--xla_gpu_all_reduce_contiguous", + "--xla_gpu_all_reduce_combine_threshold_bytes=1073741824", + "--xla_gpu_all_gather_combine_threshold_bytes=1073741824", + "--xla_gpu_reduce_scatter_combine_threshold_bytes=1073741824", + ] + env_vars["XLA_FLAGS"] = " ".join(default_xla_flags) + + if cfg.accelerator.instance_type.startswith("gpu-a3-highgpu"): + volume_mounts.extend( + [ + {"name": "tcpx-socket", "mountPath": "/run/tcpx"}, + {"name": "nvidia-install-dir-host", "mountPath": "/usr/local/nvidia/lib64"}, + {"name": "tcpx-nccl-plugin-volume", "mountPath": "/usr/local/tcpx"}, + ] + ) + env_vars.update( + { + "LD_LIBRARY_PATH": "/usr/local/tcpx/lib64:/usr/local/nvidia/lib64", + "NCCL_CROSS_NIC": "0", + "NCCL_ALGO": "Ring", + "NCCL_PROTO": "Simple", + "NCCL_DEBUG": "WARN", + "NCCL_DEBUG_SUBSYS": "INIT,GRAPH,ENV,TUNING,NET,VERSION", + "NCCL_NET_GDR_LEVEL": "PIX", + "NCCL_P2P_PXN_LEVEL": "0", + "NCCL_GPUDIRECTTCPX_FORCE_ACK": "0", + "NCCL_GPUDIRECTTCPX_TX_COMPLETION_NANOSLEEP": "1000", + "NCCL_GPUDIRECTTCPX_PROGRAM_FLOW_STEERING_WAIT_MICROS": "1000000", + "NCCL_GPUDIRECTTCPX_TX_BINDINGS": ( + "eth1:8-21,112-125;eth2:8-21,112-125;" + "eth3:60-73,164-177;eth4:60-73,164-177" + ), + "NCCL_GPUDIRECTTCPX_RX_BINDINGS": ( + "eth1:22-35,124-139;eth2:22-35,124-139;" + "eth3:74-87,178-191;eth4:74-87,178-191" + ), + "NCCL_GPUDIRECTTCPX_SOCKET_IFNAME": "eth1,eth2,eth3,eth4", + "NCCL_GPUDIRECTTCPX_CTRL_DEV": "eth0", + "NCCL_GPUDIRECTTCPX_UNIX_CLIENT_PREFIX": "/run/tcpx", + "NCCL_DYNAMIC_CHUNK_SIZE": "524288", + "NCCL_P2P_NET_CHUNKSIZE": "524288", + "NCCL_P2P_PCI_CHUNKSIZE": "524288", + "NCCL_P2P_NVL_CHUNKSIZE": "1048576", + 
"NCCL_NSOCKS_PERTHREAD": "4", + "NCCL_SOCKET_NTHREADS": "1", + "NCCL_SOCKET_IFNAME": "eth0", + "NCCL_NVLS_ENABLE": "0", + } + ) + + # Override env vars with user provided env vars + env_vars.update(cfg.env_vars) + # K8s expects each env variable to be a dict + k8s_env_vars = [{"name": name, "value": value} for name, value in env_vars.items()] + k8s_env_vars.append( + { + "name": "PROCESS_ID", + "valueFrom": { + "fieldRef": { + "fieldPath": ( + "metadata.annotations['batch.kubernetes.io/" "job-completion-index']" + ), + } + }, + }, + ) + + user_cmd = cfg.command + # This is needed to make the sidecar exit when the main container exits. + if cfg.accelerator.instance_type.startswith("gpu-a3-highgpu"): + user_cmd += "\ntouch /run/tcpx/terminated"
Is it expected that user command and the `touch` command are separated only by newline? (Should we add a semicolon or similar?)
axlearn
github_2023
python
517
apple
markblee
@@ -109,6 +109,9 @@ def named_trainer_configs() -> Dict[str, TrainerConfigFn]: ) kwargs = fuji.get_trainer_kwargs(model_size, vocab_size=vocab_size, version=version) max_sequence_length = kwargs.pop("max_sequence_length") + + # TODO remove before merging + kwargs["max_step"] = 1000
You can create a separate module with your changes and point to it via `--module`, similar to your previous fuji experiments. Let me know if you prefer a more concrete example.
axlearn
github_2023
python
517
apple
markblee
@@ -639,6 +639,384 @@ def _execute(self) -> Any: ) +class GPUGKEJob(GKEJob): + """A GPU job represented as a k8s JobSet. + + See also `gke_runner` as an example. + """ + + @config_class + class Config(GKEJob.Config): + """Configures GPUGKEJob. + + Attributes: + accelerator: GPU configuration. + queue: The Kueue LocalQueue to use. If not set, no queue is used. + """ + + accelerator: AcceleratorConfig = AcceleratorConfig() + queue: Optional[str] = None + + @classmethod + def define_flags(cls, fv: flags.FlagValues): + super().define_flags(fv) + common_kwargs = dict(flag_values=fv, allow_override=True) + accelerator_flags(**common_kwargs) + flags.DEFINE_string( + "queue", + None, + "The name of the Kueue LocalQueue to use. If not set, no queue is used.", + **common_kwargs, + ) + + @classmethod + def from_flags(cls, fv: flags.FlagValues, **kwargs) -> Config: + cfg: GPUGKEJob.Config = super().from_flags(fv, **kwargs) + cfg.accelerator.set(instance_type=fv.instance_type, num_replicas=fv.num_replicas) + return cfg + + def __init__(self, cfg: Config): + bundler_cfg = cfg.bundler + bundler_cfg = getattr(bundler_cfg, "inner", bundler_cfg) + if bundler_cfg is None or not issubclass(bundler_cfg.klass, BaseDockerBundler): + raise NotImplementedError(f"Only docker bundler supported, got: {bundler_cfg}") + super().__init__(cfg) + if cfg.gcsfuse_mount: + raise NotImplementedError("GCSFuse is not supported on GKE with GPU.") + if cfg.enable_pre_provisioner: + raise NotImplementedError("Pre-provisioner is not supported on GKE with GPU.") + instance_type = cfg.accelerator.instance_type + if not instance_type.startswith("gpu-a3-highgpu"): + raise NotImplementedError( + f"The instance type {instance_type} is not supported on GKE with GPU." + "Only gpu-a3-highgpu-8g is supported"
```suggestion f"The instance type {instance_type} is not supported on GKE with GPU. " "Only gpu-a3-highgpu-8g is supported." ``` (With this check, the other checks for `instance_type.startswith("gpu-a3-highgpu")` are now redundant.)
axlearn
github_2023
python
517
apple
markblee
@@ -639,6 +639,384 @@ def _execute(self) -> Any: ) +class GPUGKEJob(GKEJob): + """A GPU job represented as a k8s JobSet. + + See also `gke_runner` as an example. + """ + + @config_class + class Config(GKEJob.Config): + """Configures GPUGKEJob. + + Attributes: + accelerator: GPU configuration. + queue: The Kueue LocalQueue to use. If not set, no queue is used. + """ + + accelerator: AcceleratorConfig = AcceleratorConfig() + queue: Optional[str] = None + + @classmethod + def define_flags(cls, fv: flags.FlagValues): + super().define_flags(fv) + common_kwargs = dict(flag_values=fv, allow_override=True) + accelerator_flags(**common_kwargs) + flags.DEFINE_string( + "queue", + None, + "The name of the Kueue LocalQueue to use. If not set, no queue is used.", + **common_kwargs, + ) + + @classmethod + def from_flags(cls, fv: flags.FlagValues, **kwargs) -> Config: + cfg: GPUGKEJob.Config = super().from_flags(fv, **kwargs) + cfg.accelerator.set(instance_type=fv.instance_type, num_replicas=fv.num_replicas) + return cfg + + def __init__(self, cfg: Config): + bundler_cfg = cfg.bundler + bundler_cfg = getattr(bundler_cfg, "inner", bundler_cfg) + if bundler_cfg is None or not issubclass(bundler_cfg.klass, BaseDockerBundler): + raise NotImplementedError(f"Only docker bundler supported, got: {bundler_cfg}") + super().__init__(cfg) + if cfg.gcsfuse_mount: + raise NotImplementedError("GCSFuse is not supported on GKE with GPU.") + if cfg.enable_pre_provisioner: + raise NotImplementedError("Pre-provisioner is not supported on GKE with GPU.") + instance_type = cfg.accelerator.instance_type + if not instance_type.startswith("gpu-a3-highgpu"): + raise NotImplementedError( + f"The instance type {instance_type} is not supported on GKE with GPU." + "Only gpu-a3-highgpu-8g is supported" + ) + + def _build_sidecar_container(self) -> Nested[Any]: + """Builds a sidecar container which is required by A3 and A3 Mega + for GPU to GPU RDMA like networking. 
+ + Returns: + A nested dict on machine types that require a sidecar. Otherwise, + returns {}. + """ + cfg: GPUGKEJob.Config = self.config + + # Different machine types require different sidecar containers. + # For example A3 requires a tcpx socket but A3 Mega does not. + # GCP docs for A3 TCPX: + # https://cloud.google.com/kubernetes-engine/docs/how-to/gpu-bandwidth-gpudirect-tcpx#add-gpudirect-manifests + if cfg.accelerator.instance_type.startswith("gpu-a3-highgpu"): + volume_mounts = [ + { + "name": "nvidia-install-dir-host", + "mountPath": "/usr/local/nvidia/lib64", + }, + { + "name": "tcpx-socket", + "mountPath": "/run/tcpx", + }, + ] + + command = [ + "bash", + "-c", + 'set -x; /tcpgpudmarxd/build/app/tcpgpudmarxd --gpu_nic_preset a3vm \ + --gpu_shmem_type fd --uds_path /run/tcpx \ + --setup_param "--verbose 128 2 0" & \n\ + while [ ! -f /run/tcpx/terminated ]; do sleep 10; done;', + ] + + return dict( + name="tcpx-daemon", + image="us-docker.pkg.dev/gce-ai-infra/gpudirect-tcpx/tcpgpudmarxd-dev:v2.0.11", + securityContext={"privileged": True}, + command=command, + env=[{"name": "LD_LIBRARY_PATH", "value": "/usr/local/nvidia/lib64"}], + volumeMounts=volume_mounts, + ) + + return {} + + def _build_main_container(self) -> Nested[Any]: + """Builds the config for the container running the job. + + Returns: + A nested dict corresponding to a k8s Container config. 
+ """ + cfg: GPUGKEJob.Config = self.config + volume_mounts = [{"name": "shared-memory", "mountPath": "/dev/shm"}] + env_vars: Dict[str, str] = {} + env_vars["DISTRIBUTED_COORDINATOR"] = f"{cfg.name}-job-0-0.{cfg.name}:8080" + env_vars["NUM_PROCESSES"] = f"{cfg.accelerator.num_replicas}" + + default_xla_flags = [ + "--xla_gpu_enable_latency_hiding_scheduler=true", + "--xla_gpu_all_reduce_contiguous", + "--xla_gpu_all_reduce_combine_threshold_bytes=1073741824", + "--xla_gpu_all_gather_combine_threshold_bytes=1073741824", + "--xla_gpu_reduce_scatter_combine_threshold_bytes=1073741824", + ] + env_vars["XLA_FLAGS"] = " ".join(default_xla_flags) + + if cfg.accelerator.instance_type.startswith("gpu-a3-highgpu"): + volume_mounts.extend( + [ + {"name": "tcpx-socket", "mountPath": "/run/tcpx"}, + {"name": "nvidia-install-dir-host", "mountPath": "/usr/local/nvidia/lib64"}, + {"name": "tcpx-nccl-plugin-volume", "mountPath": "/usr/local/tcpx"}, + ] + ) + env_vars.update( + { + "LD_LIBRARY_PATH": "/usr/local/tcpx/lib64:/usr/local/nvidia/lib64", + # Set to 0 to encourage rail alignment. + "NCCL_CROSS_NIC": "0", + # TCPX only supports Ring algorithm. + "NCCL_ALGO": "Ring", + # TCPX only supports Simple protocol. + "NCCL_PROTO": "Simple", + "NCCL_DEBUG": "WARN", + "NCCL_DEBUG_SUBSYS": "INIT,GRAPH,ENV,TUNING,NET,VERSION", + # Enable GPU Direct RDMA when GPU and NIC are same PCI switch. + "NCCL_NET_GDR_LEVEL": "PIX", + # TCPX requires disabling PXN. + "NCCL_P2P_PXN_LEVEL": "0", + # The NCCL_GPU_DIRECTTCPX variables can not be tweaked. 
+ "NCCL_GPUDIRECTTCPX_FORCE_ACK": "0", + "NCCL_GPUDIRECTTCPX_TX_COMPLETION_NANOSLEEP": "1000", + "NCCL_GPUDIRECTTCPX_PROGRAM_FLOW_STEERING_WAIT_MICROS": "1000000", + "NCCL_GPUDIRECTTCPX_TX_BINDINGS": ( + "eth1:8-21,112-125;eth2:8-21,112-125;" + "eth3:60-73,164-177;eth4:60-73,164-177" + ), + "NCCL_GPUDIRECTTCPX_RX_BINDINGS": ( + "eth1:22-35,124-139;eth2:22-35,124-139;" + "eth3:74-87,178-191;eth4:74-87,178-191" + ), + "NCCL_GPUDIRECTTCPX_SOCKET_IFNAME": "eth1,eth2,eth3,eth4", + "NCCL_GPUDIRECTTCPX_CTRL_DEV": "eth0", + "NCCL_GPUDIRECTTCPX_UNIX_CLIENT_PREFIX": "/run/tcpx", + # Improves performance but can be tweaked. + "NCCL_DYNAMIC_CHUNK_SIZE": "524288", + "NCCL_P2P_NET_CHUNKSIZE": "524288", + "NCCL_P2P_PCI_CHUNKSIZE": "524288", + "NCCL_P2P_NVL_CHUNKSIZE": "1048576", + # The number of sockets per thread improves performance. + "NCCL_NSOCKS_PERTHREAD": "4", + "NCCL_SOCKET_NTHREADS": "1", + # Use the system NIC for NCCL control plane comms. + "NCCL_SOCKET_IFNAME": "eth0", + # TCPX is not compatible with NVLS. + "NCCL_NVLS_ENABLE": "0", + } + ) + + # Override env vars with user provided env vars. + env_vars.update(cfg.env_vars) + # K8s expects each env variable to be a dict. + k8s_env_vars = [{"name": name, "value": value} for name, value in env_vars.items()] + k8s_env_vars.append( + { + "name": "PROCESS_ID", + "valueFrom": { + "fieldRef": { + "fieldPath": ( + "metadata.annotations['batch.kubernetes.io/" "job-completion-index']"
```suggestion "metadata.annotations['batch.kubernetes.io/job-completion-index']" ```
axlearn
github_2023
python
517
apple
markblee
@@ -639,6 +639,384 @@ def _execute(self) -> Any: ) +class GPUGKEJob(GKEJob): + """A GPU job represented as a k8s JobSet. + + See also `gke_runner` as an example. + """ + + @config_class + class Config(GKEJob.Config): + """Configures GPUGKEJob. + + Attributes: + accelerator: GPU configuration. + queue: The Kueue LocalQueue to use. If not set, no queue is used. + """ + + accelerator: AcceleratorConfig = AcceleratorConfig() + queue: Optional[str] = None + + @classmethod + def define_flags(cls, fv: flags.FlagValues): + super().define_flags(fv) + common_kwargs = dict(flag_values=fv, allow_override=True) + accelerator_flags(**common_kwargs) + flags.DEFINE_string( + "queue", + None, + "The name of the Kueue LocalQueue to use. If not set, no queue is used.", + **common_kwargs, + ) + + @classmethod + def from_flags(cls, fv: flags.FlagValues, **kwargs) -> Config: + cfg: GPUGKEJob.Config = super().from_flags(fv, **kwargs) + cfg.accelerator.set(instance_type=fv.instance_type, num_replicas=fv.num_replicas) + return cfg + + def __init__(self, cfg: Config): + bundler_cfg = cfg.bundler + bundler_cfg = getattr(bundler_cfg, "inner", bundler_cfg) + if bundler_cfg is None or not issubclass(bundler_cfg.klass, BaseDockerBundler): + raise NotImplementedError(f"Only docker bundler supported, got: {bundler_cfg}") + super().__init__(cfg) + if cfg.gcsfuse_mount: + raise NotImplementedError("GCSFuse is not supported on GKE with GPU.") + if cfg.enable_pre_provisioner: + raise NotImplementedError("Pre-provisioner is not supported on GKE with GPU.") + instance_type = cfg.accelerator.instance_type + if not instance_type.startswith("gpu-a3-highgpu"): + raise NotImplementedError( + f"The instance type {instance_type} is not supported on GKE with GPU." + "Only gpu-a3-highgpu-8g is supported" + ) + + def _build_sidecar_container(self) -> Nested[Any]: + """Builds a sidecar container which is required by A3 and A3 Mega + for GPU to GPU RDMA like networking. 
+ + Returns: + A nested dict on machine types that require a sidecar. Otherwise, + returns {}. + """ + cfg: GPUGKEJob.Config = self.config + + # Different machine types require different sidecar containers. + # For example A3 requires a tcpx socket but A3 Mega does not. + # GCP docs for A3 TCPX: + # https://cloud.google.com/kubernetes-engine/docs/how-to/gpu-bandwidth-gpudirect-tcpx#add-gpudirect-manifests + if cfg.accelerator.instance_type.startswith("gpu-a3-highgpu"): + volume_mounts = [ + { + "name": "nvidia-install-dir-host", + "mountPath": "/usr/local/nvidia/lib64", + }, + { + "name": "tcpx-socket", + "mountPath": "/run/tcpx", + }, + ] + + command = [ + "bash", + "-c", + 'set -x; /tcpgpudmarxd/build/app/tcpgpudmarxd --gpu_nic_preset a3vm \ + --gpu_shmem_type fd --uds_path /run/tcpx \ + --setup_param "--verbose 128 2 0" & \n\ + while [ ! -f /run/tcpx/terminated ]; do sleep 10; done;', + ] + + return dict( + name="tcpx-daemon", + image="us-docker.pkg.dev/gce-ai-infra/gpudirect-tcpx/tcpgpudmarxd-dev:v2.0.11", + securityContext={"privileged": True}, + command=command, + env=[{"name": "LD_LIBRARY_PATH", "value": "/usr/local/nvidia/lib64"}], + volumeMounts=volume_mounts, + ) + + return {} + + def _build_main_container(self) -> Nested[Any]: + """Builds the config for the container running the job. + + Returns: + A nested dict corresponding to a k8s Container config. 
+ """ + cfg: GPUGKEJob.Config = self.config + volume_mounts = [{"name": "shared-memory", "mountPath": "/dev/shm"}] + env_vars: Dict[str, str] = {} + env_vars["DISTRIBUTED_COORDINATOR"] = f"{cfg.name}-job-0-0.{cfg.name}:8080" + env_vars["NUM_PROCESSES"] = f"{cfg.accelerator.num_replicas}" + + default_xla_flags = [ + "--xla_gpu_enable_latency_hiding_scheduler=true", + "--xla_gpu_all_reduce_contiguous", + "--xla_gpu_all_reduce_combine_threshold_bytes=1073741824", + "--xla_gpu_all_gather_combine_threshold_bytes=1073741824", + "--xla_gpu_reduce_scatter_combine_threshold_bytes=1073741824", + ] + env_vars["XLA_FLAGS"] = " ".join(default_xla_flags) + + if cfg.accelerator.instance_type.startswith("gpu-a3-highgpu"): + volume_mounts.extend( + [ + {"name": "tcpx-socket", "mountPath": "/run/tcpx"}, + {"name": "nvidia-install-dir-host", "mountPath": "/usr/local/nvidia/lib64"}, + {"name": "tcpx-nccl-plugin-volume", "mountPath": "/usr/local/tcpx"}, + ] + ) + env_vars.update( + { + "LD_LIBRARY_PATH": "/usr/local/tcpx/lib64:/usr/local/nvidia/lib64", + # Set to 0 to encourage rail alignment. + "NCCL_CROSS_NIC": "0", + # TCPX only supports Ring algorithm. + "NCCL_ALGO": "Ring", + # TCPX only supports Simple protocol. + "NCCL_PROTO": "Simple", + "NCCL_DEBUG": "WARN", + "NCCL_DEBUG_SUBSYS": "INIT,GRAPH,ENV,TUNING,NET,VERSION", + # Enable GPU Direct RDMA when GPU and NIC are same PCI switch. + "NCCL_NET_GDR_LEVEL": "PIX", + # TCPX requires disabling PXN. + "NCCL_P2P_PXN_LEVEL": "0", + # The NCCL_GPU_DIRECTTCPX variables can not be tweaked. 
+ "NCCL_GPUDIRECTTCPX_FORCE_ACK": "0", + "NCCL_GPUDIRECTTCPX_TX_COMPLETION_NANOSLEEP": "1000", + "NCCL_GPUDIRECTTCPX_PROGRAM_FLOW_STEERING_WAIT_MICROS": "1000000", + "NCCL_GPUDIRECTTCPX_TX_BINDINGS": ( + "eth1:8-21,112-125;eth2:8-21,112-125;" + "eth3:60-73,164-177;eth4:60-73,164-177" + ), + "NCCL_GPUDIRECTTCPX_RX_BINDINGS": ( + "eth1:22-35,124-139;eth2:22-35,124-139;" + "eth3:74-87,178-191;eth4:74-87,178-191" + ), + "NCCL_GPUDIRECTTCPX_SOCKET_IFNAME": "eth1,eth2,eth3,eth4", + "NCCL_GPUDIRECTTCPX_CTRL_DEV": "eth0", + "NCCL_GPUDIRECTTCPX_UNIX_CLIENT_PREFIX": "/run/tcpx", + # Improves performance but can be tweaked. + "NCCL_DYNAMIC_CHUNK_SIZE": "524288", + "NCCL_P2P_NET_CHUNKSIZE": "524288", + "NCCL_P2P_PCI_CHUNKSIZE": "524288", + "NCCL_P2P_NVL_CHUNKSIZE": "1048576", + # The number of sockets per thread improves performance. + "NCCL_NSOCKS_PERTHREAD": "4", + "NCCL_SOCKET_NTHREADS": "1", + # Use the system NIC for NCCL control plane comms. + "NCCL_SOCKET_IFNAME": "eth0", + # TCPX is not compatible with NVLS. + "NCCL_NVLS_ENABLE": "0", + } + ) + + # Override env vars with user provided env vars. + env_vars.update(cfg.env_vars) + # K8s expects each env variable to be a dict. + k8s_env_vars = [{"name": name, "value": value} for name, value in env_vars.items()] + k8s_env_vars.append( + { + "name": "PROCESS_ID", + "valueFrom": { + "fieldRef": { + "fieldPath": ( + "metadata.annotations['batch.kubernetes.io/" "job-completion-index']" + ), + } + }, + }, + ) + + user_cmd = cfg.command + if user_cmd is None: + user_cmd = ""
Maybe we should `raise ValueError("Command should not be None.")` in this case?
axlearn
github_2023
python
543
apple
markblee
@@ -438,7 +448,18 @@ def check_supported(*supported_layers: Type): check_supported(BertPooler) axlearn_to_torch(layer.linear, src["linear"], dst.dense) # Note: always use tanh as activation here. - elif isinstance(dst, (hf_bert.BertModel, hf_roberta.RobertaModel)): + elif isinstance(dst, hf_bert.BertModel): + check_supported(TextEmbeddingEncoder, BertModel) + axlearn_to_torch(layer.encoder.emb, src["encoder"]["emb"], dst.embeddings) + axlearn_to_torch(layer.encoder.transformer, src["encoder"]["transformer"], dst.encoder) + has_pooler = "head" in src and "pooler" in src["head"] and src["head"]["pooler"] != {} + if has_pooler != (dst.pooler is not None): + raise ValueError( + "Input layer and output layer must either both have pooler, or both not." + ) + if has_pooler: + axlearn_to_torch(layer.head.pooler, src["head"]["pooler"], dst.pooler)
nit -- ```suggestion src_pooler = src.get("head", {}).get("pooler", None) if (src_pooler is not None) != (dst.pooler is not None): raise ValueError( "Input layer and output layer must either both have pooler, or both not." ) if src_pooler: axlearn_to_torch(layer.head.pooler, src_pooler, dst.pooler) ```
axlearn
github_2023
python
507
apple
markblee
@@ -233,8 +233,8 @@ def _call_model( (outputs, output_collection), where `outputs` are the return value of self._model.method(...). """ + input_batch = self._dispatch_global_batch(input_batch) # Shard and (possibly) dispatch the input batch.
Should the comment be above L236?
axlearn
github_2023
python
507
apple
markblee
@@ -0,0 +1,242 @@ +# Copyright © 2024 Apple Inc. +"""Utility to help dispatching input batches from hosts to devices.""" + +import copy +from typing import Dict, Optional, Sequence + +import jax +from jax import numpy as jnp + +from axlearn.common.config import REQUIRED, Required, config_class +from axlearn.common.module import Module +from axlearn.common.utils import PHYSICAL_TO_LOGICAL_DISPATCH_KEY, Nested, Tensor, as_numpy_array + + +class InputDispatcher(Module): + """A Module to dispatch per-feed logical input batches to global logical batches on device. + + The dispatch process consists of three steps: + - Convert each logical feed batch to a physical feed batch (logical_to_physical_batch); + - Assemble a global physical batch from per-feed batches (utils.host_to_global_device_array); + - Convert a global physical batch to a global logical batch (physical_to_logical_batch). + + This process is needed because utils.host_to_global_device_array requires that global batch + size be divisible by number of devices. + + One should set up the local input generator to read the logical shard as specified by + `feed_read_config` and batch the examples by `feed_logical_batch_size`. + One should then call `logical_to_physical_batch` on each per-feed batch, followed by + `utils.host_to_global_device_array` to generate the input array for pjit, then finally + `physical_to_logical_batch` inside pjit. + """ + + @config_class + class Config(Module.Config): + """Configuration for InputDispatcher.""" + + global_logical_batch_size: Required[int] = REQUIRED + + # Usually left unset. Defaults to + # max(feed_logical_batch_size * num_physical_feeds, jax.device_count()). + global_physical_batch_size: Optional[int] = None + + # The total number of physical feeds across all hosts. Defaults to jax.process_count(). + num_physical_feeds: Optional[int] = None + + # The local physical feed index. Must be in [0, num_physical_feeds). + # Defaults to jax.process_index(). 
+ physical_feed_index: Optional[int] = None + + # Usually left unset. + # If not None, a list of length num_logical_feeds. logical_feed_indices[i] is an integer in + # [0, num_physical_feeds), representing the physical feed index for the i'th logical feed. + logical_feed_indices: Optional[Sequence[int]] = None + + def __init__(self, cfg: Config, *, parent: Optional[Module]): + cfg.num_physical_feeds = cfg.num_physical_feeds or jax.process_count()
Would this mutate the instantiating config?
axlearn
github_2023
python
507
apple
markblee
@@ -0,0 +1,242 @@ +# Copyright © 2024 Apple Inc. +"""Utility to help dispatching input batches from hosts to devices.""" + +import copy +from typing import Dict, Optional, Sequence + +import jax +from jax import numpy as jnp + +from axlearn.common.config import REQUIRED, Required, config_class +from axlearn.common.module import Module +from axlearn.common.utils import PHYSICAL_TO_LOGICAL_DISPATCH_KEY, Nested, Tensor, as_numpy_array + + +class InputDispatcher(Module): + """A Module to dispatch per-feed logical input batches to global logical batches on device. + + The dispatch process consists of three steps: + - Convert each logical feed batch to a physical feed batch (logical_to_physical_batch); + - Assemble a global physical batch from per-feed batches (utils.host_to_global_device_array); + - Convert a global physical batch to a global logical batch (physical_to_logical_batch). + + This process is needed because utils.host_to_global_device_array requires that global batch + size be divisible by number of devices. + + One should set up the local input generator to read the logical shard as specified by + `feed_read_config` and batch the examples by `feed_logical_batch_size`. + One should then call `logical_to_physical_batch` on each per-feed batch, followed by + `utils.host_to_global_device_array` to generate the input array for pjit, then finally + `physical_to_logical_batch` inside pjit. + """ + + @config_class + class Config(Module.Config): + """Configuration for InputDispatcher.""" + + global_logical_batch_size: Required[int] = REQUIRED + + # Usually left unset. Defaults to + # max(feed_logical_batch_size * num_physical_feeds, jax.device_count()). + global_physical_batch_size: Optional[int] = None + + # The total number of physical feeds across all hosts. Defaults to jax.process_count(). + num_physical_feeds: Optional[int] = None + + # The local physical feed index. Must be in [0, num_physical_feeds). + # Defaults to jax.process_index(). 
+ physical_feed_index: Optional[int] = None + + # Usually left unset. + # If not None, a list of length num_logical_feeds. logical_feed_indices[i] is an integer in + # [0, num_physical_feeds), representing the physical feed index for the i'th logical feed. + logical_feed_indices: Optional[Sequence[int]] = None + + def __init__(self, cfg: Config, *, parent: Optional[Module]): + cfg.num_physical_feeds = cfg.num_physical_feeds or jax.process_count() + cfg.physical_feed_index = cfg.physical_feed_index or jax.process_index() + if cfg.logical_feed_indices is None: + num_logical_feeds = min(cfg.global_logical_batch_size, cfg.num_physical_feeds) + cfg.logical_feed_indices = list(range(num_logical_feeds)) + if cfg.global_physical_batch_size is None: + num_logical_feeds = len(cfg.logical_feed_indices) + feed_logical_batch_size = cfg.global_logical_batch_size // num_logical_feeds + cfg.global_physical_batch_size = max( + feed_logical_batch_size * cfg.num_physical_feeds, jax.device_count() + ) + super().__init__(cfg, parent=parent) + cfg = self.config + if cfg.global_logical_batch_size % self.num_logical_feeds != 0: + raise ValueError( + f"global_logical_batch_size {cfg.global_logical_batch_size} must be " + f"divisible by num_logical_feeds {self.num_logical_feeds}" + ) + if cfg.global_physical_batch_size % cfg.num_physical_feeds != 0: + raise ValueError( + f"global_logical_batch_size {cfg.global_physical_batch_size} must be " + f"divisible by num_logical_feeds {self.num_physical_feeds}" + ) + if self.feed_physical_batch_size < self.feed_logical_batch_size: + raise ValueError( + f"feed_physical_batch_size {self.feed_physical_batch_size} must be " + f">= feed_logical_batch_size {self.feed_logical_batch_size}" + ) + if not 0 <= cfg.physical_feed_index < cfg.num_physical_feeds: + raise ValueError( + f"physical_feed_index {cfg.physical_feed_index} must be " + f"in range [0, {cfg.num_physical_feeds})" + ) + if not all(0 <= ix < cfg.num_physical_feeds for ix in 
cfg.logical_feed_indices): + raise ValueError( + f"Invalid physical feed index in {cfg.logical_feed_indices}: must be " + f"in range [0, {cfg.num_physical_feeds})" + ) + if len(set(cfg.logical_feed_indices)) != len(cfg.logical_feed_indices): + raise ValueError(f"logical_feed_indices must be unique: {cfg.logical_feed_indices}") + + @property + def num_logical_feeds(self) -> int: + return len(self.config.logical_feed_indices) + + @property + def logical_feed_index(self) -> Optional[int]: + cfg = self.config + if cfg.physical_feed_index in cfg.logical_feed_indices: + return cfg.logical_feed_indices.index(cfg.physical_feed_index) + return None + + @property + def feed_logical_batch_size(self) -> int: + return self.config.global_logical_batch_size // self.num_logical_feeds + + @property + def feed_physical_batch_size(self) -> int: + cfg = self.config + return cfg.global_physical_batch_size // cfg.num_physical_feeds + + def feed_read_config(self) -> Dict[str, int]: + """Generates the read configuration for the local physical feed. + + Returns: + A dict containing: + - "num_shards": The total number of logical shards to split the source by; + - "shard_index": The logical shard index to read for the local physical feed. + """ + cfg = self.config + # Set the read config to draw unique data for num logical feed indices only. + num_shards = self.num_logical_feeds + shard_index = self.logical_feed_index + if shard_index is None: + # Read an arbitrary shard. It doesn't matter which shard we read, since the results are + # not included by the result of `physical_to_logical_batch`. + # Here we try to distribute the logical shards evenly. 
+ non_logical_feed_indices = [ + ix for ix in range(cfg.num_physical_feeds) if ix not in cfg.logical_feed_indices + ] + shard_index = non_logical_feed_indices.index(cfg.physical_feed_index) % num_shards + return dict(num_shards=num_shards, shard_index=shard_index) + + def logical_to_physical_batch(self, logical_feed_batch: Nested[Tensor]) -> Nested[Tensor]: + """Converts a per-feed logical batch to a per-feed physical batch. + + Specifically, pads the batch to feed_physical_batch_size and adds a dispatch Tensor under + key PHYSICAL_TO_LOGICAL_DISPATCH_KEY, which will be used by physical_to_logical_batch later. + + Args: + logical_feed_batch: A per-feed logical batch, where every leaf Tensor should be of + shape [feed_logical_batch_size, ...]. + + Returns: + A per-feed physical batch, where every leaf Tensor should be of shape + [feed_physical_batch_size, ...]. + """ + cfg = self.config + if ( + cfg.global_logical_batch_size == cfg.global_physical_batch_size + and cfg.num_physical_feeds == self.num_logical_feeds + ): + return copy.deepcopy(logical_feed_batch) + feed_physical_batch_size = self.feed_physical_batch_size + feed_logical_batch_size = self.feed_logical_batch_size + + def pad_to_physical_batch_size(x: Tensor): + if x.ndim < 1 or x.shape[0] != feed_logical_batch_size: + raise NotImplementedError( + "Shape does not match logical batch size: " + f"{x.shape} vs. {feed_logical_batch_size}" + ) + if cfg.physical_feed_index not in cfg.logical_feed_indices: + x = jnp.zeros_like(as_numpy_array(x)) + if feed_logical_batch_size == feed_physical_batch_size: + return x + pad_size = feed_physical_batch_size - feed_logical_batch_size + if pad_size < 0: + raise ValueError(f"{feed_physical_batch_size} < {feed_logical_batch_size}")
Is this a case that we expect we can run into? Should it be an assertion given checks in init?
axlearn
github_2023
python
507
apple
markblee
@@ -0,0 +1,242 @@ +# Copyright © 2024 Apple Inc. +"""Utility to help dispatching input batches from hosts to devices.""" + +import copy +from typing import Dict, Optional, Sequence + +import jax +from jax import numpy as jnp + +from axlearn.common.config import REQUIRED, Required, config_class +from axlearn.common.module import Module +from axlearn.common.utils import PHYSICAL_TO_LOGICAL_DISPATCH_KEY, Nested, Tensor, as_numpy_array + + +class InputDispatcher(Module): + """A Module to dispatch per-feed logical input batches to global logical batches on device. + + The dispatch process consists of three steps: + - Convert each logical feed batch to a physical feed batch (logical_to_physical_batch); + - Assemble a global physical batch from per-feed batches (utils.host_to_global_device_array); + - Convert a global physical batch to a global logical batch (physical_to_logical_batch). + + This process is needed because utils.host_to_global_device_array requires that global batch + size be divisible by number of devices. + + One should set up the local input generator to read the logical shard as specified by + `feed_read_config` and batch the examples by `feed_logical_batch_size`. + One should then call `logical_to_physical_batch` on each per-feed batch, followed by + `utils.host_to_global_device_array` to generate the input array for pjit, then finally + `physical_to_logical_batch` inside pjit. + """ + + @config_class + class Config(Module.Config): + """Configuration for InputDispatcher.""" + + global_logical_batch_size: Required[int] = REQUIRED + + # Usually left unset. Defaults to + # max(feed_logical_batch_size * num_physical_feeds, jax.device_count()). + global_physical_batch_size: Optional[int] = None + + # The total number of physical feeds across all hosts. Defaults to jax.process_count(). + num_physical_feeds: Optional[int] = None + + # The local physical feed index. Must be in [0, num_physical_feeds). + # Defaults to jax.process_index(). 
+ physical_feed_index: Optional[int] = None + + # Usually left unset. + # If not None, a list of length num_logical_feeds. logical_feed_indices[i] is an integer in + # [0, num_physical_feeds), representing the physical feed index for the i'th logical feed. + logical_feed_indices: Optional[Sequence[int]] = None + + def __init__(self, cfg: Config, *, parent: Optional[Module]): + cfg.num_physical_feeds = cfg.num_physical_feeds or jax.process_count() + cfg.physical_feed_index = cfg.physical_feed_index or jax.process_index() + if cfg.logical_feed_indices is None: + num_logical_feeds = min(cfg.global_logical_batch_size, cfg.num_physical_feeds) + cfg.logical_feed_indices = list(range(num_logical_feeds)) + if cfg.global_physical_batch_size is None: + num_logical_feeds = len(cfg.logical_feed_indices) + feed_logical_batch_size = cfg.global_logical_batch_size // num_logical_feeds + cfg.global_physical_batch_size = max( + feed_logical_batch_size * cfg.num_physical_feeds, jax.device_count() + ) + super().__init__(cfg, parent=parent) + cfg = self.config + if cfg.global_logical_batch_size % self.num_logical_feeds != 0: + raise ValueError( + f"global_logical_batch_size {cfg.global_logical_batch_size} must be " + f"divisible by num_logical_feeds {self.num_logical_feeds}" + ) + if cfg.global_physical_batch_size % cfg.num_physical_feeds != 0: + raise ValueError( + f"global_logical_batch_size {cfg.global_physical_batch_size} must be " + f"divisible by num_logical_feeds {self.num_physical_feeds}" + ) + if self.feed_physical_batch_size < self.feed_logical_batch_size: + raise ValueError( + f"feed_physical_batch_size {self.feed_physical_batch_size} must be " + f">= feed_logical_batch_size {self.feed_logical_batch_size}" + ) + if not 0 <= cfg.physical_feed_index < cfg.num_physical_feeds: + raise ValueError( + f"physical_feed_index {cfg.physical_feed_index} must be " + f"in range [0, {cfg.num_physical_feeds})" + ) + if not all(0 <= ix < cfg.num_physical_feeds for ix in 
cfg.logical_feed_indices): + raise ValueError( + f"Invalid physical feed index in {cfg.logical_feed_indices}: must be " + f"in range [0, {cfg.num_physical_feeds})" + ) + if len(set(cfg.logical_feed_indices)) != len(cfg.logical_feed_indices): + raise ValueError(f"logical_feed_indices must be unique: {cfg.logical_feed_indices}") + + @property + def num_logical_feeds(self) -> int: + return len(self.config.logical_feed_indices) + + @property + def logical_feed_index(self) -> Optional[int]: + cfg = self.config + if cfg.physical_feed_index in cfg.logical_feed_indices: + return cfg.logical_feed_indices.index(cfg.physical_feed_index) + return None + + @property + def feed_logical_batch_size(self) -> int: + return self.config.global_logical_batch_size // self.num_logical_feeds + + @property + def feed_physical_batch_size(self) -> int: + cfg = self.config + return cfg.global_physical_batch_size // cfg.num_physical_feeds + + def feed_read_config(self) -> Dict[str, int]: + """Generates the read configuration for the local physical feed. + + Returns: + A dict containing: + - "num_shards": The total number of logical shards to split the source by; + - "shard_index": The logical shard index to read for the local physical feed. + """ + cfg = self.config + # Set the read config to draw unique data for num logical feed indices only. + num_shards = self.num_logical_feeds + shard_index = self.logical_feed_index + if shard_index is None: + # Read an arbitrary shard. It doesn't matter which shard we read, since the results are + # not included by the result of `physical_to_logical_batch`. + # Here we try to distribute the logical shards evenly. 
+ non_logical_feed_indices = [ + ix for ix in range(cfg.num_physical_feeds) if ix not in cfg.logical_feed_indices + ] + shard_index = non_logical_feed_indices.index(cfg.physical_feed_index) % num_shards + return dict(num_shards=num_shards, shard_index=shard_index) + + def logical_to_physical_batch(self, logical_feed_batch: Nested[Tensor]) -> Nested[Tensor]:
I suppose it's also intended to work with `Nested[tf.Tensor]`?
axlearn
github_2023
python
507
apple
markblee
@@ -0,0 +1,242 @@ +# Copyright © 2024 Apple Inc. +"""Utility to help dispatching input batches from hosts to devices.""" + +import copy +from typing import Dict, Optional, Sequence + +import jax +from jax import numpy as jnp + +from axlearn.common.config import REQUIRED, Required, config_class +from axlearn.common.module import Module +from axlearn.common.utils import PHYSICAL_TO_LOGICAL_DISPATCH_KEY, Nested, Tensor, as_numpy_array + + +class InputDispatcher(Module): + """A Module to dispatch per-feed logical input batches to global logical batches on device. + + The dispatch process consists of three steps: + - Convert each logical feed batch to a physical feed batch (logical_to_physical_batch); + - Assemble a global physical batch from per-feed batches (utils.host_to_global_device_array); + - Convert a global physical batch to a global logical batch (physical_to_logical_batch). + + This process is needed because utils.host_to_global_device_array requires that global batch + size be divisible by number of devices. + + One should set up the local input generator to read the logical shard as specified by + `feed_read_config` and batch the examples by `feed_logical_batch_size`. + One should then call `logical_to_physical_batch` on each per-feed batch, followed by + `utils.host_to_global_device_array` to generate the input array for pjit, then finally + `physical_to_logical_batch` inside pjit. + """ + + @config_class + class Config(Module.Config): + """Configuration for InputDispatcher.""" + + global_logical_batch_size: Required[int] = REQUIRED + + # Usually left unset. Defaults to + # max(feed_logical_batch_size * num_physical_feeds, jax.device_count()). + global_physical_batch_size: Optional[int] = None + + # The total number of physical feeds across all hosts. Defaults to jax.process_count(). + num_physical_feeds: Optional[int] = None + + # The local physical feed index. Must be in [0, num_physical_feeds). + # Defaults to jax.process_index(). 
+ physical_feed_index: Optional[int] = None + + # Usually left unset. + # If not None, a list of length num_logical_feeds. logical_feed_indices[i] is an integer in + # [0, num_physical_feeds), representing the physical feed index for the i'th logical feed. + logical_feed_indices: Optional[Sequence[int]] = None + + def __init__(self, cfg: Config, *, parent: Optional[Module]): + cfg.num_physical_feeds = cfg.num_physical_feeds or jax.process_count() + cfg.physical_feed_index = cfg.physical_feed_index or jax.process_index() + if cfg.logical_feed_indices is None: + num_logical_feeds = min(cfg.global_logical_batch_size, cfg.num_physical_feeds) + cfg.logical_feed_indices = list(range(num_logical_feeds)) + if cfg.global_physical_batch_size is None: + num_logical_feeds = len(cfg.logical_feed_indices) + feed_logical_batch_size = cfg.global_logical_batch_size // num_logical_feeds + cfg.global_physical_batch_size = max( + feed_logical_batch_size * cfg.num_physical_feeds, jax.device_count() + ) + super().__init__(cfg, parent=parent) + cfg = self.config + if cfg.global_logical_batch_size % self.num_logical_feeds != 0: + raise ValueError( + f"global_logical_batch_size {cfg.global_logical_batch_size} must be " + f"divisible by num_logical_feeds {self.num_logical_feeds}" + ) + if cfg.global_physical_batch_size % cfg.num_physical_feeds != 0: + raise ValueError( + f"global_logical_batch_size {cfg.global_physical_batch_size} must be " + f"divisible by num_logical_feeds {self.num_physical_feeds}" + ) + if self.feed_physical_batch_size < self.feed_logical_batch_size: + raise ValueError( + f"feed_physical_batch_size {self.feed_physical_batch_size} must be " + f">= feed_logical_batch_size {self.feed_logical_batch_size}" + ) + if not 0 <= cfg.physical_feed_index < cfg.num_physical_feeds: + raise ValueError( + f"physical_feed_index {cfg.physical_feed_index} must be " + f"in range [0, {cfg.num_physical_feeds})" + ) + if not all(0 <= ix < cfg.num_physical_feeds for ix in 
cfg.logical_feed_indices): + raise ValueError( + f"Invalid physical feed index in {cfg.logical_feed_indices}: must be " + f"in range [0, {cfg.num_physical_feeds})" + ) + if len(set(cfg.logical_feed_indices)) != len(cfg.logical_feed_indices): + raise ValueError(f"logical_feed_indices must be unique: {cfg.logical_feed_indices}") + + @property + def num_logical_feeds(self) -> int: + return len(self.config.logical_feed_indices) + + @property + def logical_feed_index(self) -> Optional[int]: + cfg = self.config + if cfg.physical_feed_index in cfg.logical_feed_indices: + return cfg.logical_feed_indices.index(cfg.physical_feed_index) + return None + + @property + def feed_logical_batch_size(self) -> int: + return self.config.global_logical_batch_size // self.num_logical_feeds + + @property + def feed_physical_batch_size(self) -> int: + cfg = self.config + return cfg.global_physical_batch_size // cfg.num_physical_feeds + + def feed_read_config(self) -> Dict[str, int]: + """Generates the read configuration for the local physical feed. + + Returns: + A dict containing: + - "num_shards": The total number of logical shards to split the source by; + - "shard_index": The logical shard index to read for the local physical feed. + """ + cfg = self.config + # Set the read config to draw unique data for num logical feed indices only. + num_shards = self.num_logical_feeds + shard_index = self.logical_feed_index + if shard_index is None: + # Read an arbitrary shard. It doesn't matter which shard we read, since the results are + # not included by the result of `physical_to_logical_batch`. + # Here we try to distribute the logical shards evenly. + non_logical_feed_indices = [ + ix for ix in range(cfg.num_physical_feeds) if ix not in cfg.logical_feed_indices + ]
nit -- might be worth an assertion on the length of `non_logical_feed_indices`?
axlearn
github_2023
python
507
apple
markblee
@@ -249,6 +249,16 @@ def _call_model( is_training=False, ) + def _dispatch_global_batch(self, input_batch: NestedTensor) -> NestedTensor: + module = self.parent + while module is not None: + if isinstance(module, SpmdEvaler): + break
nit -- you could also fold the break into the while condition.
axlearn
github_2023
python
507
apple
markblee
@@ -0,0 +1,242 @@ +# Copyright © 2024 Apple Inc.
```suggestion # Copyright © 2024 Apple Inc. ```
axlearn
github_2023
python
507
apple
markblee
@@ -863,6 +868,79 @@ def fn(ds: tf.data.Dataset) -> tf.data.Dataset: return fn +def per_feed_batch( + feed_batch_size: int, + *, + is_training: bool, + pad_example_fn: PadExampleFn, + prefetch_buffer_size: Optional[int] = None, + post_batch_processor: Optional[ConfigOr[DatasetToDatasetFn]] = None, + repeat: Optional[int] = None, +) -> DatasetToDatasetFn: + """Returns a function that generates a tf.data.Dataset object.
nit -- the comment seems to suggest it returns a source instead of a processor.
axlearn
github_2023
python
507
apple
markblee
@@ -863,6 +868,79 @@ def fn(ds: tf.data.Dataset) -> tf.data.Dataset: return fn +def per_feed_batch( + feed_batch_size: int, + *, + is_training: bool, + pad_example_fn: PadExampleFn, + prefetch_buffer_size: Optional[int] = None, + post_batch_processor: Optional[ConfigOr[DatasetToDatasetFn]] = None, + repeat: Optional[int] = None, +) -> DatasetToDatasetFn: + """Returns a function that generates a tf.data.Dataset object. + + Note: per_feed_batch(is_training=True) requires sufficient number of examples + per feed. When your data is too small, you should add `ds = ds.repeat()` + before batching. + + Args: + feed_batch_size: The per-feed batch size. + is_training: Whether the examples are used for training. + pad_example_fn: Create padded examples with the given function. + prefetch_buffer_size: Size of prefetch buffer. This allows later + elements to be prepared while the current element is being + processed. If not set, `tf.data.experimental.AUTOTUNE` is used. + post_batch_processor: An optional processor (or config instantiating to a processor) that + applies batch-wise processing functions. + repeat: The number of times to repeat the batches from the dataset. + If None, repeat indefinitely if is_training=True and do not repeat otherwise. + Otherwise must be a positive integer. + + Returns: + A DatasetToDataset fn. + + Raises: + ValueError: If repeat is not a positive integer. + """ + if repeat is not None and (not isinstance(repeat, int) or repeat <= 0): + raise ValueError(f"Invalid repeat (must be a positive integer): {repeat}") + + def fn(ds: tf.data.Dataset) -> tf.data.Dataset: + if not is_training: + # Pad for evaluation. + ds = _pad_for_evaluation( + ds, + per_feed_batch_size=feed_batch_size, + pad_example_fn=pad_example_fn, + ) + + # Batch. + ds = ds.batch(feed_batch_size, drop_remainder=True) + + # Post batch processing methods at batch-level. 
+ if post_batch_processor: + ds = maybe_instantiate(post_batch_processor)(ds) + + if not is_training and jax.process_count() > 1: + num_eval_batches = _infer_cardinality(ds) + logging.info("Feed has %s eval batches.", num_eval_batches) + multihost_utils.assert_equal( + num_eval_batches, + f"Number of eval batches are not all equal ({num_eval_batches})", + ) + + if repeat is None: + if is_training: + ds = ds.repeat() + else: + ds = ds.repeat(repeat)
nit -- ```suggestion if repeat is not None: ds = ds.repeat(repeat) elif is_training: ds = ds.repeat() ```
axlearn
github_2023
python
507
apple
markblee
@@ -1064,9 +1159,24 @@ class Config(Module.Config): # A config that instantiates to a DatasetToDatasetFn, which performs batching of examples. batcher: InstantiableConfig = config_for_function(batch) + # If not None, creates an InputDispatcher and use it for dispatching per-feed batches to
```suggestion # If not None, creates an InputDispatcher and uses it for dispatching per-feed batches to ```
axlearn
github_2023
python
507
apple
markblee
@@ -1064,9 +1159,24 @@ class Config(Module.Config): # A config that instantiates to a DatasetToDatasetFn, which performs batching of examples. batcher: InstantiableConfig = config_for_function(batch) + # If not None, creates an InputDispatcher and use it for dispatching per-feed batches to + # global batches. + input_dispatcher: Optional[InputDispatcher] = None + def __init__(self, cfg: Config, *, parent: Optional[Module]): super().__init__(cfg, parent=parent) cfg = self.config + if cfg.input_dispatcher is not None: + self._add_child("input_dispatcher", cfg.input_dispatcher) + # Let input_dispatcher determine num_shards and shard_index for tfds_read_config. + feed_read_config = self.input_dispatcher.feed_read_config() + set_read_config_recursively(cfg.source, **feed_read_config) + if cfg.batcher.fn is per_feed_batch: + # If using `per_feed_batch`, set feed_batch_size according to `input_batcher`.
```suggestion # If using `per_feed_batch`, set feed_batch_size according to `input_dispatcher`. ```
axlearn
github_2023
python
528
apple
markblee
@@ -1963,21 +1963,24 @@ def rel_pos_to_abs_pos(x: Tensor) -> Tensor: Args: x: a Tensor of shape [T, 2*T - 1], where x[i, j] represents the bias between query[i] and absolute position k = i + j - (T - 1), if 0 <= k < T, otherwise the value is not used. + T >= 1. Returns: y: a Tensor of shape [T, T], s.t. y[i, k] = x[i, j] where k = i + j - (T - 1), if 0 <= k < T. """ t, offset_length = x.shape assert offset_length == 2 * t - 1 + if t == 1:
nit -- ```suggestion if t <= 1: ```
axlearn
github_2023
python
528
apple
markblee
@@ -1963,21 +1963,24 @@ def rel_pos_to_abs_pos(x: Tensor) -> Tensor: Args: x: a Tensor of shape [T, 2*T - 1], where x[i, j] represents the bias between query[i] and absolute position k = i + j - (T - 1), if 0 <= k < T, otherwise the value is not used. + T >= 1. Returns: y: a Tensor of shape [T, T], s.t. y[i, k] = x[i, j] where k = i + j - (T - 1), if 0 <= k < T. """ t, offset_length = x.shape assert offset_length == 2 * t - 1 + if t == 1: + return x # [t * (2t - 1)]. x = x.reshape([-1]) # [t * (2t - 2)]. x = x[t - 1 : -1] # [t, 2t - 2]. x = x.reshape([t, -1]) - # [t, t]. - x = x[:, : -(t - 2)] + # [t, t]. When t = 2, do not trim. + x = x[:, : -(t - 2) or None]
An if statement is probably preferable here?
axlearn
github_2023
python
528
apple
markblee
@@ -1963,21 +1963,25 @@ def rel_pos_to_abs_pos(x: Tensor) -> Tensor: Args: x: a Tensor of shape [T, 2*T - 1], where x[i, j] represents the bias between query[i] and absolute position k = i + j - (T - 1), if 0 <= k < T, otherwise the value is not used. + T >= 1.
```suggestion T is expected to be >= 1. ```
axlearn
github_2023
python
525
apple
jinglu1
@@ -425,6 +428,15 @@ def _build_container(self) -> Nested[Any]: if cfg.enable_tpu_ici_resiliency is not None: env_vars["ENABLE_ICI_RESILIENCY"] = str(cfg.enable_tpu_ici_resiliency).lower() + resources = {"limits": {"google.com/tpu": system.chips_per_vm}} + # Set request memory by host machine type. + machine_memory_gb = GCE_MACHINE_TYPE_TO_REQUEST_MEMORY_CHARACTERISTICS.get( + system.gce_machine_type, None + ) + if machine_memory_gb is not None: + resources["limits"]["memory"] = f"{machine_memory_gb}G" + resources["requests"] = {"memory": f"{round(machine_memory_gb * 0.8, 2)}G"}
Does it mean that we are reserving 20% of memory for system software?
axlearn
github_2023
python
450
apple
jiya-zhang
@@ -149,4 +152,50 @@ def make_single_host_config(base_config_name: str) -> SpmdTrainer.Config: config_map[f"{config_name}-single-host"] = functools.partial( make_single_host_config, config_name ) + + if model_size == "test": + + def make_simple_test_config( + base_config_name: str, + batch_size: int = 32, + eval_every_n_steps: int = 2000, + max_step: int = 3000, + ) -> SpmdTrainer.Config: + """Make a variant of fuji-test of the test config + that terminates early and saves checkpoints frequently. + + Args: + base_config_name: The test config name. + batch_size: The global batch size for training inputs. + eval_every_n_steps: How often to run evaluation. + max_step: The maximum number of training steps. + + Returns: + A trainer config that can run for a short period of time. + """ + + # pytype: disable=annotation-type-mismatch + cfg: SpmdTrainer.Config = config_map[base_config_name]().clone() + # pytype: enable=annotation-type-mismatch + + cfg.input.batcher.global_batch_size = batch_size + for evaler in cfg.evalers.values(): + evaler.input.batcher.global_batch_size = batch_size + evaler.set( + eval_policy=config_for_function(eval_every_n_steps_policy).set( + n=eval_every_n_steps + ) + ) + cfg.max_step = max_step + cfg.mesh_shape = mesh_shape_from_axes(data=-1, fsdp=4) + cfg.summary_writer.write_every_n_steps = eval_every_n_steps + cfg.checkpointer.save_policy = config_for_function(every_n_steps_policy).set( + n=eval_every_n_steps
Is it possible to save checkpointer more frequently than eval? Something like save ckpt every 500 steps, eval every 1500 steps. This allows us to identify issues separately if the job hangs
axlearn
github_2023
python
450
apple
markblee
@@ -149,4 +152,51 @@ def make_single_host_config(base_config_name: str) -> SpmdTrainer.Config: config_map[f"{config_name}-single-host"] = functools.partial( make_single_host_config, config_name ) + + if model_size == "test": + + def make_simple_test_config( + base_config_name: str, + batch_size: int = 32, + eval_every_n_steps: int = 1500, + save_every_n_steps: int = 500, + max_step: int = 3000, + ) -> SpmdTrainer.Config: + """Make a variant of fuji-test of the test config + that terminates early and saves checkpoints frequently. + + Args: + base_config_name: The test config name. + batch_size: The global batch size for training inputs. + eval_every_n_steps: How often to run evaluation. + max_step: The maximum number of training steps. + + Returns: + A trainer config that can run for a short period of time. + """ + + # pytype: disable=annotation-type-mismatch + cfg: SpmdTrainer.Config = config_map[base_config_name]().clone() + # pytype: enable=annotation-type-mismatch + + cfg.input.batcher.global_batch_size = batch_size + for evaler in cfg.evalers.values(): + evaler.input.batcher.global_batch_size = batch_size + evaler.set( + eval_policy=config_for_function(eval_every_n_steps_policy).set( + n=eval_every_n_steps + ) + ) + cfg.max_step = max_step + cfg.mesh_shape = mesh_shape_from_axes(data=-1, fsdp=4) + cfg.summary_writer.write_every_n_steps = eval_every_n_steps + cfg.checkpointer.save_policy = config_for_function(every_n_steps_policy).set( + n=save_every_n_steps + )
For these, have you considered tweaking the kwargs here directly? https://github.com/apple/axlearn/blob/c2c8a935a8ea339cdf0e0ffad6d48e005455dbe4/axlearn/experiments/text/gpt/fuji.py#L85-L104 It looks like we can add a mesh rule for the accelerators that you are testing on, too. For reference, the kwargs will be passed to https://github.com/apple/axlearn/blob/c2c8a935a8ea339cdf0e0ffad6d48e005455dbe4/axlearn/experiments/text/gpt/common.py#L466-L482.
axlearn
github_2023
python
450
apple
markblee
@@ -140,6 +140,29 @@ def get_trainer_kwargs(model_size: str, *, vocab_size: int, version: Version) -> ), ), ) + elif model_size == "simple":
Thanks! Does this need to be separate from `"test"` (which is itself intended to be the testing configuration)? In particular, we can configure `mesh_rules` for the accelerator that you are testing on. This way, it'll run on both CPU and the target testing hardware. The only other differences seem to be batch sizes and eval/saving more frequently, which seem tolerable as defaults. WDYT?
axlearn
github_2023
python
450
apple
markblee
@@ -98,8 +98,11 @@ def get_trainer_kwargs(model_size: str, *, vocab_size: int, version: Version) -> weight_decay=0.01, ), max_sequence_length=64, - train_batch_size=16, + train_batch_size=32, + eval_batch_size=32, max_step=3000, + eval_every_n_steps=1500, + save_every_n_steps=500, mesh_shape=mesh_shape_from_axes(), # cpu
You can probably get away with just changing this to ```suggestion mesh_shape=mesh_shape_from_axes(data=-1), ``` On CPU, this completes to `(1,1,1,1,1)`, on v4-8 it completes to `(4,1,1,1,1)`. You can also do `fsdp=-1` if you instead want to test against `(1,1,4,1,1)`, although the configs are small enough that it probably does not matter.
axlearn
github_2023
python
420
apple
markblee
@@ -199,7 +199,7 @@ class WandBWriter(BaseWriter): Note: This utility does not support restarts gracefully. - If the job is pre-empted, the logger will create a new run. + If the job is pre-emptied, the logger will create a new run.
```suggestion If the job is preempted, the logger will create a new run. ```
axlearn
github_2023
python
485
apple
jiya-zhang
@@ -0,0 +1,142 @@ +# Copyright © 2024 Apple Inc. + +"""A script to compute goodput and upload to Cloud Monitoring. + +This can be run as a daemon for each training job for which `GoodputRecorder` is configured. + +Example: + + python3 -m axlearn.experiments.calculate_goodput --job_name=my-test-job + +""" + +import time +from datetime import datetime + +from absl import app, flags, logging +from googleapiclient import discovery, errors +from ml_goodput_measurement import goodput + +from axlearn.cloud.gcp.config import gcp_settings +from axlearn.cloud.gcp.utils import get_credentials + +FLAGS = flags.FLAGS +_METRIC_NAME = "goodput" + + +def _private_flags(): + flags.DEFINE_string("project", None, "GCP project.") + flags.DEFINE_string("zone", None, "GCP zone.") + flags.DEFINE_string("job_name", None, "Name of job.", required=True) + + +def _monitoring_resource() -> discovery.Resource: + return discovery.build( + "monitoring", "v3", credentials=get_credentials(), cache_discovery=False + ).projects() + + +def create_custom_metric(*, project: str, metric_name: str): + """Creates a custom metric if it doesn't already exist.""" + resource = _monitoring_resource().metricDescriptors() + metric_id = f"custom.googleapis.com/{metric_name}" + + try: + metric = resource.get(name=f"projects/{project}/metricDescriptors/{metric_id}").execute() + except errors.HttpError as e: + if e.status_code != 404: + raise + metric = None + + if metric is None: + logging.info("Metric %s does not exist, creating it...", metric_name) + metric = resource.create( + name=f"projects/{project}", + body={ + "name": metric_name, + "type": metric_id, + "description": f"{metric_name.capitalize()} metric.", + "displayName": metric_name.capitalize(), + "metricKind": "GAUGE", + "valueType": "DOUBLE", + }, + ).execute() + + logging.info("Using %s metric: %s", metric_name, metric) + + +def write_time_series_metric( + *, + project: str, + metric_name: str, + value: float, + resource_labels: dict, + metric_labels: 
dict, + end_time: float, +): + """Writes a custom time-series metric value.""" + resource = _monitoring_resource().timeSeries() + utc_end_time = datetime.utcfromtimestamp(end_time) + resource.create( + name=f"projects/{project}", + body={ + "timeSeries": [ + { + "metric": { + "type": f"custom.googleapis.com/{metric_name}", + "labels": { + metric_name: str(value), + "event_time": utc_end_time.strftime("%d %b %Y %H:%M:%S UTC"), + **metric_labels, + }, + }, + "resource": { + "labels": { + # The namespace/node_id labels are mandatory. + "namespace": "namespace", + "node_id": "node_id", + **resource_labels, + }, + "type": "generic_node", + }, + "points": [ + { + "interval": {"endTime": utc_end_time.strftime("%Y-%m-%dT%H:%M:%SZ")}, + "value": {"doubleValue": value}, + }, + ], + } + ] + }, + ).execute() + + +def main(_): + project, zone = gcp_settings("project"), gcp_settings("zone") + create_custom_metric(project=project, metric_name=_METRIC_NAME) + + goodput_calculator = goodput.GoodputCalculator( + job_name=FLAGS.job_name, + logger_name=f"goodput_logger_{FLAGS.job_name}", + ) + + start_time = time.time() + current_goodput = goodput_calculator.get_job_goodput() + end_time = time.time() + + print(f"Job goodput: {current_goodput:.4f}%") + print(f"Fetch time: {end_time - start_time:.2f} seconds") + + write_time_series_metric(
I believe this only writes one data point, containing the current time and current goodput? Is this the intended user journey?
axlearn
github_2023
others
485
apple
jiya-zhang
@@ -85,6 +85,7 @@ gcp = [ "google-auth[pyopenssl]", # Ensures that we have compatible pyopenssl/cryptography pins. "google-cloud-storage==2.16.0", "google-cloud-core==2.3.3", + "ml_goodput_measurement==0.0.2",
Should we wait until they release the newer version to merge? The release can be as soon as next week
axlearn
github_2023
python
481
apple
markblee
@@ -0,0 +1,17 @@ +"""Tests for AXLearn environment.""" +# pylint: disable=no-self-use,redundant-keyword-arg,too-many-function-args
OOI where did `redundant-keyword-arg,too-many-function-args` come from?
axlearn
github_2023
others
479
apple
tuzhucheng
@@ -48,9 +48,16 @@ conda install -c apple tensorflow-deps # Manually build tensorflow-text until a collaborator build is available. # This was tested using clang version 15 - you may get non-working wheels with earlier versions of clang. mkdir ~/builds && git clone https://github.com/tensorflow/text.git ~/builds/text -cd ~/builds/text && git checkout 6064f1bf8fd078777b6c8690986b908c28764a94 +cd ~/builds/text && git checkout 0f9f6df5b4da19bc7a734ba05fc4fa12bccbedbe +# Patch tensorflow-text to support tf 2.16.1. +git pull origin pull/1273/head -X ours
Thanks to @jiya-zhang's tip, if we install TF manually before trying to build `tensorflow-text`, it will not attempt to install TF again. ```suggestion pip install tensorflow==2.16.1 cd ~/builds/text && git checkout 0f9f6df5b4da19bc7a734ba05fc4fa12bccbedbe ```
axlearn
github_2023
python
476
apple
markblee
@@ -186,8 +188,10 @@ def model_config( if ffn_dim is None: ffn_dim = scaled_hidden_dim(scale=8 / 3, round_up_to_multiples_of=256) if num_kv_heads: + atten_cfg = GroupedQueryAttention.default_config()
May be worth adding a unit test?
axlearn
github_2023
python
472
apple
markblee
@@ -24,22 +25,43 @@ def sweep(self, jobs: Dict[str, JobSpec]) -> Sequence[str]: raise NotImplementedError(type(self)) +class AggregationType(Enum): + """The aggregation rule for CompositeCleaner.
```suggestion """The aggregation rule for CompositeCleaner. ```
axlearn
github_2023
python
457
apple
markblee
@@ -196,19 +196,30 @@ def _compute_target_paddings( target_labels: Tensor = input_batch["target_labels"] # Infer target_paddings from out-of-range labels. target_paddings = jnp.logical_or(cfg.vocab_size <= target_labels, target_labels < 0) + return target_paddings + def _input_stats_summary( + self, input_batch: Nested[Tensor] + ) -> Dict[str, Union[WeightedScalar, Tensor]]: + target_labels: Tensor = input_batch["target_labels"] + target_paddings = self._compute_target_paddings(target_labels) batch_size = target_labels.shape[0] target_lengths = jnp.sum(1 - target_paddings, axis=-1) - self.add_summary( - "input_stats/average_target_length", - WeightedScalar(jnp.mean(target_lengths), batch_size), - ) source_lengths = jnp.sum(1 - input_batch["paddings"], axis=-1) - self.add_summary( - "input_stats/average_source_length", - WeightedScalar(jnp.mean(source_lengths), batch_size), - ) - return target_paddings + # pytype: disable=attribute-error + ret_dict = { + "input_stats/average_target_length": WeightedScalar( + jnp.mean(target_lengths), batch_size + ), + "input_stats/average_source_length": WeightedScalar( + jnp.mean(source_lengths), batch_size + ), + "input_stats/frame_packing_effiency": WeightedScalar( + jnp.sum(source_lengths) / input_batch["paddings"].size, input_batch["paddings"].size
Guard against division by 0 here and below?
axlearn
github_2023
python
457
apple
markblee
@@ -196,19 +196,30 @@ def _compute_target_paddings( target_labels: Tensor = input_batch["target_labels"] # Infer target_paddings from out-of-range labels. target_paddings = jnp.logical_or(cfg.vocab_size <= target_labels, target_labels < 0) + return target_paddings + def _input_stats_summary( + self, input_batch: Nested[Tensor] + ) -> Dict[str, Union[WeightedScalar, Tensor]]: + target_labels: Tensor = input_batch["target_labels"] + target_paddings = self._compute_target_paddings(target_labels) batch_size = target_labels.shape[0] target_lengths = jnp.sum(1 - target_paddings, axis=-1) - self.add_summary( - "input_stats/average_target_length", - WeightedScalar(jnp.mean(target_lengths), batch_size), - ) source_lengths = jnp.sum(1 - input_batch["paddings"], axis=-1) - self.add_summary( - "input_stats/average_source_length", - WeightedScalar(jnp.mean(source_lengths), batch_size), - ) - return target_paddings + # pytype: disable=attribute-error + ret_dict = { + "input_stats/average_target_length": WeightedScalar( + jnp.mean(target_lengths), batch_size + ), + "input_stats/average_source_length": WeightedScalar( + jnp.mean(source_lengths), batch_size + ), + "input_stats/frame_packing_effiency": WeightedScalar( + jnp.sum(source_lengths) / input_batch["paddings"].size, input_batch["paddings"].size + ), + } + # pytype: enable=attribute-error + return ret_dict
Is there a specific reason to prefer returning the summaries instead of just adding them here?
axlearn
github_2023
python
457
apple
markblee
@@ -196,19 +196,30 @@ def _compute_target_paddings( target_labels: Tensor = input_batch["target_labels"] # Infer target_paddings from out-of-range labels. target_paddings = jnp.logical_or(cfg.vocab_size <= target_labels, target_labels < 0) + return target_paddings + def _input_stats_summary(
```suggestion def _input_stats_summaries( ``` or `def _add_input_stats_summaries` if we decide to inline the add, which may be more similar to other callsites in the repo.
axlearn
github_2023
python
457
apple
markblee
@@ -257,6 +268,64 @@ def predict(self, input_batch: Nested[Tensor]) -> Tensor: logits = self.lm_head(inputs) return logits * (1 - paddings[..., None]) + def _input_stats_summary( + self, input_batch: Nested[Tensor], per_example_weight: Tensor + ) -> Dict[str, Union[WeightedScalar, Tensor]]: + paddings = input_batch["paddings"] + target_paddings = self._compute_target_paddings(input_batch) + valid_frame_mask = (1.0 - paddings) * per_example_weight[:, None] + valid_label_mask = (1.0 - target_paddings) * per_example_weight[:, None] + num_valid_frames = jnp.sum(valid_frame_mask) + num_valid_labels = jnp.sum(valid_label_mask) + num_valid_examples = jnp.maximum(per_example_weight.sum(), 1.0) + # pytype: disable=attribute-error + ret_dict = { + "input_stats/average_target_length": WeightedScalar( + num_valid_labels / num_valid_examples, num_valid_examples + ), + "input_stats/average_source_length": WeightedScalar( + num_valid_frames / num_valid_examples, num_valid_examples + ), + "input_stats/frame_packing_effiency": WeightedScalar( + num_valid_frames / input_batch["paddings"].size, input_batch["paddings"].size + ), + } + # pytype: enable=attribute-error + return ret_dict + + def _loss_summary( + self, + *, + total_ctc_loss: Tensor, + per_example_weight: Tensor, + paddings: Tensor, + target_paddings: Tensor, + ) -> Dict[str, Union[WeightedScalar, Tensor]]: + valid_frame_mask = (1.0 - paddings) * per_example_weight[:, None] + valid_label_mask = (1.0 - target_paddings) * per_example_weight[:, None] + + num_valid_frames = jnp.sum(valid_frame_mask) + num_valid_labels = jnp.sum(valid_label_mask) + per_frame_loss = total_ctc_loss / num_valid_frames + per_label_loss = total_ctc_loss / num_valid_labels + batch_size = per_example_weight.shape[0]
Same here?
axlearn
github_2023
python
457
apple
markblee
@@ -257,6 +269,65 @@ def predict(self, input_batch: Nested[Tensor]) -> Tensor: logits = self.lm_head(inputs) return logits * (1 - paddings[..., None]) + def _input_stats_summaries( + self, input_batch: Nested[Tensor], per_example_weight: Tensor + ) -> Dict[str, Union[WeightedScalar, Tensor]]: + paddings = input_batch["paddings"] + target_paddings = self._compute_target_paddings(input_batch) + valid_frame_mask = (1.0 - paddings) * per_example_weight[:, None] + valid_label_mask = (1.0 - target_paddings) * per_example_weight[:, None] + num_valid_frames = jnp.sum(valid_frame_mask) + num_valid_labels = jnp.sum(valid_label_mask) + num_valid_examples = jnp.maximum(per_example_weight.sum(), 1.0) + # pytype: disable=attribute-error + num_total_frames = jnp.maximum(input_batch["paddings"].size, 1) + ret_dict = { + "input_stats/average_target_length": WeightedScalar( + num_valid_labels / num_valid_examples, num_valid_examples + ), + "input_stats/average_source_length": WeightedScalar( + num_valid_frames / num_valid_examples, num_valid_examples + ), + "input_stats/frame_packing_effiency": WeightedScalar( + num_valid_frames / num_total_frames, num_total_frames + ), + } + # pytype: enable=attribute-error + return ret_dict + + def _loss_summaries( + self, + *, + total_ctc_loss: Tensor, + per_example_weight: Tensor, + paddings: Tensor, + target_paddings: Tensor, + ) -> Dict[str, Union[WeightedScalar, Tensor]]: + valid_frame_mask = (1.0 - paddings) * per_example_weight[:, None] + valid_label_mask = (1.0 - target_paddings) * per_example_weight[:, None] + + num_valid_frames = jnp.sum(valid_frame_mask) + num_valid_labels = jnp.sum(valid_label_mask) + per_frame_loss = total_ctc_loss / num_valid_frames + per_label_loss = total_ctc_loss / num_valid_labels
Here too?
axlearn
github_2023
python
457
apple
markblee
@@ -257,6 +269,65 @@ def predict(self, input_batch: Nested[Tensor]) -> Tensor: logits = self.lm_head(inputs) return logits * (1 - paddings[..., None]) + def _input_stats_summaries( + self, input_batch: Nested[Tensor], per_example_weight: Tensor + ) -> Dict[str, Union[WeightedScalar, Tensor]]: + paddings = input_batch["paddings"] + target_paddings = self._compute_target_paddings(input_batch) + valid_frame_mask = (1.0 - paddings) * per_example_weight[:, None] + valid_label_mask = (1.0 - target_paddings) * per_example_weight[:, None] + num_valid_frames = jnp.sum(valid_frame_mask) + num_valid_labels = jnp.sum(valid_label_mask) + num_valid_examples = jnp.maximum(per_example_weight.sum(), 1.0)
A couple nits -- since we sum over weights, 1.0 may not always be appropriate. We might also consider renaming `num_valid_examples` to `total_example_weight`.
axlearn
github_2023
others
454
apple
samos123
@@ -109,6 +109,10 @@ dataflow = [ "google-apitools", # for beam pipeline "orjson==3.9.10", ] +# Triton kernel dependency. +triton = [
maybe rename `triton` to `gpu` if this is a gpu specific dependency? Not sure if there will be other gpu only dependencies
axlearn
github_2023
python
456
apple
markblee
@@ -2358,7 +2361,10 @@ class Config(BaseLayer.Config): add_dead_neuron_summary: Optional[bool] = None # Adds summary of RMS norms of the specified values. Supported value are: + # - "inputs": inputs of the layer. + # - "linear1_outputs": outputs of linear1. # - "linear2_outputs": outputs of linear2. + # TODO: deprecate this feature since we use TensorStats.
```suggestion # TODO(tlei3): deprecate this feature since we use TensorStats. ``` here and elsewhere?
axlearn
github_2023
python
456
apple
ruomingp
@@ -182,6 +182,18 @@ def add_stats(self, name: str, value: Nested[Tensor]): self.add_summary("max_abs", jnp.abs(value).max().astype(jnp.float32)) +class DefaultTensorStats(CompositeTensorStats): + """Default tensor stats that compute RMS norm and max value.""" + + @config_class + class Config(CompositeTensorStats.Config): + tensor_stats: Dict[str, TensorStats.Config] = { + "norm": TensorRMSNorm.default_config(), + "max": TensorMaxAbs.default_config(), + } + inline_child_summaries: bool = True
Nit: do we need a class? Maybe a function is enough: ``` def default_tensor_stats_config() -> TensorStats.Config: ```
axlearn
github_2023
python
446
apple
apghml
@@ -126,6 +127,61 @@ def apply(self, prng_key: Tensor, params: NestedTensor) -> NestedTensor: raise NotImplementedError(self) +class TensorStats(Module): + """An abstract Module to add summaries about the given Tensors.""" + + def add_stats(self, name: str, value: Nested[Tensor]): + """Subclasses must implement this method.""" + raise NotImplementedError(type(self)) + + +class CompositeTensorStats(TensorStats): + """A TensorStats consists of multiple child TensorStats."""
```suggestion """A TensorStats consisting of multiple child TensorStats.""" ```
axlearn
github_2023
python
446
apple
apghml
@@ -296,6 +365,8 @@ def initialize_parameters_recursively( parameter_spec=spec, ) for name, child in self._children.items(): + if not isinstance(child, BaseLayer): + continue
Could you add a comment about why this is needed and a test that fails without this change? E.g., why shouldn't we error in this case?
axlearn
github_2023
python
444
apple
markblee
@@ -249,6 +249,9 @@ def _gcloud_storage_rsync( timeout=timeout_s, capture_output=True, text=True, + # Avoid "No space left on device": + # https://cloud.google.com/knowledge/kb/error-message-while-running-the-command-gsutil-rsync-000004577 + env={"TMPDIR": f"{_LOG_DIR}/rsync"},
nit -- I wonder if we should use /var/tmp/rsync explicitly, given that log dir is often the directory being rsync'ed itself?
axlearn
github_2023
python
425
apple
ruomingp
@@ -523,24 +523,24 @@ def forward(self, inputs: Tensor) -> Tensor: ) return jnp.transpose(time_major_outputs, [1, 0, 2]) - def init_step_states(self, *, batch_size: int) -> Nested[Tensor]: + def init_states(self, *, batch_size: int) -> Nested[Tensor]: """Returns the prediction network initial step states, to be used by `extend_step`.""" - return self.rnn.init_step_states(batch_size=batch_size) + return self.rnn.init_states(batch_size=batch_size) def extend_step( self, *, - inputs: Tensor, - step_states: Nested[Tensor], + cached_states: Nested[Tensor], + data: Tensor, ) -> Tuple[Nested[Tensor], Tensor]: """Computes prediction network outputs and RNN state updates for one step. Args: - inputs: An int Tensor of shape [batch_size, num_labels]. - step_states: The step states returned by `init_step_states` or `extend_step`. + cached_states: A NestedTensor returned by `init_states` or `extend_step`. + data: An int Tensor of shape [batch_size, num_labels]. Returns: (updated_step_states, outputs), where `outputs` is a Tensor of shape [batch_size, output_dim].
```suggestion (updated_cache_states, outputs), where `outputs` is a Tensor of shape [batch_size, output_dim]. ```
axlearn
github_2023
python
425
apple
ruomingp
@@ -284,41 +284,41 @@ def initialize_parameters_recursively( ) return state - def init_step_states(self, *, batch_size: int) -> List[NestedTensor]: + def init_states(self, *, batch_size: int) -> List[Nested[Tensor]]: """Returns a list of initial step states from all layers.""" - states_list = [layer.init_step_states(batch_size=batch_size) for layer in self._layers] + states_list = [layer.init_states(batch_size=batch_size) for layer in self._layers] return states_list def extend_step( self, *, - inputs: NestedTensor, - step_states: List[NestedTensor], - ) -> Tuple[List[NestedTensor], Tensor]: + cached_states: List[Nested[Tensor]], + data: Tensor, + ) -> Tuple[List[Nested[Tensor]], Tensor]: """Computes the outputs and all layers state updates for one step. Args: - inputs: The inputs for the current step, often a Tensor of shape - [batch_size, input_dim]. - step_states: The list of step states from all layers returned by `init_step_states` + cached_states: A list of cached states from all layers returned by `init_states` or `extend_step`. + data: A Tensor of shape [batch_size, input_dim], the inputs for the current step. Returns: - (updated_step_states, outputs), where `outputs` are usually a Tensor of shape - [batch_size, output_dim], and updated_step_states is a list of states from all layers. + (updated_step_states, outputs), where: + `updated_step_states` is a list of states from all layers; + `outputs` is a Tensor of shape [batch_size, output_dim].
```suggestion (updated_cache_states, outputs), where: `updated_cache_states` is a list of states from all layers; `outputs` is a Tensor of shape [batch_size, output_dim]. ```
axlearn
github_2023
python
425
apple
ruomingp
@@ -338,27 +338,26 @@ def output_dim(self): class _RNNRepeat(Repeat): """A Repeat layer with layer = children class of BaseRNNCell.""" - def init_step_states(self, *, batch_size: int) -> NestedTensor: - """Returns the initial step states of all layers.""" + def init_states(self, *, batch_size: int) -> Nested[Tensor]: + """Returns the initial states of all layers.""" def layer_fn(_): - return VDict(self.layer.init_step_states(batch_size=batch_size)) + return VDict(self.layer.init_states(batch_size=batch_size)) cfg = self.config return jax.vmap(layer_fn)(jnp.empty(cfg.num_layers)) def extend_step( self, *, - inputs: NestedTensor, - step_states: NestedTensor, - ) -> Tuple[NestedTensor, NestedTensor]: + cached_states: Nested[Tensor], + data: Tensor, + ) -> Tuple[Nested[Tensor], Tensor]: """Computes the outputs and state updates for one step for all layers. Args: - inputs: The inputs for the current step, often a Tensor of shape - [batch_size, input_dim]. - step_states: The step states returned by `init_step_states` or `extend_step`. + cached_states: A NestedTensor returned by `init_states()` or `extend_step()`. + data: A Tensor of shape [batch_size, input_dim], the inputs for the current step. Returns: (updated_step_states, outputs), where `outputs` are usually a Tensor of shape
Ditto.
axlearn
github_2023
python
428
apple
kelvin-zou
@@ -6,12 +6,13 @@ The fuji models are set up to imitate LLaMA-1 (https://arxiv.org/abs/2302.13971).
nit, fix comment?
axlearn
github_2023
python
428
apple
kelvin-zou
@@ -444,19 +444,23 @@ def evaler_config_dict( return evalers -def make_config_name(arch: str, model_size: str) -> str: +def make_config_name(arch: str, model_size: str, version: Optional[str] = None) -> str: """Makes config name string as a function of architecture and model-size. Useful to keep config names synced with fine-tuning configs. Args: arch: The architecture of the model. model_size: The number of transformer parameters (not including vocab embeddings). + version: An optional version string. Returns: - f"{arch}-{model_size}". + f"{arch}-{model_size}" or f"{arch}-{model_size}-{version}". """ - return f"{arch}-{model_size}" + name = f"{arch}-{model_size}"
nit: add v1 as a default for backward compatibility?
axlearn
github_2023
python
428
apple
kelvin-zou
@@ -54,17 +105,21 @@ def get_trainer_kwargs(model_size: str, *, vocab_size: int) -> Dict[str, Any]: num_layers=32, hidden_dim=128 * 32, num_heads=32, + num_kv_heads=num_kv_heads, + rope_theta=rope_theta, ), learner_kwargs=dict(peak_lr=3e-4, weight_decay=0.1), - train_batch_size=4 * 1024 * 1024 // MAX_SEQUENCE_LENGTH, # 4M tokens. - max_step=500_000, # 2T tokens // 4M tokens/step. + max_sequence_length=max_sequence_length, + train_batch_size=train_batch_size, + max_step=max_step, mesh_shape=mesh_shape_from_axes(fsdp=-1), mesh_rules=( # tpu-v4. step time: 3.03s. ("tpu-v4-(1024|2048)", mesh_shape_from_axes(data=-1, fsdp=16)), # tpu-v5e. step time: TBD. ("tpu-v5litepod-256", mesh_shape_from_axes(data=-1, fsdp=16)), # H100/A100 80G. Maximum per-node batch size = 64, hence need >= 32 nodes. + # p5.48xlarge 8x64. v1 step time: 1.54s.
Nit, add a note for v3 model? I believe 1024 GPUs won't work for v3 model since global bs is only 512 due to 8k seq length.
axlearn
github_2023
python
423
apple
ruomingp
@@ -439,11 +439,18 @@ def _postprocess_outputs(self, *, sequences: Tensor, paddings: Tensor, scores: T ) -def _map_label_sequences(inputs: Tensor, *, blank_id: int = 0, pad_id: int = 0) -> Nested[Tensor]: - """Removes blanks, paddings, and repeats from the input sequences, as seen in CTC. +def _map_label_sequences( + inputs: Tensor, *, remove_repeats: bool = True, blank_id: int = 0, pad_id: int = 0
Should we leave `remove_repeats` without a default value, since neither is the best value? ```suggestion inputs: Tensor, *, remove_repeats: bool, blank_id: int = 0, pad_id: int = 0 ```
axlearn
github_2023
python
416
apple
samos123
@@ -75,16 +81,46 @@ def _get_job_credentials( ) -class TPUJob(GCPJob): +@config_class +class AcceleratorConfig(ConfigBase): + """Configures job resources, e.g. TPU or GPU. + + Attributes: + instance_type: Instance type, e.g. tpu-v4-8.
add example for what this should be on GPU (can be AWS or GCP)
axlearn
github_2023
python
421
apple
markblee
@@ -476,3 +477,69 @@ def _map_label_sequences(inputs: Tensor, *, blank_id: int = 0, pad_id: int = 0) if pad_id != 0: sequences = jnp.where(paddings, pad_id, sequences) return dict(sequences=sequences, paddings=paddings, lengths=lens) + + +class RNNPredictionNetwork(BaseLayer): + """RNN prediction network internal language model.""" + + @config_class + class Config(BaseLayer.Config): + """Configs RNNPredictionNetwork.""" + + # Vocab size. + vocab_size: Required[int] = REQUIRED + # The embedding dim. + emb_dim: Required[int] = REQUIRED + # The output dim. + output_dim: Required[int] = REQUIRED + + # Embedding lookup layer. + embedding: Embedding.Config = Embedding.default_config() + # RNN cell of the internal LM. Defaults to a 1 layer LSTM. + rnn_cell: BaseRNNCell.Config = LSTMCell.default_config() + + def __init__(self, cfg: Config, *, parent: Optional[Module]): + super().__init__(cfg, parent=parent) + cfg = self.config + self._add_child( + "embedding", cfg.embedding.set(num_embeddings=cfg.vocab_size, dim=cfg.emb_dim) + ) + self._add_child("rnn", cfg.rnn_cell.set(input_dim=cfg.emb_dim, output_dim=cfg.output_dim)) + + def forward(self, inputs: Tensor) -> Tensor: + """Computes prediction network output from the inputs. + + Args: + inputs: An int Tensor of shape [batch_size, num_labels]. Valid tokens are in the range + [0, vocab_size). Out-of-range token ids are clamped to the bounds of the array. + See https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html + #out-of-bounds-indexing.
```suggestion See https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html #out-of-bounds-indexing. ```
axlearn
github_2023
python
421
apple
markblee
@@ -476,3 +477,69 @@ def _map_label_sequences(inputs: Tensor, *, blank_id: int = 0, pad_id: int = 0) if pad_id != 0: sequences = jnp.where(paddings, pad_id, sequences) return dict(sequences=sequences, paddings=paddings, lengths=lens) + + +class RNNPredictionNetwork(BaseLayer): + """RNN prediction network internal language model.""" + + @config_class + class Config(BaseLayer.Config): + """Configs RNNPredictionNetwork.""" + + # Vocab size. + vocab_size: Required[int] = REQUIRED + # The embedding dim. + emb_dim: Required[int] = REQUIRED + # The output dim. + output_dim: Required[int] = REQUIRED + + # Embedding lookup layer. + embedding: Embedding.Config = Embedding.default_config() + # RNN cell of the internal LM. Defaults to a 1 layer LSTM. + rnn_cell: BaseRNNCell.Config = LSTMCell.default_config() + + def __init__(self, cfg: Config, *, parent: Optional[Module]): + super().__init__(cfg, parent=parent) + cfg = self.config + self._add_child( + "embedding", cfg.embedding.set(num_embeddings=cfg.vocab_size, dim=cfg.emb_dim) + ) + self._add_child("rnn", cfg.rnn_cell.set(input_dim=cfg.emb_dim, output_dim=cfg.output_dim)) + + def forward(self, inputs: Tensor) -> Tensor: + """Computes prediction network output from the inputs. + + Args: + inputs: An int Tensor of shape [batch_size, num_labels]. Valid tokens are in the range + [0, vocab_size). Out-of-range token ids are clamped to the bounds of the array. + See https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html + #out-of-bounds-indexing. + + Returns: A Tensor of shape [batch_size, num_labels, output_dim].
```suggestion Returns: A Tensor of shape [batch_size, num_labels, output_dim]. ```
axlearn
github_2023
others
417
apple
ruomingp
@@ -13,7 +13,7 @@ requires-python = ">=3.9" # Every time we upgrade JAX, we should try to bring the rest to the newest versions. dependencies = [ "attrs>=23.1.0", # We use `type` in `attrs.field` - "absl-py", + "absl-py<2", # breaks axlearn.cli.utils_test on 2.1.0
Can we pin to a specific version of absl-py?
axlearn
github_2023
python
404
apple
ruomingp
@@ -306,6 +308,7 @@ def from_spec(cls, spec: List[str], *, fv: Optional[flags.FlagValues]) -> Config - platform: The image target platform. - allow_dirty: Whether to ignore dirty git status. - cache_from: A comma-separated list of cache sources. + - skip_bundle: Whether to skip the build + push.
When is it safe to enable `skip_bundle`? Please add a comment.
axlearn
github_2023
python
401
apple
markblee
@@ -708,3 +713,66 @@ def temp_chdir(new_cwd: Union[pathlib.Path, str]): yield finally: os.chdir(old_cwd) + + +L = TypeVar("L", bound=BaseLayer) + + +@contextlib.contextmanager +def bind_layer( + layer: ConfigOr[L], + *, + is_training: bool = True, + prng_key: Optional[jax.random.PRNGKey] = None, + state: Optional[Nested[Tensor]] = None, +) -> Iterator[L]: + """Creates a context in which `module` has state initialized using `init_method`. + + This lets you write tests that make calls to a module without needing to call `functional()` + yourself. + + It is similar in spirit to FLAX's `module.bind()` although that works differently due to the + fact that FLAX state is only associated with an instane of a module, whereas AXLearn state is
```suggestion fact that FLAX state is only associated with an instance of a module, whereas AXLearn state is ```
axlearn
github_2023
python
401
apple
markblee
@@ -708,3 +713,66 @@ def temp_chdir(new_cwd: Union[pathlib.Path, str]): yield finally: os.chdir(old_cwd) + + +L = TypeVar("L", bound=BaseLayer) + + +@contextlib.contextmanager +def bind_layer( + layer: ConfigOr[L], + *, + is_training: bool = True, + prng_key: Optional[jax.random.PRNGKey] = None, + state: Optional[Nested[Tensor]] = None, +) -> Iterator[L]: + """Creates a context in which `module` has state initialized using `init_method`. + + This lets you write tests that make calls to a module without needing to call `functional()` + yourself. + + It is similar in spirit to FLAX's `module.bind()` although that works differently due to the + fact that FLAX state is only associated with an instane of a module, whereas AXLearn state is + global. + + Example: + ``` + cfg = Linear.default_config().set(input_dim=5, output_dim=7) + with test_utils.bind_layer(cfg) as layer: + result = layer(jnp.ones(5)) + assert result.shape == (7,) + ``` + + Args: + layer: The layer to initialize. + is_training: Tell the layer it is in training or not. + prng_key: The PRNG key to use. If None, `jax.random.PRNGKey(0)`. + state: The state to use. If None, call `initialize_parameters_recursively()` to initialize + the state. + + Returns: + The Initialized module. + """ + if prng_key is None: + prng_key = jax.random.PRNGKey(0) + + init_key, ctx_key = jax.random.split(prng_key) + if isinstance(layer, InstantiableConfig): + if isinstance(layer, BaseLayer.Config) and isinstance(
Should it more generally be `Module` rather than `BaseLayer`?
axlearn
github_2023
python
401
apple
markblee
@@ -708,3 +713,66 @@ def temp_chdir(new_cwd: Union[pathlib.Path, str]): yield finally: os.chdir(old_cwd) + + +L = TypeVar("L", bound=BaseLayer) + + +@contextlib.contextmanager +def bind_layer(
```suggestion def bind_module( ``` Which is the base class associated with invocation contexts?
axlearn
github_2023
python
399
apple
markblee
@@ -242,6 +242,13 @@ def add_summary( name: The name of the item to add. value: The value to add. """ +
(Not from this PR, but just noticed that `add_summary` typing seems out of date.)
axlearn
github_2023
python
399
apple
markblee
@@ -738,6 +739,20 @@ def test_drop_output(self): ctx.output_collection.module_outputs["nested"], ) + def test_add_summary_validation(self): + """Tests validation in `add_summary()`.""" + + class MySummary(summary.Summary): + val: str + + def validate(self): + if self.val == "s": + raise ValueError("not allowed") + + with self._dummy_context() as ctx: + with self.assertRaises(ValueError): + ctx.add_summary("summary", MySummary("s"))
Should we test with nested container of summaries and non-summaries to exercise tree_map?
axlearn
github_2023
python
399
apple
markblee
@@ -56,6 +64,103 @@ def test_add_summary_image(self): ) chex.assert_trees_all_close(logged_grayscale_image / 255, grayscale_image[..., None]) + def test_with_tree_paths(self): + """Tests that `ImageSummary` works with `tree_paths()`.""" + img = jnp.ones((1, 1, 1, 3)) + s = dict(a=ImageSummary(img), b=ImageSummary(img)) + self.assertEqual( + tree_paths(s), dict(a=ImageSummary("a/_value"), b=ImageSummary("b/_value")) + ) + # Check validation still happens if only some leaves are str. + with self.assertRaises(AttributeError): + ImageSummary(("asdf", img)) + + def test_with_flatten_items(self): + """Tests that `ImageSummary` works with `flatten_items()`.""" + img = jnp.ones((1, 1, 1, 3)) + s = dict(a=ImageSummary(img), b=ImageSummary(img)) + self.assertSequenceEqual(flatten_items(s), [("a/_value", img), ("b/_value", img)]) + + def test_end_to_end(self): + """Tests that `ImageSummary` works with `SpmdTrainer` and `SpmdEvaler` in an end-to-end + fashion. + """ + img = jnp.broadcast_to(jnp.array([0.0, 0.5, 1.0]), shape=(1, 1, 3)) + img = jnp.array([img, img]) + + class ImageSummaryModel(trainer_test.DummyModel): + def forward(self, *args, **kwargs): + self.add_summary("img", ImageSummary(img)) + return super().forward(*args, **kwargs) + + cfg: SpmdTrainer.Config = SpmdTrainer.default_config().set(name="test_trainer") + with tempfile.TemporaryDirectory() as cfg.dir: + cfg.mesh_axis_names = ("data", "model") + cfg.mesh_shape = (1, 1) + cfg.model = ImageSummaryModel.default_config().set(dtype=jnp.float32) + cfg.input = trainer_test.DummyInput.default_config() + cfg.learner = learner.Learner.default_config().set( + optimizer=config_for_function(optimizers.sgd_optimizer).set( + learning_rate=0.1, + decouple_weight_decay=True, + momentum=0.9, + weight_decay=1e-4, + ) + ) + + evaler_cfg = SpmdEvaler.default_config() + evaler_cfg.input = DummyInput.default_config().set(total_num_batches=2) + evaler_cfg.eval_policy.n = 2 + cfg.evalers = dict(eval_dummy=evaler_cfg) + 
cfg.checkpointer.save_policy.n = 5 + cfg.max_step = 8 + trainer: SpmdTrainer = cfg.instantiate(parent=None) + trainer.run(prng_key=jax.random.PRNGKey(123)) + + @dataclasses.dataclass + class Expected: + """Information about expected logged image summaries.""" + + path: str + count: int + key: str + + def shape(self): + # The trainer / evaler makes `count` calls to forward(). + # Each call to forward logs a batch of two images. + return (self.count, 2, 1, 1, 3) + + def img(self): + return jnp.broadcast_to(img, self.shape()) + + expected = [ + Expected(path=os.path.join(cfg.dir, "summaries", "eval_dummy"), count=4, key="img"), + Expected( + path=os.path.join(cfg.dir, "summaries", "train_train"), count=8, key="model/img" + ), + ] + + for info in expected: + ea = event_accumulator.EventAccumulator(info.path) + ea.Reload() + + print(ea.tensors.Keys())
```suggestion ```
axlearn
github_2023
python
399
apple
markblee
@@ -56,6 +64,103 @@ def test_add_summary_image(self): ) chex.assert_trees_all_close(logged_grayscale_image / 255, grayscale_image[..., None]) + def test_with_tree_paths(self): + """Tests that `ImageSummary` works with `tree_paths()`.""" + img = jnp.ones((1, 1, 1, 3)) + s = dict(a=ImageSummary(img), b=ImageSummary(img)) + self.assertEqual( + tree_paths(s), dict(a=ImageSummary("a/_value"), b=ImageSummary("b/_value")) + ) + # Check validation still happens if only some leaves are str. + with self.assertRaises(AttributeError): + ImageSummary(("asdf", img)) + + def test_with_flatten_items(self): + """Tests that `ImageSummary` works with `flatten_items()`.""" + img = jnp.ones((1, 1, 1, 3)) + s = dict(a=ImageSummary(img), b=ImageSummary(img)) + self.assertSequenceEqual(flatten_items(s), [("a/_value", img), ("b/_value", img)]) + + def test_end_to_end(self): + """Tests that `ImageSummary` works with `SpmdTrainer` and `SpmdEvaler` in an end-to-end + fashion. + """ + img = jnp.broadcast_to(jnp.array([0.0, 0.5, 1.0]), shape=(1, 1, 3)) + img = jnp.array([img, img]) + + class ImageSummaryModel(trainer_test.DummyModel): + def forward(self, *args, **kwargs): + self.add_summary("img", ImageSummary(img)) + return super().forward(*args, **kwargs) + + cfg: SpmdTrainer.Config = SpmdTrainer.default_config().set(name="test_trainer") + with tempfile.TemporaryDirectory() as cfg.dir: + cfg.mesh_axis_names = ("data", "model") + cfg.mesh_shape = (1, 1) + cfg.model = ImageSummaryModel.default_config().set(dtype=jnp.float32) + cfg.input = trainer_test.DummyInput.default_config() + cfg.learner = learner.Learner.default_config().set( + optimizer=config_for_function(optimizers.sgd_optimizer).set( + learning_rate=0.1, + decouple_weight_decay=True, + momentum=0.9, + weight_decay=1e-4, + ) + ) + + evaler_cfg = SpmdEvaler.default_config() + evaler_cfg.input = DummyInput.default_config().set(total_num_batches=2) + evaler_cfg.eval_policy.n = 2 + cfg.evalers = dict(eval_dummy=evaler_cfg) + 
cfg.checkpointer.save_policy.n = 5 + cfg.max_step = 8 + trainer: SpmdTrainer = cfg.instantiate(parent=None) + trainer.run(prng_key=jax.random.PRNGKey(123)) + + @dataclasses.dataclass + class Expected: + """Information about expected logged image summaries.""" + + path: str + count: int + key: str + + def shape(self): + # The trainer / evaler makes `count` calls to forward(). + # Each call to forward logs a batch of two images. + return (self.count, 2, 1, 1, 3) + + def img(self): + return jnp.broadcast_to(img, self.shape()) + + expected = [ + Expected(path=os.path.join(cfg.dir, "summaries", "eval_dummy"), count=4, key="img"), + Expected( + path=os.path.join(cfg.dir, "summaries", "train_train"), count=8, key="model/img" + ), + ] + + for info in expected: + ea = event_accumulator.EventAccumulator(info.path) + ea.Reload() + + print(ea.tensors.Keys()) + + logged_evaler_img = tf.stack( + [ + tf.stack( + [ + tf.image.decode_image(im) + for im in tf.make_ndarray(event.tensor_proto)[2:] + ] + ) + for event in ea.Tensors(info.key) + ] + ) + self.assertEqual(logged_evaler_img.shape, info.shape()) + # TB uses lossy compression. + chex.assert_trees_all_close(logged_evaler_img / 255, info.img(), rtol=0.01)
nit -- use `self.assertNestedAllClose`?
axlearn
github_2023
others
366
apple
markblee
@@ -65,6 +65,7 @@ disable=abstract-method, coerce-builtin, coerce-method, delslice-method, + # disallowed-name, # copied from pyproject.toml
```suggestion # disallowed-name, ``` The comment probably adds little value after this PR (please also remove below)
axlearn
github_2023
python
366
apple
markblee
@@ -208,6 +208,7 @@ def _int32_binary_search( def loop_body(i: int, solution: Tensor) -> Tensor: # Loop over the non-sign bits. bit = jnp.int32(1 << 30 - i) + # pylint: disable-next=unsupported-binary-operation # TODO this might be a real bug?
```suggestion # pylint: disable-next=unsupported-binary-operation ```
axlearn
github_2023
others
366
apple
markblee
@@ -26,12 +26,19 @@ download_assets() { } set -o xtrace +if [[ "${1:-x}" = "--skip-pre-commit" ]] ; then + SKIP_PRECOMMIT=true + shift +fi UNQUOTED_PYTEST_FILES=$(echo $1 | tr -d "'") -pre-commit install -pre-commit run --all-files || exit_if_error $? "pre-commit failed." -# Run pytype separately to utilize all cpus and for better output. -pytype -j auto . || exit_if_error $? "pytype failed." +# skip pre-commit on parallel CI because it is run as a separate job
```suggestion # Skip pre-commit on parallel CI because it is run as a separate job. ```
axlearn
github_2023
python
373
apple
tgunter
@@ -1502,7 +1506,7 @@ def adastar_optimizer( eps: (float) regularization constant added to the square root of smoothed_gradient_squares. eps_square: (float) regularization constant added to gradient_squares. raw_update_clipping_threshold: If not None, clips the norms of the raw updates - to this value. + to this value. `raw_update_norm` summaries will be clipped either way.
Did you mean: ```suggestion to this value. `raw_update_norm` summaries will be logged either way. ``` ?
axlearn
github_2023
python
5
apple
markblee
@@ -86,7 +86,7 @@ def _convert_translation_to_transform(translations: tf.Tensor) -> tf.Tensor: Returns: A transformation matrix of shape (num_images, 8) to be used by - https://github.com/keras-team/keras/blob/v2.9.0/keras/layers/preprocessing/image_preprocessing.py#L898-L985
FWIW I think tags are ok too.
axlearn
github_2023
python
27
apple
ruomingp
@@ -2437,18 +2437,33 @@ class Config(BaseTransformerLayer.Config): class StackedTransformerLayer(BaseStackedTransformerLayer): """A simple implementation of BaseStackedTransformerLayer.""" - Config = BaseStackedTransformerLayer.Config + @config_class + class Config(BaseStackedTransformerLayer.Config): + """Configures StackedTransformerLayer.""" + + layer: Union[ + BaseTransformerLayer.Config, Sequence[BaseTransformerLayer.Config] + ] = TransformerLayer.default_config()
Add comments that `len(layer)` should match `num_layers`?
axlearn
github_2023
python
358
apple
markblee
@@ -326,8 +335,16 @@ def schedule( for resource_type, demand in job_demands.items(): resource_usages[resource_type] += demand job_verdicts[project_id][job_id] = verdict + project_usages[project_id] = resource_usages - return Scheduler.ScheduleResults(project_limits=project_limits, job_verdicts=job_verdicts) + # Some resources may remain after scheduling jobs within per-project limits due to per-job + # demand sizes. +
```suggestion ```
axlearn
github_2023
python
350
apple
apghml
@@ -146,76 +146,96 @@ def calculate( ) -> Dict[str, float]: """Calculates per-project limits on available resources, quotas, and demands. + We assume that `limit` and `demands` are all integers, reflecting number of resource units, + e.g., number of GPUs. The allocations will also be integers. + + TODO(rpang): change the API to take integers. + Args: limit: The total amount of available resources. quotas: A mapping from project ids to quotas. If a project id is missing, assume - quota of 0. + quota of 0. Quotas must be non-negative, but do not have to add up to `limit`. + Available resources will be allocated proportional to quotas. demands: A mapping from project ids to demands. If a project id is missing, assume demand of 0. Returns: - A mapping from project ids to resource limits. + A mapping from project ids to resource limits for each project id in `demands`. Raises: - ValueError: if total quota exceeds `limit`.
Would it make sense to add a test that it behaves correctly when the total quota exceeds `limit`, if there isn't already such a test?
axlearn
github_2023
python
349
apple
zetaqubit
@@ -214,7 +214,7 @@ def from_flags(cls, fv: flags.FlagValues, action: str, **kwargs) -> Config: """ cfg = super().from_flags(fv, **kwargs) if not cfg.bastion_name: - cfg.bastion_name = shared_bastion_name(fv) + cfg.bastion_name = fv.bastion or shared_bastion_name(fv)
For future: might be good to centralize all of the config reading and overriding
axlearn
github_2023
python
346
apple
markblee
@@ -335,6 +336,7 @@ def example_fn(example: Dict[str, Tensor]) -> Dict[str, Tensor]: randaug_magnitude=randaug_magnitude, randaug_exclude_ops=randaug_exclude_ops, erasing_probability=erasing_probability, + use_whitening=use_whitening,
Not for this PR, but a more 'configurable' way would be to take a `config_for_function(crop_augment_whiten)` as an input processor, configure it at the caller, and chain it here. This avoids the need to propagate all args in the fn signature.
axlearn
github_2023
python
338
apple
tgunter
@@ -1255,6 +1255,86 @@ def compute_loss(param_values): test_results = _compute_updates(test_opt) self.assertNestedAllClose(base_results, test_results, atol=1e-6, rtol=1e-6) + @parameterized.parameters( + dict( + learning_rate=0.01, + b1=0.95, + b2=0.995, + eps_square=1e-30, + update_schedule=config_for_function(schedule.cosine_with_linear_warmup).set( + peak_lr=1, warmup_steps=100, max_step=1000 + ), + clipping_threshold=1.0, + weight_decay=3e-4, + ), + ) + def test_adastar_summaries( + self, + learning_rate, + b1, + b2, + eps_square, + update_schedule, + clipping_threshold, + weight_decay, + ): + test_opt = adastar_optimizer( + learning_rate=learning_rate, + # adafactor does not apply smoothing on gradients (but on raw updates). + gradient_ema_decay=None, + gradient_ema_debias=None, + gradient_square_ema_decay=b2, + gradient_square_ema_debias=True, + eps=0, + eps_square=eps_square, + # Clipping is applied on raw updates by per-param norm (not global norm). + raw_update_clipping_threshold=clipping_threshold, + # Smoothing is applied on raw updates. + update_ema_decay=b1, + # ... but without debiasing (!). + update_ema_debias=False, + weight_decay=weight_decay, + update_schedule=update_schedule, + ) + + def _compute_updates(opt) -> Tensor: + params = dict( + layer=VDict( + w=OptParam( + value=jnp.asarray([[0, 10, 2, -3], [1, -3, 2, 4]], dtype=jnp.float32), + factorization_spec=None, + weight_decay_scale=1.0, + ) + ) + ) + print(f"params={params}") + state = opt.init(params) + + def compute_loss(param_values): + return -jnp.mean(jax.nn.log_softmax(param_values["layer"]["w"])[..., 1]) + + param_values = jax.tree_util.tree_map(lambda p: p.value, params) + grads = jax.grad(compute_loss)(param_values) + print(f"grads={grads}")
Print statement left in intentionally?
axlearn
github_2023
python
319
apple
markblee
@@ -317,11 +317,23 @@ def _execute(self): class SubmitBastionJob(BaseSubmitBastionJob): """A job to submit a command to bastion. + TODO(rpang): rename this class to BastionRemoteJob. + Main differences from base submit: - Emits gsutil commands to view logs. - Emits a warning if the bastion doesn't exist in GCE. """ + @config_class + class Config(BaseSubmitBastionJob.Config): + zone: Required[str] = REQUIRED + + @classmethod + def from_flags(cls, fv: flags.FlagValues, **kwargs) -> Config: + cfg = super().from_flags(fv, **kwargs) + cfg.zone = fv.zone
I think we can remove this (`from_flags` defaults to setting configs that match flag names).
axlearn
github_2023
python
319
apple
markblee
@@ -207,6 +210,7 @@ def from_flags(cls, fv: flags.FlagValues, action: str, **kwargs) -> Config: # Default output_dir depends on the final value of --name. fv.set_default("output_dir", f"gs://{gcp_settings('ttl_bucket')}/axlearn/jobs/{fv.name}") cfg = super().from_flags(fv, **kwargs) + cfg.zone = fv.zone
Same here.
axlearn
github_2023
python
319
apple
markblee
@@ -335,7 +341,8 @@ def _execute(self): "\nView bastion outputs with:\n" f"gsutil cat {os.path.join(self.bastion_dir, 'logs', cfg.job_name)}\n" "\nCheck job history with:\n" - f"{infer_cli_name()} gcp bastion history --name={cfg.name} --job_name={cfg.job_name}" + f"axlearn gcp bastion history --name={cfg.name} "
Was the `infer_cli_name()` -> `axlearn` change necessary for some reason?
axlearn
github_2023
others
259
apple
tgunter
@@ -12,6 +12,9 @@ echo "=== AXLearn start_tpu.sh ===" # Random sleep to prevent all TPU-VMs overwhelming pypi etc for large slices. sleep $((1 + $RANDOM % 30)) +sudo sh -c "echo 'root soft nofile 100000' >> /etc/security/limits.conf"
nit: Is it worth a comment to explain?
axlearn
github_2023
others
259
apple
samos123
@@ -12,6 +12,10 @@ echo "=== AXLearn start_tpu.sh ===" # Random sleep to prevent all TPU-VMs overwhelming pypi etc for large slices. sleep $((1 + $RANDOM % 30)) +# Increase file descriptor limits for `root` to avoid "Too many open files" errors.
nit: the change the comment to an echo statement so someone reading the logs knows what's going on. That would have helped in catching whether the correct startup script was run as well.
axlearn
github_2023
python
302
apple
ruomingp
@@ -67,8 +67,7 @@ def main(_): setup(jax_backend="cpu") trainer_config_fn: TrainerConfigFn = get_named_trainer_config( FLAGS.config, - config_module=FLAGS.module, - root_module="axlearn", + config_module=f"axlearn.{FLAGS.module}",
How about ```suggestion config_module=FLAGS.module", ``` ? This will make the script work for other repos that depend on axlearn
axlearn
github_2023
python
155
apple
markblee
@@ -361,7 +361,7 @@ def forward(self, image: Tensor, is_masked: Optional[Tensor] = None) -> Dict[str Args: image: The input image. Shape: (batch, height, width, channels). - is_masked: a boolen Tensor in shape (batch, length), representing masked positions + is_masked: a boolean Tensor in shape (batch, length), representing masked positions
```suggestion is_masked: A boolean Tensor in shape (batch, length), representing masked positions ```
axlearn
github_2023
python
297
apple
markblee
@@ -1033,6 +1033,113 @@ def update_fn(updates, state, params=None): ) +class SkipClipState(NamedTuple): + """State returned by functions in skip_and_clip_by_global_norm().""" + + nonvalid_count: Union[Tensor, TensorSpec] # Number of non-valid steps. + inner_state: Any # State of the inner PartitionedGradientTransformation. + + +def skip_and_clip_by_global_norm( + inner: ConfigOr[PartitionedGradientTransformation], + *, + drop_norm: Optional[float] = None, + max_norm: Optional[float] = None, + eps: float = 1e-8, +) -> PartitionedGradientTransformation: + """Skip updates when global norm >= drop_norm, otherwise clip the global norm. + If we detect abnormal gradients that have global norm >= drop_norm, we skip the gradient updates + and state updates. Otherwise we scale the gradients s.t. global norm <= max_norm, and apply the + wrapped gradient transformation `inner`. Note the difference compared to clip_by_global_norm() + is that this version skips all updates while clip_by_global_norm() still performs parameter + updates and optimizer state updates. + Example usage:
Can we fix the docstring spacing (newline before example, args, returns)?
axlearn
github_2023
python
296
apple
ruomingp
@@ -219,6 +219,9 @@ def __init__(self, cfg: Config, *, parent: Optional[Module]): with self.mesh(): self._add_child("model", cfg.model) self._model_param_specs = self.model.create_parameter_specs_recursively() + if cfg.inference_dtype is not None: + self._model_param_specs = self._inference_cast(self._model_param_specs)
```suggestion self._model_param_specs = self._inference_cast(self._model_param_specs) ```