repo_name
stringlengths
1
62
dataset
stringclasses
1 value
lang
stringclasses
11 values
pr_id
int64
1
20.1k
owner
stringlengths
2
34
reviewer
stringlengths
2
39
diff_hunk
stringlengths
15
262k
code_review_comment
stringlengths
1
99.6k
axlearn
github_2023
python
992
apple
ruomingp
@@ -277,3 +286,51 @@ def __call__(self, cfg: SpmdTrainer.Config) -> SpmdTrainer.Config: for config_modifier_fn in self._config_modifiers: cfg = config_modifier_fn(cfg) return cfg + + +class FP8ConfigModifier(ConfigModifier): + """Update the trainer config to use FP8 training.""" + + @config_class + class Config(ConfigModifier.Config): + """Configure FP8ConfigModifier. See QuantizedDotGeneral.Config.""" + + fp8_amax_history_length: Required[int] = REQUIRED + + def __call__(self, cfg: SpmdTrainer.Config) -> SpmdTrainer.Config: + """Override dense layer to use FP8 quantized dot and add gradient rules for FP8 stats.""" + override_cfg: FP8ConfigModifier.Config = self.config + quantized_dot_general = QuantizedDotGeneral.default_config().set( + quantization_type=DotGeneralQuantizationType.FP_8, + fp8_amax_history_length=override_cfg.fp8_amax_history_length, + ) + + def visit_fn(_, value): + if isinstance(value, DenseGeneralBaseLayer.Config): + value.quantized_dot_general = quantized_dot_general + + def enter_fn(_, value, default_kv): + return None if isinstance(value, DenseGeneralBaseLayer.Config) else default_kv + + cfg.visit(visit_fn=visit_fn, enter_fn=enter_fn) + + update_cfg: OverrideInplaceUpdateTransformation.Config = ( + OverrideInplaceUpdateTransformation.default_config() + ) + update_cfg.rules = [ + f".*/{x}" + for x in [ + "input_scale", + "kernel_scale", + "output_grad_scale", + "input_amax_history", + "kernel_amax_history", + "output_grad_amax_history", + ] + ]
Can we use update_rules? See https://github.com/apple/axlearn/blob/daec8c530741594eeea12faba1a454bb42b6c03c/axlearn/common/learner_test.py#L343.
axlearn
github_2023
python
992
apple
ruomingp
@@ -1158,6 +1159,55 @@ def prune_tree( return in_tree +def tree_merge_default_override_fn(primary: Any, secondary: Any): + is_primary_empty = False + is_secondary_empty = False + try: + is_primary_empty = len(primary) == 0 + is_secondary_empty = len(secondary) == 0 + except TypeError: + # A TypeError will be raised if primary/secondary don't have length, + # e.g. if they are scalars. + pass + if primary is None or is_primary_empty: + return secondary + if secondary is None or is_secondary_empty: + return primary + raise ValueError( + f"Encountered incompatible subtree leaves: {primary=}, {secondary=}. Specify " + "a custom override function to resolve incompatible subtree merges." + ) + + +def tree_merge( + primary: Nested[Any], + *, + secondary: Nested[Any], + override_fn: Callable[[Any, Any], Any] = tree_merge_default_override_fn, +) -> Nested[Any]: + """Merge `secondary` into `primary`. The result contains shallow copies of subtrees from both.
Consider making them deep copies to be consistent with the jax tree utils. Deep copies are also less error prone by decoupling side effects.
axlearn
github_2023
python
992
apple
ruomingp
@@ -1158,6 +1159,55 @@ def prune_tree( return in_tree +def tree_merge_default_override_fn(primary: Any, secondary: Any): + is_primary_empty = False + is_secondary_empty = False + try: + is_primary_empty = len(primary) == 0 + is_secondary_empty = len(secondary) == 0 + except TypeError: + # A TypeError will be raised if primary/secondary don't have length, + # e.g. if they are scalars. + pass + if primary is None or is_primary_empty: + return secondary + if secondary is None or is_secondary_empty: + return primary + raise ValueError( + f"Encountered incompatible subtree leaves: {primary=}, {secondary=}. Specify " + "a custom override function to resolve incompatible subtree merges." + ) + + +def tree_merge( + primary: Nested[Any], + *, + secondary: Nested[Any], + override_fn: Callable[[Any, Any], Any] = tree_merge_default_override_fn, +) -> Nested[Any]: + """Merge `secondary` into `primary`. The result contains shallow copies of subtrees from both. + + Two trees are mergable if there does not exists a path in `secondary` that is a subpath of any + path in `primary`. If there are identical path with different leaves, `override_fn` is used to + determine which leaf is kept in the resulting tree. + + The default `override_fn` choses the non-empty leaf. If both leaves are non-empty, an error + will be raised. + """ + if isinstance(primary, dict) ^ isinstance(secondary, dict): + raise ValueError(f"Trying to merge incompatible subtrees: {primary=}, {secondary=}") + if not (isinstance(primary, dict) or isinstance(secondary, dict)): + return override_fn(primary, secondary) + # Use type() so that if primary is a VDict, out_tree is also a VDict.
Can we be even more strict? ```suggestion if type(primary) != type(secondary): raise ValueError(...) ```
axlearn
github_2023
python
992
apple
ruomingp
@@ -1158,6 +1159,55 @@ def prune_tree( return in_tree +def tree_merge_default_override_fn(primary: Any, secondary: Any): + is_primary_empty = False + is_secondary_empty = False + try: + is_primary_empty = len(primary) == 0 + is_secondary_empty = len(secondary) == 0 + except TypeError: + # A TypeError will be raised if primary/secondary don't have length, + # e.g. if they are scalars. + pass + if primary is None or is_primary_empty: + return secondary + if secondary is None or is_secondary_empty: + return primary + raise ValueError( + f"Encountered incompatible subtree leaves: {primary=}, {secondary=}. Specify " + "a custom override function to resolve incompatible subtree merges." + ) + + +def tree_merge( + primary: Nested[Any], + *, + secondary: Nested[Any], + override_fn: Callable[[Any, Any], Any] = tree_merge_default_override_fn, +) -> Nested[Any]: + """Merge `secondary` into `primary`. The result contains shallow copies of subtrees from both. + + Two trees are mergable if there does not exists a path in `secondary` that is a subpath of any + path in `primary`. If there are identical path with different leaves, `override_fn` is used to + determine which leaf is kept in the resulting tree. + + The default `override_fn` choses the non-empty leaf. If both leaves are non-empty, an error + will be raised. + """ + if isinstance(primary, dict) ^ isinstance(secondary, dict): + raise ValueError(f"Trying to merge incompatible subtrees: {primary=}, {secondary=}") + if not (isinstance(primary, dict) or isinstance(secondary, dict)): + return override_fn(primary, secondary) + # Use type() so that if primary is a VDict, out_tree is also a VDict. + out_tree = type(primary)(primary) + for k in secondary: + if k in primary: + out_tree[k] = tree_merge(primary[k], secondary=secondary[k], override_fn=override_fn) + else: + out_tree[k] = secondary[k]
```suggestion out_tree[k] = copy.deepcopy(secondary[k]) ```
axlearn
github_2023
python
992
apple
ruomingp
@@ -1158,6 +1159,55 @@ def prune_tree( return in_tree +def tree_merge_default_override_fn(primary: Any, secondary: Any): + is_primary_empty = False + is_secondary_empty = False + try: + is_primary_empty = len(primary) == 0 + is_secondary_empty = len(secondary) == 0 + except TypeError: + # A TypeError will be raised if primary/secondary don't have length, + # e.g. if they are scalars. + pass + if primary is None or is_primary_empty: + return secondary + if secondary is None or is_secondary_empty: + return primary + raise ValueError( + f"Encountered incompatible subtree leaves: {primary=}, {secondary=}. Specify " + "a custom override function to resolve incompatible subtree merges." + ) + + +def tree_merge( + primary: Nested[Any], + *, + secondary: Nested[Any], + override_fn: Callable[[Any, Any], Any] = tree_merge_default_override_fn, +) -> Nested[Any]: + """Merge `secondary` into `primary`. The result contains shallow copies of subtrees from both. + + Two trees are mergable if there does not exists a path in `secondary` that is a subpath of any + path in `primary`. If there are identical path with different leaves, `override_fn` is used to + determine which leaf is kept in the resulting tree. + + The default `override_fn` choses the non-empty leaf. If both leaves are non-empty, an error + will be raised. + """ + if isinstance(primary, dict) ^ isinstance(secondary, dict): + raise ValueError(f"Trying to merge incompatible subtrees: {primary=}, {secondary=}") + if not (isinstance(primary, dict) or isinstance(secondary, dict)): + return override_fn(primary, secondary)
```suggestion return copy.deepcopy(override_fn(primary, secondary)) ```
axlearn
github_2023
python
992
apple
ruomingp
@@ -1158,6 +1159,55 @@ def prune_tree( return in_tree +def tree_merge_default_override_fn(primary: Any, secondary: Any): + is_primary_empty = False + is_secondary_empty = False + try: + is_primary_empty = len(primary) == 0 + is_secondary_empty = len(secondary) == 0 + except TypeError: + # A TypeError will be raised if primary/secondary don't have length, + # e.g. if they are scalars. + pass + if primary is None or is_primary_empty: + return secondary + if secondary is None or is_secondary_empty: + return primary + raise ValueError( + f"Encountered incompatible subtree leaves: {primary=}, {secondary=}. Specify " + "a custom override function to resolve incompatible subtree merges." + ) + + +def tree_merge( + primary: Nested[Any], + *, + secondary: Nested[Any], + override_fn: Callable[[Any, Any], Any] = tree_merge_default_override_fn,
```suggestion leaf_merge_fn: Callable[[Any, Any], Any] = tree_merge_default_override_fn, ```
axlearn
github_2023
python
992
apple
ruomingp
@@ -277,3 +286,51 @@ def __call__(self, cfg: SpmdTrainer.Config) -> SpmdTrainer.Config: for config_modifier_fn in self._config_modifiers: cfg = config_modifier_fn(cfg) return cfg + + +class FP8ConfigModifier(ConfigModifier): + """Update the trainer config to use FP8 training.""" + + @config_class + class Config(ConfigModifier.Config): + """Configure FP8ConfigModifier. See QuantizedDotGeneral.Config.""" + + fp8_amax_history_length: Required[int] = REQUIRED + + def __call__(self, cfg: SpmdTrainer.Config) -> SpmdTrainer.Config: + """Override dense layer to use FP8 quantized dot and add gradient rules for FP8 stats.""" + override_cfg: FP8ConfigModifier.Config = self.config + quantized_dot_general = QuantizedDotGeneral.default_config().set( + quantization_type=DotGeneralQuantizationType.FP_8, + fp8_amax_history_length=override_cfg.fp8_amax_history_length, + ) + + def visit_fn(_, value): + if isinstance(value, DenseGeneralBaseLayer.Config): + value.quantized_dot_general = quantized_dot_general + + def enter_fn(_, value, default_kv): + return None if isinstance(value, DenseGeneralBaseLayer.Config) else default_kv + + cfg.visit(visit_fn=visit_fn, enter_fn=enter_fn) + + update_cfg: OverrideInplaceUpdateTransformation.Config = ( + OverrideInplaceUpdateTransformation.default_config() + ) + update_cfg.rules = [ + f".*/{x}" + for x in [ + "input_scale", + "kernel_scale", + "output_grad_scale", + "input_amax_history", + "kernel_amax_history", + "output_grad_amax_history", + ] + ] + transformation = maybe_instantiate(cfg.learner.optimizer)
Usually we avoid instantiation when constructing configs to preserve the readability of configs. Can we defer the `maybe_instantiate` call to `OverrideInplaceUpdateTransformation.__init__`?
axlearn
github_2023
python
992
apple
ruomingp
@@ -277,3 +286,51 @@ def __call__(self, cfg: SpmdTrainer.Config) -> SpmdTrainer.Config: for config_modifier_fn in self._config_modifiers: cfg = config_modifier_fn(cfg) return cfg + + +class FP8ConfigModifier(ConfigModifier): + """Update the trainer config to use FP8 training.""" + + @config_class + class Config(ConfigModifier.Config): + """Configure FP8ConfigModifier. See QuantizedDotGeneral.Config.""" + + fp8_amax_history_length: Required[int] = REQUIRED + + def __call__(self, cfg: SpmdTrainer.Config) -> SpmdTrainer.Config: + """Override dense layer to use FP8 quantized dot and add gradient rules for FP8 stats.""" + override_cfg: FP8ConfigModifier.Config = self.config + quantized_dot_general = QuantizedDotGeneral.default_config().set( + quantization_type=DotGeneralQuantizationType.FP_8, + fp8_amax_history_length=override_cfg.fp8_amax_history_length, + ) + + def visit_fn(_, value): + if isinstance(value, DenseGeneralBaseLayer.Config): + value.quantized_dot_general = quantized_dot_general + + def enter_fn(_, value, default_kv): + return None if isinstance(value, DenseGeneralBaseLayer.Config) else default_kv + + cfg.visit(visit_fn=visit_fn, enter_fn=enter_fn) + + update_cfg: OverrideInplaceUpdateTransformation.Config = ( + OverrideInplaceUpdateTransformation.default_config() + ) + update_cfg.rules = [ + f".*/{x}" + for x in [ + "input_scale", + "kernel_scale", + "output_grad_scale", + "input_amax_history", + "kernel_amax_history", + "output_grad_amax_history",
I wonder how we can keep this list in sync with quantized_dot_general/layers.py. Consider * Defining them as an enum in `quantized_dot_general/common.py` and enumerate the enum members here * Move this class to `quantized_dot_general/update_transformation.py` * Change quantized_dot_general/layers.py to use the enums instead of the string literals?
axlearn
github_2023
python
992
apple
ruomingp
@@ -263,3 +271,77 @@ def mask_tree(tree: dict, *, keep: dict, mask_value: Any) -> dict: tree, is_leaf=lambda x: x is None, ) + + +class OverrideInplaceUpdateTransformation(WrappedPartitionedGradientTransformation): + """An update transformation that provides rules to override inplace updates. + + This update transformation moves gradients that match rules in `delta_updates` to + `inplace_updates`, then applies `PartionedGradientTransformation.update`. Also, optimizer + states won't be created for parameters that match these rules.
Another way to look at this is that we are applying different optimizers to different params. Specifically, we apply a special optimizer for some params where the optimzier simply takes the "gradient" as the new param value. If this makes sense, we can introduce the concept of a composite optimizer containing sub-optimizers, similar to https://github.com/apple/axlearn/blob/fde078db3a4733cfd4c3a67b83276329f2fb7716/axlearn/common/learner.py#L379-L388 and it can be configured accordingly. Would this be a more general solution?
axlearn
github_2023
python
992
apple
ruomingp
@@ -1512,6 +1519,158 @@ def _check_masking(self, tree: Nested[Any], rule: str): F(learner, method="update", prng_key=None, state=state, inputs=[updates], is_training=True) + @parameterized.parameters(False, True) + def test_fp8_override_update(self, use_override_inplace_update): + """Tests FP8 with `OverrideInplaceUpdateTransformation` + + FP8 should work correctly when using `OverrideInplaceUpdateTransformation` + (use_override_inplace_update=True) and it doesn't work otherwise. + """ + if jax.default_backend() != "gpu": + self.skipTest("Need H100 for this test.") + # Arbitrary values that don't matter. + learning_rate = config_for_function(schedule.stepwise).set( + sub=[0.1, 0.01, 0.001], + start_step=[100, 200], + ) + transformation = config_for_function(chain).set( + args=( + config_for_function(clip_by_global_norm), + config_for_function(adamw_optimizer).set( + learning_rate=learning_rate, b1=0.9, b2=0.95, eps=1e-7 + ), + ), + ) + if use_override_inplace_update: + optimizer: OverrideInplaceUpdateTransformation.Config = ( + OverrideInplaceUpdateTransformation.default_config() + ) + optimizer.transformation = transformation + optimizer.rules = [ + f".*{x}" + for x in [ + "input_scale", + "kernel_scale", + "output_grad_scale", + "input_amax_history", + "kernel_amax_history", + "output_grad_amax_history",
Use the constants?
axlearn
github_2023
python
992
apple
ruomingp
@@ -1512,6 +1519,158 @@ def _check_masking(self, tree: Nested[Any], rule: str): F(learner, method="update", prng_key=None, state=state, inputs=[updates], is_training=True) + @parameterized.parameters(False, True) + def test_fp8_override_update(self, use_override_inplace_update): + """Tests FP8 with `OverrideInplaceUpdateTransformation` + + FP8 should work correctly when using `OverrideInplaceUpdateTransformation` + (use_override_inplace_update=True) and it doesn't work otherwise. + """ + if jax.default_backend() != "gpu": + self.skipTest("Need H100 for this test.") + # Arbitrary values that don't matter. + learning_rate = config_for_function(schedule.stepwise).set( + sub=[0.1, 0.01, 0.001], + start_step=[100, 200], + ) + transformation = config_for_function(chain).set( + args=( + config_for_function(clip_by_global_norm), + config_for_function(adamw_optimizer).set( + learning_rate=learning_rate, b1=0.9, b2=0.95, eps=1e-7 + ), + ), + ) + if use_override_inplace_update: + optimizer: OverrideInplaceUpdateTransformation.Config = ( + OverrideInplaceUpdateTransformation.default_config() + ) + optimizer.transformation = transformation + optimizer.rules = [ + f".*{x}" + for x in [ + "input_scale", + "kernel_scale", + "output_grad_scale", + "input_amax_history", + "kernel_amax_history", + "output_grad_amax_history", + ] + ] + else: + optimizer = transformation + cfg = Learner.default_config().set(name="test", optimizer=optimizer) + learner: Learner = cfg.instantiate(parent=None) + + q_dot_cfg: QuantizedDotGeneral.Config = QuantizedDotGeneral.default_config() + q_dot_cfg.quantization_type = DotGeneralQuantizationType.FP_8 + q_dot_cfg.name = "quantized_dot_general_layer" + q_dot_cfg.fp8_amax_history_length = 2 + quantized_dot_general_layer: QuantizedDotGeneral = q_dot_cfg.instantiate(parent=None) + params = quantized_dot_general_layer.initialize_parameters_recursively( + prng_key=jax.random.PRNGKey(123) + ) + inputs = [ + "bd,dh->bh", + jax.random.normal( + 
jax.random.PRNGKey(0), + [32, 32], + dtype=jnp.bfloat16, + ), + jax.random.normal( + jax.random.PRNGKey(1), + [32, 32], + dtype=jnp.bfloat16, + ), + ] + + model_params = jax.tree.map(lambda x: OptParam(x, None, 1.0), params) + state = learner.init(model_params=model_params) + + def forward_fn(model_params, inputs): + model_output_collection = new_output_collection() + with child_context( + "quantized_dot_general_layer", + module=quantized_dot_general_layer, + state=model_params, + prng_key=jax.random.PRNGKey(5), + output_collection=model_output_collection, + ): + out = quantized_dot_general_layer.einsum_maybe_quantized( + inputs[0], activation=inputs[1], kernel=inputs[2] + ) + loss = jnp.sum(out) + return ForwardOutputs( + loss=loss, + aux=out, + output_collection=model_output_collection, + ) + + def step(): + return F( + learner, + method="forward_and_backward", + is_training=True, + prng_key=jax.random.PRNGKey(123), + state=state, + inputs=dict(fn=forward_fn, inputs=inputs, opt_params=model_params), + ) + + fwd_bwd_outputs, learner_output_collection = step() + visited_adam_state = False + # Before updating the amax history for delayed scaling, fp8 is less accurate. + self.assertFalse( + jnp.allclose( + jnp.einsum(*inputs), fwd_bwd_outputs.forward_outputs.aux, atol=0.5, rtol=0.2 + ) + ) + self.assertTrue( + jnp.allclose(jnp.einsum(*inputs), fwd_bwd_outputs.forward_outputs.aux, atol=0.5, rtol=1) + ) + + # Assert that no optimizer states will be created for FP8 scales and amax history. 
+ def assert_empty_optimizer(adam_state: optax.ScaleByAdamState): + nonlocal visited_adam_state + if isinstance(adam_state, optax.ScaleByAdamState): + is_leaf = lambda x: x is None + if use_override_inplace_update: + jax.tree.map(self.assertIsNone, adam_state.mu, is_leaf=is_leaf) + jax.tree.map(self.assertIsNone, adam_state.nu, is_leaf=is_leaf) + else: + jax.tree.map(self.assertIsNotNone, adam_state.mu, is_leaf=is_leaf) + jax.tree.map(self.assertIsNotNone, adam_state.nu, is_leaf=is_leaf) + visited_adam_state = True + + jax.tree.map( + assert_empty_optimizer, + learner_output_collection.state_updates, + is_leaf=lambda x: isinstance(x, optax.ScaleByAdamState), + ) + self.assertTrue(visited_adam_state) + + updated_params = fwd_bwd_outputs.backward_outputs.updated_params + # After learner update, amax history should be positive. + if use_override_inplace_update: + self.assertGreater(updated_params["input_amax_history"][0], 0.0) + self.assertGreater(updated_params["kernel_amax_history"][0], 0.0) + self.assertGreater(updated_params["output_grad_amax_history"][0], 0.0) + else: + # If using a regualr update transformation, learner will try to use adam rule for + # these parameters, resulting in negative values. + self.assertLess(updated_params["input_amax_history"][0], 0.0) + self.assertLess(updated_params["kernel_amax_history"][0], 0.0) + self.assertLess(updated_params["output_grad_amax_history"][0], 0.0)
Use the constants from FP8_AMAX_HISTORY_PARAM_NAMES?
axlearn
github_2023
python
992
apple
ruomingp
@@ -63,6 +64,14 @@ class ClippingChoice(Enum): OUTPUT_ACTIVATION = 1 +FP8_SCALE_PARAM_NAMES = ["input_scale", "kernel_scale", "output_grad_scale"]
Consider using enum: ```suggestion class Fp8ScaleParam(enum.Enum): INPUT_SCALE = "input_scale" ... ``` This avoids spelling mistakes.
axlearn
github_2023
python
992
apple
ruomingp
@@ -174,5 +178,42 @@ def test_chain_config_modifier(self): self.assertEqual(cfg.learner.forward_fn_transformation.steps, 4) +class FP8ConfigModifierTest(test_utils.TestCase): + @parameterized.parameters([True, False]) + def test_fp8_config_modifier(self, use_config_fn): + cfg: SpmdTrainer.Config = SpmdTrainer.default_config().set( + model=DummyModel.default_config() + ) + if use_config_fn: + cfg.learner.optimizer = config_for_function(sgd_optimizer).set( + learning_rate=0.5, + decouple_weight_decay=True, + ) + else: + cfg.learner.optimizer = sgd_optimizer( + learning_rate=0.5, + decouple_weight_decay=True, + ) + + cfg_modifier = ( + FP8ConfigModifier.default_config().set(fp8_amax_history_length=1).instantiate() + ) + cfg = cfg_modifier(cfg) + + self.assertIsInstance(cfg.learner.optimizer, OverrideInplaceUpdateTransformation.Config) + self.assertEqual( + cfg.learner.optimizer.rules, + [ + ".*/input_scale", + ".*/kernel_scale", + ".*/output_grad_scale", + ".*/input_amax_history", + ".*/kernel_amax_history", + ".*/output_grad_amax_history",
Use the constant names?
axlearn
github_2023
python
992
apple
ruomingp
@@ -1158,6 +1159,63 @@ def prune_tree( return in_tree +def tree_merge_default_leaf_merge_fn(primary: Any, secondary: Any):
The behavior of this function still seem too complicated to be the default function for tree_merge. Can we ask the user to specify the merge function explicitly?
axlearn
github_2023
python
993
apple
kelvin-zou
@@ -352,6 +354,20 @@ def get_trainer_kwargs( ), *trn2_config.module_modifications, *trn2_config.partition_spec_modifications, + GradientAccumulationModifier.default_config().set( + grad_acc_steps=4, + minibatch_partitioner=config_for_function( + partition_by_path_rank + ).set( + path_rank_to_partition={ + # Note: the batch axes are different here than in + # `cfg.batch_axis_names`, + # as we partition sequence dim over `seq`. + (None, 1): PartitionSpec(("data", "expert", "fsdp")),
I am wondering, if we have a default input partition with axis=0 on ("data", "expert", "fsdp") and axis=1 on "seq", do we still need this?
axlearn
github_2023
python
993
apple
kelvin-zou
@@ -73,23 +75,25 @@ def _make_scan_minibatch_inputs( param_noise_key: The `param_noise_key` from the ForwardFn inputs minibatch_size: Size of the minibatch. minibatch_index: Current scan minibatch index. + minibatch_partitioner: If not None, applies additional sharding constraints + on each minibatch created. Returns: A tuple of minibatch inputs which of the same structure as `inputs` and new (carry) forward_key and param_noise_key. """ - minibatch_input = with_sharding_constraint( - jax.tree.map( - lambda x: jax.lax.dynamic_slice_in_dim( - x, - start_index=minibatch_index * minibatch_size, - slice_size=minibatch_size, - axis=0, - ), - inputs["input_batch"], + minibatch_input = jax.tree.map( + lambda x: jax.lax.dynamic_slice_in_dim( + x, + start_index=minibatch_index * minibatch_size, + slice_size=minibatch_size, + axis=0, ), - input_partition_spec(),
To me, it seems rather a hack than a proper solution, that is, we want to have a different `input_partition_spec()` than the default one, then we need this?
axlearn
github_2023
python
993
apple
apghml
@@ -57,39 +59,38 @@ def _make_scan_minibatch_inputs( param_noise_key: Tensor, minibatch_size: int, minibatch_index: int, + minibatch_partitioner: Optional[InputPartitionFn], ) -> tuple[Nested[Tensor], Tensor, Tensor]: """Creates minibatch inputs from inputs. This is a utility function that is only meant to be called from within a scan function body and is meant to slice the inputs into `minibatch_size` sized slices to run the ForwardFn on. - Note that this only preserves the input sharding if the `input_partition_spec` - returns the correct partition spec to shard the input slices with. - Args: inputs: Same pytree as ForwardFn inputs. forward_key: The `forward_key` from the ForwardFn inputs param_noise_key: The `param_noise_key` from the ForwardFn inputs minibatch_size: Size of the minibatch. minibatch_index: Current scan minibatch index. + minibatch_partitioner: If not None, applies additional sharding constraints
Could we add unit tests that cover this argument?
axlearn
github_2023
python
993
apple
apghml
@@ -57,39 +59,38 @@ def _make_scan_minibatch_inputs( param_noise_key: Tensor, minibatch_size: int, minibatch_index: int, + minibatch_partitioner: Optional[InputPartitionFn],
Echoing Kelvin's comment, could you explain concretely why we need this functionality? If it's just something that might be useful, maybe we can wait until we are certain that we will need it?
axlearn
github_2023
python
993
apple
apghml
@@ -134,16 +136,32 @@ def with_minibatch_steps( TODO(cemkoc): Investigate the slight difference in loss curves when decorated. + A minibatch_partitioner is used to partition minibatch inputs to the original_func. + Note that if minibatch_partitioner is None, the default minibatch partitioner is used which + partitions the minibatch along (("data", "expert", "fsdp"), "seq"). Otherwise the + minibatch_partitioner passed in is used. + Args: steps: Number of gradient accumulation steps. metric_accumulator: A `MetricAccumulator` to accumulate minibatch summaries from the forward output. grad_dtype: Optional dtype to cast the grads back to after accumulating in fp32. + minibatch_partitioner: If not None, contains config for a partitioner that applies + additional sharding constraints on each minibatch created. Returns: Decorated ForwardFn. """ + # Default partitioner for minibatches. + if not minibatch_partitioner: + minibatch_partitioner = partition_by_path_rank( + path_rank_to_partition={
Can we default this to the same sharding the input is already using along all non-batch axes?
axlearn
github_2023
python
993
apple
apghml
@@ -78,18 +75,16 @@ def _make_scan_minibatch_inputs( A tuple of minibatch inputs which of the same structure as `inputs` and new (carry) forward_key and param_noise_key. """ - minibatch_input = with_sharding_constraint( - jax.tree.map( - lambda x: jax.lax.dynamic_slice_in_dim( - x, - start_index=minibatch_index * minibatch_size, - slice_size=minibatch_size, - axis=0, - ), - inputs["input_batch"], + minibatch_input = jax.tree.map( + lambda x: jax.lax.dynamic_slice_in_dim( + x, + start_index=minibatch_index * minibatch_size, + slice_size=minibatch_size, + axis=0, ), - input_partition_spec(), + inputs["input_batch"],
Suppose we have a global input batch of size 100 running on 10 chips (so a per chip size of 10) and we want to switch to doing 10 grad accumulation steps each with a global batch size of 10 (1 per chip per accumulation step). Suppose that the input is originally sharded evenly across the chips (first 10 on first chip, second 10 on second chip, etc). Then when we get the first slice of 10 for the first grad accumulation step, won't all these examples be on the same chip? Will that cause a problem? (E.g., if we worry XLA might not automatically reshard the examples across chips?) Maybe we should reshard the batch axis only?
axlearn
github_2023
python
993
apple
apghml
@@ -172,12 +167,26 @@ def fwd_helper( otherwise None. """ minibatch_size = _compute_minibatch_size(inputs["input_batch"], steps=steps) + + # Create a sample minibatch for the carry buffer creation below
Could you explain in more detail why this is needed?
axlearn
github_2023
python
993
apple
apghml
@@ -172,12 +160,56 @@ def fwd_helper( otherwise None. """ minibatch_size = _compute_minibatch_size(inputs["input_batch"], steps=steps) + + def reshape_for_scan(x: Tensor): + """Helper function that adds a minibatch dimension while evenly dividing + batches across gradient accumulation iterations. + + Input dimension is [GBS, seq], this first reshaped to [MBS, steps, seq],
Replace the acronyms with full names?
axlearn
github_2023
python
993
apple
apghml
@@ -172,12 +160,56 @@ def fwd_helper( otherwise None. """ minibatch_size = _compute_minibatch_size(inputs["input_batch"], steps=steps) + + def reshape_for_scan(x: Tensor): + """Helper function that adds a minibatch dimension while evenly dividing + batches across gradient accumulation iterations. + + Input dimension is [GBS, seq], this first reshaped to [MBS, steps, seq], + then transposed to [steps, MBS, seq] this ensures that batches picked + up from the global batch in a staggered pattern. + + The main benefit is that this avoids extra communication incurred in reshard + for every minibatch. + + Args: + x: Tensor to be reshaped. + + Returns: + The reshaped tensor. + """ + if x.shape[0] % minibatch_size != 0: + raise ValueError( + f"minibatch_size {minibatch_size} does not evenly divide " + f"global batch size of {x.shape[0]}" + ) + + x = x.reshape(minibatch_size, -1, *x.shape[1:]) + # Set up transpose to swap the first two dimensions. + dims = list(range(x.ndim)) + dims[0], dims[1] = dims[1], dims[0] + return x.transpose(dims)
Could we replace these three lines with one line if we use `jnp.moveaxis`?
axlearn
github_2023
python
993
apple
apghml
@@ -1,15 +1,143 @@ # Copyright © 2024 Apple Inc. """Test module for gradient_accumulation.py""" +from typing import Callable + import chex import jax import jax.numpy as jnp +import numpy as np +import pytest from absl.testing import absltest, parameterized +from jax.experimental.pjit import pjit from axlearn.common import gradient_accumulation, test_utils from axlearn.common.metrics import MetricAccumulator, WeightedScalar from axlearn.common.module import new_output_collection from axlearn.common.update_transformation import ForwardOutputs +from axlearn.common.utils import Nested, PartitionSpec, Tensor, tree_paths + + +class TestMinibatchSharding(test_utils.TestCase): + """Test `with_minibatch_steps` decorator keeps the same sharding + for minibatches as the global input batch.""" + + def create_dummy_inputs(self, steps): + # Multiply by accumulation steps + self.batch_size = 4 * steps + self.seq_len = 8 + self.params = dict( + w=jnp.asarray([0.0, 2.0, 2.0, -3.0]), + b=jnp.asarray([0.0, -1.0, 0.0, 0.0]), + ) + self.params_sharding = dict( + w=None, + b=None, + ) + + self.input_batch = { + "input_ids": jnp.ones((self.batch_size, self.seq_len), dtype=jnp.int32), + "target_labels": jnp.ones((self.batch_size, self.seq_len), dtype=jnp.int32), + "target_num_bytes": jnp.ones((self.batch_size,), dtype=jnp.int32), + } + self.input_batch_sharding = { + "input_ids": PartitionSpec(("data"), "seq"), + "target_labels": PartitionSpec(("data"), "seq"), + "target_num_bytes": PartitionSpec("data"), + } + forward_key, param_noise_key = jax.random.split(jax.random.PRNGKey(0), 2) + self.inputs = dict( + input_batch=self.input_batch, + forward_key=forward_key, + param_noise_key=param_noise_key, + ) + self.inputs_sharding = dict( + input_batch=self.input_batch_sharding, + forward_key=None, + param_noise_key=None, + ) + + def create_loss_fn(self, expected_minibatch_sharding): + """Simple ForwardFn with a check for minibatch sharding.""" + + def _check_equal_sharding(input_batch: 
Nested[Tensor], expected: dict): + """Checks if sharding for input_batch matches expected.""" + + def callback_sharding( + *, + input_batch: Nested[Tensor], + callback: Callable[[str, jax.sharding.Sharding], None], + ): + """Invokes callback with the sharding. + The callback is invoked with (path: str, sharding: Sharding). + """ + + def check_sharding(path, value): + jax.debug.inspect_array_sharding( + value, callback=lambda sharding: callback(path, sharding) + ) + + jax.tree_map(check_sharding, tree_paths(input_batch), input_batch) + return input_batch + + callback = lambda path, sharding: self.assertEqual(expected[path], sharding.spec) + + callback_sharding( + input_batch=input_batch, + callback=callback, + ) + + def loss_fn(*, model_params, inputs) -> ForwardOutputs: + """Simple ForwardFn.""" + _check_equal_sharding( + input_batch=inputs["input_batch"], + expected=expected_minibatch_sharding, + ) + loss = -jax.nn.log_softmax(model_params["w"] + model_params["b"])[1] + output_collection = new_output_collection() + output_collection.state_updates["w"] = model_params["w"] + 1 + output_collection.state_updates["loss"] = WeightedScalar(loss, 1) + return ForwardOutputs(loss=loss, aux={}, output_collection=output_collection)
IIUC, the issue you fixed regarding the batch size in the output collection only happens if the output collection contains something with a leading `batch_size` dim, which I couldn't find here? Do the tests in the current version of the PR fail without that specific fix? If not, can we add a test that fails without that specific fix (or make sure one of the existing tests will fail without it)?
axlearn
github_2023
python
993
apple
apghml
@@ -28,11 +157,14 @@ def test_minibatch_steps_grads_and_loss(self, steps): def loss_fn(*, model_params, inputs) -> ForwardOutputs: """Simple ForwardFn.""" - del inputs loss = -jax.nn.log_softmax(model_params["w"] + model_params["b"])[1] output_collection = new_output_collection() output_collection.state_updates["w"] = model_params["w"] + 1 output_collection.state_updates["loss"] = WeightedScalar(loss, 1) + # This output_collection entry is used to check if the gradient accumulation decorator + # correctly handles outputs that depend on batch size during carry buffer creation. + output_collection.summaries["output_with_batch_dimension"] = inputs["input_batch"]
Do we have an assert statement somewhere that does an equality / allclose check on this value returned in the output collection to make sure the correct value ultimately gets returned when using minibatching?
axlearn
github_2023
python
993
apple
markblee
@@ -172,12 +167,54 @@ def fwd_helper( otherwise None. """ minibatch_size = _compute_minibatch_size(inputs["input_batch"], steps=steps) + + def reshape_for_scan(x: Tensor): + """Helper function that adds a minibatch dimension while evenly dividing + batches across gradient accumulation iterations. + + Input dimension is [Global logical Batch Size, Sequence], this first reshaped to + [Minibatch Size, Steps, Sequence], + then transposed to [steps, Minibatch Size, Sequence] this ensures that + batches picked up from the global batch in a staggered pattern. + + The main benefit is that this avoids extra communication incurred in reshard + for every minibatch. + + Args: + x: Tensor to be reshaped. + + Returns: + The reshaped tensor. + """ + if x.shape[0] % minibatch_size != 0: + raise ValueError( + f"minibatch_size {minibatch_size} does not evenly divide " + f"global batch size of {x.shape[0]}" + ) + + x = x.reshape(minibatch_size, -1, *x.shape[1:]) + return jnp.swapaxes(x, 0, 1)
I guess this retains any sharding annotations on global batch size to the mini batch size -- OOI what happens if the minibatch doesn't divide the global batch sharding? Is it silently ignored?
axlearn
github_2023
python
993
apple
markblee
@@ -172,12 +167,54 @@ def fwd_helper( otherwise None. """ minibatch_size = _compute_minibatch_size(inputs["input_batch"], steps=steps) + + def reshape_for_scan(x: Tensor): + """Helper function that adds a minibatch dimension while evenly dividing + batches across gradient accumulation iterations. + + Input dimension is [Global logical Batch Size, Sequence], this first reshaped to + [Minibatch Size, Steps, Sequence], + then transposed to [steps, Minibatch Size, Sequence] this ensures that + batches picked up from the global batch in a staggered pattern. + + The main benefit is that this avoids extra communication incurred in reshard + for every minibatch. + + Args: + x: Tensor to be reshaped. + + Returns: + The reshaped tensor. + """ + if x.shape[0] % minibatch_size != 0: + raise ValueError( + f"minibatch_size {minibatch_size} does not evenly divide " + f"global batch size of {x.shape[0]}" + ) + + x = x.reshape(minibatch_size, -1, *x.shape[1:]) + return jnp.swapaxes(x, 0, 1) + + inputs["input_batch"] = jax.tree_map(reshape_for_scan, inputs["input_batch"]) + + # Create a sample minibatch for the carry buffer creation below + ( + sample_minibatch_inputs, + _, + _, + ) = _make_scan_minibatch_inputs(
nit -- ```suggestion sample_minibatch_inputs, *_ = _make_scan_minibatch_inputs( ```
axlearn
github_2023
python
993
apple
markblee
@@ -172,12 +167,54 @@ def fwd_helper( otherwise None. """ minibatch_size = _compute_minibatch_size(inputs["input_batch"], steps=steps) + + def reshape_for_scan(x: Tensor): + """Helper function that adds a minibatch dimension while evenly dividing + batches across gradient accumulation iterations. + + Input dimension is [Global logical Batch Size, Sequence], this first reshaped to
nit -- ```suggestion Input dimension is [global_logical_batch_size, seq_len], this first reshaped to ``` ditto for below.
axlearn
github_2023
python
932
apple
markblee
@@ -219,6 +221,26 @@ def __init__( super().__init__(cfg, parent=parent) cfg = self.config + self.gcp_workload_monitor = None
The trainer shouldn't have any dependencies on GCP-specific logic. BTW, I wonder whether the monitoring events can be inferred from the events recorded by the `measurement` utils -- what are the main differences?
axlearn
github_2023
python
1,055
apple
markblee
@@ -95,6 +96,20 @@ def _cleanup(self): def _get_flink_cluster_name(self) -> str: return f"{self.config.name}-flink-cluster" + def _get_single_node_topology(self) -> str: + """This method returns single node topology for large slice of TPU."""
```suggestion """This method returns the single node topology for the configured TPU type.""" ```
axlearn
github_2023
python
1,055
apple
markblee
@@ -95,6 +96,20 @@ def _cleanup(self): def _get_flink_cluster_name(self) -> str: return f"{self.config.name}-flink-cluster" + def _get_single_node_topology(self) -> str: + """This method returns single node topology for large slice of TPU.""" + tpu_type = infer_tpu_type(self.config.accelerator.instance_type) + cores, hosts = infer_tpu_cores(tpu_type), infer_tpu_workers(tpu_type) + if cores % hosts != 0: + raise ValueError( + f"Number of cores:{cores} is not divisible by hosts:{hosts} for TPU type:{tpu_type}" + ) + single_host_cores = cores // hosts + single_host_tpu_name = f"{infer_tpu_version(tpu_type)}-{single_host_cores}" + if not single_host_tpu_name in USER_FACING_NAME_TO_SYSTEM_CHARACTERISTICS: + raise RuntimeError(f"Can't find spect for {single_host_tpu_name}")
```suggestion raise RuntimeError(f"Can't find specs for {single_host_tpu_name}.") ```
axlearn
github_2023
python
1,050
apple
markblee
@@ -296,21 +296,21 @@ def check_tpu_splash_attention( ) if has_segment_ids: raise SplashAttentionUnsupportedError( - "The public API for SplashAttention that we " - "currently use does not support segment ids." + "The public API for SplashAttention that we currently use does not support segment ids." + ) + if is_decoding: + # tpu_decoding.py covers this scenario. + raise SplashAttentionUnsupportedError( + "It does not support decoding because the query len must be a block size or bigger."
```suggestion "Please use `tpu_decoding` for decoding." ```
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,303 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None,
Do we still need `window_size`? If so, explain why?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,303 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels",
Add comments for the args?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,303 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + self._current_token_count = 0 + self._current_examples_list = [] + # Both of them are used for checkpointing. + self._parent_sequence_start_state = None + self._parent_sequence_end_state = None
Add comments for the internal variables?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,303 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + self._current_token_count = 0 + self._current_examples_list = [] + # Both of them are used for checkpointing. 
+ self._parent_sequence_start_state = None + self._parent_sequence_end_state = None + self._parent_state = self._parent.get_state() + + def _reach_window_limit(self) -> bool: + """Determines if we have already reached window limit.""" + return self._window_size is not None and self._index % self._window_size == 0 + + def _pop_element(self) -> Optional[dict]: + """Pops element from self._current_example_list.""" + if not self._current_examples_list: + return None + concat_target_labels = np.concatenate( + [x[self._input_key] for x in self._current_examples_list], axis=-1 + ) + # Total tokens to pop could be up to self._max_len + total_tokens_to_pop = min(len(concat_target_labels), self._max_len) + self._current_token_count -= total_tokens_to_pop + if self._current_token_count > 0: + self._current_examples_list = [{self._input_key: concat_target_labels[self._max_len :]}] + self._parent_sequence_start_state = self._parent_sequence_end_state + else: + self._current_examples_list = [] + self._parent_sequence_start_state = None + assert self._current_token_count >= 0 + + # If all the concat target labels is empty, early return. + if total_tokens_to_pop == 0: + return None
Maybe we can raise StopIteration instead of returning None?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,303 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + self._current_token_count = 0 + self._current_examples_list = [] + # Both of them are used for checkpointing. 
+ self._parent_sequence_start_state = None + self._parent_sequence_end_state = None + self._parent_state = self._parent.get_state() + + def _reach_window_limit(self) -> bool: + """Determines if we have already reached window limit.""" + return self._window_size is not None and self._index % self._window_size == 0 + + def _pop_element(self) -> Optional[dict]: + """Pops element from self._current_example_list.""" + if not self._current_examples_list: + return None + concat_target_labels = np.concatenate( + [x[self._input_key] for x in self._current_examples_list], axis=-1 + ) + # Total tokens to pop could be up to self._max_len + total_tokens_to_pop = min(len(concat_target_labels), self._max_len) + self._current_token_count -= total_tokens_to_pop + if self._current_token_count > 0: + self._current_examples_list = [{self._input_key: concat_target_labels[self._max_len :]}] + self._parent_sequence_start_state = self._parent_sequence_end_state + else: + self._current_examples_list = [] + self._parent_sequence_start_state = None + assert self._current_token_count >= 0 + + # If all the concat target labels is empty, early return. + if total_tokens_to_pop == 0: + return None + + return {self._input_key: concat_target_labels[: self._max_len]} + + def __next__(self): + # Iteratively call __next__ until we yield valid examples. + while True: + # If there are still leftover tokens when we have already reached the window limit, we + # should decide whether to keep this sequence. + if self._current_token_count > 0 and self._reach_window_limit(): + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + return next_element + + # Keeps filling up the sequence until reaching the limit. + # Termination of this while loop means: + # 1. Reaches the sequence_length limit, and ready to output one batch. + # 2. Reaches the window limit. 
+ while self._current_token_count < self._max_len: + self._parent_sequence_end_state = self._parent.get_state() + if not self._parent_sequence_start_state: + self._parent_sequence_start_state = self._parent_sequence_end_state + try: + example = next(self._parent) + except StopIteration as e: + next_element = self._pop_element() + if next_element is not None: + return next_element + else: + raise e + + self._current_examples_list.append(example) + + self._current_token_count += len(example[self._input_key]) + self._index += 1 + # Updates parent state since the index is moved by 1. + self._parent_state = self._parent.get_state() + + if self._reach_window_limit(): + break + + # If there is enough token, we always return a sequence. + if self._current_token_count >= self._max_len: + return self._pop_element() + + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + # If next element is empty, that suggests that the sequence is dropped. + if next_element is not None: + return next_element + + def get_state(self) -> dict[str, Any]: + # TODO(haoshuoh, markblee): All of the parent_state thing could be wrapped in a Packer + # class. + return { + "parent_sequence_start_state": self._parent_sequence_start_state + if self._parent_sequence_start_state + else self._parent.get_state(),
How is _parent_sequence_start_state used?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,303 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + self._current_token_count = 0 + self._current_examples_list = [] + # Both of them are used for checkpointing. 
+ self._parent_sequence_start_state = None + self._parent_sequence_end_state = None + self._parent_state = self._parent.get_state() + + def _reach_window_limit(self) -> bool: + """Determines if we have already reached window limit.""" + return self._window_size is not None and self._index % self._window_size == 0 + + def _pop_element(self) -> Optional[dict]: + """Pops element from self._current_example_list.""" + if not self._current_examples_list: + return None + concat_target_labels = np.concatenate( + [x[self._input_key] for x in self._current_examples_list], axis=-1 + ) + # Total tokens to pop could be up to self._max_len + total_tokens_to_pop = min(len(concat_target_labels), self._max_len) + self._current_token_count -= total_tokens_to_pop + if self._current_token_count > 0: + self._current_examples_list = [{self._input_key: concat_target_labels[self._max_len :]}] + self._parent_sequence_start_state = self._parent_sequence_end_state + else: + self._current_examples_list = [] + self._parent_sequence_start_state = None + assert self._current_token_count >= 0 + + # If all the concat target labels is empty, early return. + if total_tokens_to_pop == 0: + return None + + return {self._input_key: concat_target_labels[: self._max_len]} + + def __next__(self): + # Iteratively call __next__ until we yield valid examples. + while True: + # If there are still leftover tokens when we have already reached the window limit, we + # should decide whether to keep this sequence. + if self._current_token_count > 0 and self._reach_window_limit(): + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + return next_element + + # Keeps filling up the sequence until reaching the limit. + # Termination of this while loop means: + # 1. Reaches the sequence_length limit, and ready to output one batch. + # 2. Reaches the window limit. 
+ while self._current_token_count < self._max_len: + self._parent_sequence_end_state = self._parent.get_state()
How is _parent_sequence_end_state used?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,303 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + self._current_token_count = 0 + self._current_examples_list = [] + # Both of them are used for checkpointing. + self._parent_sequence_start_state = None + self._parent_sequence_end_state = None + self._parent_state = self._parent.get_state() + + def _reach_window_limit(self) -> bool: + """Determines if we have already reached window limit.""" + return self._window_size is not None and self._index % self._window_size == 0 + + def _pop_element(self) -> Optional[dict]: + """Pops element from self._current_example_list."""
Comment on when we return None?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,303 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + self._current_token_count = 0 + self._current_examples_list = [] + # Both of them are used for checkpointing. 
+ self._parent_sequence_start_state = None + self._parent_sequence_end_state = None + self._parent_state = self._parent.get_state() + + def _reach_window_limit(self) -> bool: + """Determines if we have already reached window limit.""" + return self._window_size is not None and self._index % self._window_size == 0 + + def _pop_element(self) -> Optional[dict]: + """Pops element from self._current_example_list.""" + if not self._current_examples_list: + return None + concat_target_labels = np.concatenate( + [x[self._input_key] for x in self._current_examples_list], axis=-1 + ) + # Total tokens to pop could be up to self._max_len + total_tokens_to_pop = min(len(concat_target_labels), self._max_len) + self._current_token_count -= total_tokens_to_pop + if self._current_token_count > 0: + self._current_examples_list = [{self._input_key: concat_target_labels[self._max_len :]}] + self._parent_sequence_start_state = self._parent_sequence_end_state + else: + self._current_examples_list = [] + self._parent_sequence_start_state = None + assert self._current_token_count >= 0
Move this assert before `if`?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,303 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + self._current_token_count = 0 + self._current_examples_list = [] + # Both of them are used for checkpointing. 
+ self._parent_sequence_start_state = None + self._parent_sequence_end_state = None + self._parent_state = self._parent.get_state() + + def _reach_window_limit(self) -> bool: + """Determines if we have already reached window limit.""" + return self._window_size is not None and self._index % self._window_size == 0 + + def _pop_element(self) -> Optional[dict]: + """Pops element from self._current_example_list.""" + if not self._current_examples_list: + return None + concat_target_labels = np.concatenate( + [x[self._input_key] for x in self._current_examples_list], axis=-1 + ) + # Total tokens to pop could be up to self._max_len + total_tokens_to_pop = min(len(concat_target_labels), self._max_len) + self._current_token_count -= total_tokens_to_pop + if self._current_token_count > 0: + self._current_examples_list = [{self._input_key: concat_target_labels[self._max_len :]}] + self._parent_sequence_start_state = self._parent_sequence_end_state + else: + self._current_examples_list = [] + self._parent_sequence_start_state = None + assert self._current_token_count >= 0 + + # If all the concat target labels is empty, early return. + if total_tokens_to_pop == 0: + return None + + return {self._input_key: concat_target_labels[: self._max_len]} + + def __next__(self):
Would it simplify the implementation if we override `__iter__` and use a yield-based implementation? def __iter__(self): end_of_parent = False while not end_of_parent or self._current_token_count > 0: try: while self._current_token_count < self._max_len: self._append_example_to_buffer(...) except StopIteration: end_of_parent = True yield self._pop_sequence_from_buffer()
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,310 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + + window_size is used for parity with windowed_packing. It will also be used if we want to pack + multimodal data which is not represented in sequence, thus naturally has a limit in how many + examples we can pack due to memory limit. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", # The key in the input examples to use for packing. + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + self._current_token_count = 0
```suggestion # Total number of tokens in `self._current_examples_list`. self._current_token_count = 0 ```
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,310 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + + window_size is used for parity with windowed_packing. It will also be used if we want to pack + multimodal data which is not represented in sequence, thus naturally has a limit in how many + examples we can pack due to memory limit. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", # The key in the input examples to use for packing. + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + self._current_token_count = 0 + self._current_examples_list = []
```suggestion # The examples in the current buffer. self._current_examples_list = [] ```
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,310 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + + window_size is used for parity with windowed_packing. It will also be used if we want to pack + multimodal data which is not represented in sequence, thus naturally has a limit in how many + examples we can pack due to memory limit. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", # The key in the input examples to use for packing. + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + self._current_token_count = 0 + self._current_examples_list = [] + # For checkpointing support, we need to maintain what exact are the examples in current + # sequence. self._parent_sequence_start_state and self._parent_sequence_end_state are used + # to store to starting and ending state of the examples. + self._parent_sequence_start_state = None + self._parent_sequence_end_state = None
```suggestion # If not None, the state of `self._parent` before the last example in `self._current_examples_list` was added. self._parent_sequence_end_state = None ```
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,310 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + + window_size is used for parity with windowed_packing. It will also be used if we want to pack + multimodal data which is not represented in sequence, thus naturally has a limit in how many + examples we can pack due to memory limit. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", # The key in the input examples to use for packing. + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + self._current_token_count = 0 + self._current_examples_list = [] + # For checkpointing support, we need to maintain what exact are the examples in current + # sequence. self._parent_sequence_start_state and self._parent_sequence_end_state are used + # to store to starting and ending state of the examples. + self._parent_sequence_start_state = None
```suggestion # If not None, the state of `self._parent` before the first example in `self._current_examples_list` was added. # Must be None if `self._current_token_count == 0`. self._parent_sequence_start_state = None ```
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,310 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + + window_size is used for parity with windowed_packing. It will also be used if we want to pack + multimodal data which is not represented in sequence, thus naturally has a limit in how many + examples we can pack due to memory limit. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", # The key in the input examples to use for packing. + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + self._current_token_count = 0 + self._current_examples_list = [] + # For checkpointing support, we need to maintain what exact are the examples in current + # sequence. self._parent_sequence_start_state and self._parent_sequence_end_state are used + # to store to starting and ending state of the examples. 
+ self._parent_sequence_start_state = None + self._parent_sequence_end_state = None + self._parent_state = self._parent.get_state() + + def _reach_window_limit(self) -> bool: + """Determines if we have already reached window limit.""" + return self._window_size is not None and self._index % self._window_size == 0 + + def _pop_element(self) -> Optional[dict]: + """Pops element from self._current_example_list.""" + # If there is no examples in current sequence, return None. + if not self._current_examples_list: + return None + concat_target_labels = np.concatenate( + [x[self._input_key] for x in self._current_examples_list], axis=-1 + ) + # Total tokens to pop could be up to self._max_len + total_tokens_to_pop = min(len(concat_target_labels), self._max_len) + self._current_token_count -= total_tokens_to_pop + assert self._current_token_count >= 0 + if self._current_token_count > 0: + self._current_examples_list = [{self._input_key: concat_target_labels[self._max_len :]}] + self._parent_sequence_start_state = self._parent_sequence_end_state + else: + self._current_examples_list = [] + self._parent_sequence_start_state = None + + # If all the concat target labels is empty, early return. + if total_tokens_to_pop == 0: + return None + + return {self._input_key: concat_target_labels[: self._max_len]} + + def __next__(self): + # Iteratively call __next__ until we yield valid examples. + while True: + # If there are still leftover tokens when we have already reached the window limit, we + # should decide whether to keep this sequence. + if self._current_token_count > 0 and self._reach_window_limit(): + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + return next_element + + # Keeps filling up the sequence until reaching the limit. + # Termination of this while loop means: + # 1. Reaches the sequence_length limit, and ready to output one batch. + # 2. Reaches the window limit. 
+ while self._current_token_count < self._max_len: + self._parent_sequence_end_state = self._parent.get_state() + if not self._parent_sequence_start_state: + self._parent_sequence_start_state = self._parent_sequence_end_state + try: + example = next(self._parent) + except StopIteration as e: + next_element = self._pop_element() + if next_element is not None: + return next_element + else: + raise e + + self._current_examples_list.append(example) + + self._current_token_count += len(example[self._input_key]) + self._index += 1 + # Updates parent state since the index is moved by 1. + self._parent_state = self._parent.get_state() + + if self._reach_window_limit(): + break + + # If there is enough token, we always return a sequence. + if self._current_token_count >= self._max_len: + return self._pop_element() + + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + # If next element is empty, that suggests that the sequence is dropped. + if next_element is not None: + return next_element + + def get_state(self) -> dict[str, Any]: + # TODO(haoshuoh, markblee): All of the parent_state thing could be wrapped in a Packer + # class. + return { + "parent_sequence_start_state": self._parent_sequence_start_state + if self._parent_sequence_start_state + else self._parent.get_state(), + "parent": self._parent_state, + "index": self._index, + "current_token_count": self._current_token_count, + } + + def _retrieve_packer_states(self, state: dict[str, Any]):
Can we inline this method into `set_state`?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,310 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + + window_size is used for parity with windowed_packing. It will also be used if we want to pack + multimodal data which is not represented in sequence, thus naturally has a limit in how many + examples we can pack due to memory limit. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", # The key in the input examples to use for packing. + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + self._current_token_count = 0 + self._current_examples_list = [] + # For checkpointing support, we need to maintain what exact are the examples in current + # sequence. self._parent_sequence_start_state and self._parent_sequence_end_state are used + # to store to starting and ending state of the examples. + self._parent_sequence_start_state = None + self._parent_sequence_end_state = None + self._parent_state = self._parent.get_state()
Do we need this variable? When is it different from `self._parent.get_state()`?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,313 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion.
We are still missing comments for `max_len` and `input_key`.
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,313 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + + window_size is used for parity with windowed_packing. It will also be used if we want to pack + multimodal data which is not represented in sequence, thus naturally has a limit in how many + examples we can pack due to memory limit. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", # The key in the input examples to use for packing. + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + # Total number of tokens in `self._current_examples_list`. + self._current_token_count = 0 + # The examples in the current buffer. + self._current_examples_list = [] + # For checkpointing support, we need to maintain what exactly are the examples in current + # sequence. self._parent_sequence_start_state and self._parent_sequence_end_state are used + # to store to starting and ending state of the examples. + + # If not None, the state of `self._parent` before the first example in + # `self._current_examples_list` was added. + # Must be None if `self._current_token_count == 0`. 
+ self._parent_sequence_start_state = None + # If not None, the state of `self._parent` before the last example in + # `self._current_examples_list` was added. + self._parent_sequence_end_state = None + + def _reach_window_limit(self) -> bool: + """Determines if we have already reached window limit.""" + return self._window_size is not None and self._index % self._window_size == 0 + + def _pop_element(self) -> Optional[dict]: + """Pops element from self._current_example_list, returns None if the list is empty.""" + # If there is no examples in current sequence, return None. + if not self._current_examples_list: + return None + concat_target_labels = np.concatenate( + [x[self._input_key] for x in self._current_examples_list], axis=-1 + ) + # Total tokens to pop could be up to self._max_len + total_tokens_to_pop = min(len(concat_target_labels), self._max_len) + self._current_token_count -= total_tokens_to_pop + assert self._current_token_count >= 0 + if self._current_token_count > 0: + self._current_examples_list = [{self._input_key: concat_target_labels[self._max_len :]}] + self._parent_sequence_start_state = self._parent_sequence_end_state + else: + self._current_examples_list = [] + self._parent_sequence_start_state = None + + # If all the concat target labels is empty, early return. + if total_tokens_to_pop == 0: + return None + + return {self._input_key: concat_target_labels[: self._max_len]} + + def __next__(self): + # Iteratively call __next__ until we yield valid examples. + while True: + # If there are still leftover tokens when we have already reached the window limit, we + # should decide whether to keep this sequence. + if self._current_token_count > 0 and self._reach_window_limit(): + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + return next_element + + # Keeps filling up the sequence until reaching the limit. + # Termination of this while loop means: + # 1. 
Reaches the sequence_length limit, and ready to output one batch. + # 2. Reaches the window limit. + while self._current_token_count < self._max_len: + self._parent_sequence_end_state = self._parent.get_state() + if not self._parent_sequence_start_state: + self._parent_sequence_start_state = self._parent_sequence_end_state + try: + example = next(self._parent) + except StopIteration as e: + next_element = self._pop_element() + if next_element is not None: + return next_element + else: + raise e + + self._current_examples_list.append(example) + + self._current_token_count += len(example[self._input_key]) + self._index += 1 + + if self._reach_window_limit(): + break + + # If there is enough token, we always return a sequence. + if self._current_token_count >= self._max_len: + return self._pop_element() + + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + # If next element is empty, that suggests that the sequence is dropped. + if next_element is not None: + return next_element + + def get_state(self) -> dict[str, Any]: + # TODO(haoshuoh, markblee): All of the parent_state thing could be wrapped in a Packer + # class. + return { + "parent_sequence_start_state": self._parent_sequence_start_state + if self._parent_sequence_start_state + else self._parent.get_state(),
Can we simplify this by (along with a change in set_state) ```suggestion "parent_sequence_start_state": self._parent_sequence_start_state, ```
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,313 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + + window_size is used for parity with windowed_packing. It will also be used if we want to pack + multimodal data which is not represented in sequence, thus naturally has a limit in how many + examples we can pack due to memory limit. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", # The key in the input examples to use for packing. + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + # Total number of tokens in `self._current_examples_list`. + self._current_token_count = 0 + # The examples in the current buffer. + self._current_examples_list = [] + # For checkpointing support, we need to maintain what exactly are the examples in current + # sequence. self._parent_sequence_start_state and self._parent_sequence_end_state are used + # to store to starting and ending state of the examples. + + # If not None, the state of `self._parent` before the first example in + # `self._current_examples_list` was added. + # Must be None if `self._current_token_count == 0`. 
+ self._parent_sequence_start_state = None + # If not None, the state of `self._parent` before the last example in + # `self._current_examples_list` was added. + self._parent_sequence_end_state = None + + def _reach_window_limit(self) -> bool: + """Determines if we have already reached window limit.""" + return self._window_size is not None and self._index % self._window_size == 0 + + def _pop_element(self) -> Optional[dict]: + """Pops element from self._current_example_list, returns None if the list is empty.""" + # If there is no examples in current sequence, return None. + if not self._current_examples_list: + return None + concat_target_labels = np.concatenate( + [x[self._input_key] for x in self._current_examples_list], axis=-1 + ) + # Total tokens to pop could be up to self._max_len + total_tokens_to_pop = min(len(concat_target_labels), self._max_len) + self._current_token_count -= total_tokens_to_pop + assert self._current_token_count >= 0 + if self._current_token_count > 0: + self._current_examples_list = [{self._input_key: concat_target_labels[self._max_len :]}] + self._parent_sequence_start_state = self._parent_sequence_end_state + else: + self._current_examples_list = [] + self._parent_sequence_start_state = None + + # If all the concat target labels is empty, early return. + if total_tokens_to_pop == 0: + return None + + return {self._input_key: concat_target_labels[: self._max_len]} + + def __next__(self): + # Iteratively call __next__ until we yield valid examples. + while True: + # If there are still leftover tokens when we have already reached the window limit, we + # should decide whether to keep this sequence. + if self._current_token_count > 0 and self._reach_window_limit(): + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + return next_element + + # Keeps filling up the sequence until reaching the limit. + # Termination of this while loop means: + # 1. 
Reaches the sequence_length limit, and ready to output one batch. + # 2. Reaches the window limit. + while self._current_token_count < self._max_len: + self._parent_sequence_end_state = self._parent.get_state() + if not self._parent_sequence_start_state: + self._parent_sequence_start_state = self._parent_sequence_end_state + try: + example = next(self._parent) + except StopIteration as e: + next_element = self._pop_element() + if next_element is not None: + return next_element + else: + raise e + + self._current_examples_list.append(example) + + self._current_token_count += len(example[self._input_key]) + self._index += 1 + + if self._reach_window_limit(): + break + + # If there is enough token, we always return a sequence. + if self._current_token_count >= self._max_len: + return self._pop_element() + + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + # If next element is empty, that suggests that the sequence is dropped. + if next_element is not None: + return next_element + + def get_state(self) -> dict[str, Any]: + # TODO(haoshuoh, markblee): All of the parent_state thing could be wrapped in a Packer + # class. 
+ return { + "parent_sequence_start_state": self._parent_sequence_start_state + if self._parent_sequence_start_state + else self._parent.get_state(), + "parent": self._parent.get_state(), + "index": self._index, + "current_token_count": self._current_token_count, + } + + def set_state(self, state: dict[str, Any]): + def _retrieve_packer_states(state: dict[str, Any]): + """Retrieves packer states by loading all the examples from that sequence.""" + self._current_token_count = state["current_token_count"] + self._current_examples_list = [] + self._parent_sequence_start_state = None + self._parent_sequence_end_state = None + total_tokens_retrieved = 0 + + while self._parent.get_state() != state["parent"]: + self._parent_sequence_end_state = self._parent.get_state() + if not self._parent_sequence_start_state: + self._parent_sequence_start_state = self._parent_sequence_end_state + example = next(self._parent) + total_tokens_retrieved += len(example[self._input_key]) + self._current_examples_list.append(example) + + if total_tokens_retrieved > self._current_token_count: + # The truncation should only happens to the first example (aka rollover example). + assert total_tokens_retrieved - self._current_token_count <= len( + self._current_examples_list[0][self._input_key] + ) + self._current_examples_list[0] = { + self._input_key: self._current_examples_list[0][self._input_key][ + total_tokens_retrieved - self._current_token_count : + ] + } + elif total_tokens_retrieved < self._current_token_count: + raise ValueError("Grain receives invalid states.") + + self._parent.set_state(state["parent_sequence_start_state"])
```suggestion self._parent.set_state(state["parent_sequence_start_state"] or state["parent"]) ```
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,313 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + + window_size is used for parity with windowed_packing. It will also be used if we want to pack + multimodal data which is not represented in sequence, thus naturally has a limit in how many + examples we can pack due to memory limit. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", # The key in the input examples to use for packing. + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + # Total number of tokens in `self._current_examples_list`. + self._current_token_count = 0 + # The examples in the current buffer. + self._current_examples_list = [] + # For checkpointing support, we need to maintain what exactly are the examples in current + # sequence. self._parent_sequence_start_state and self._parent_sequence_end_state are used + # to store to starting and ending state of the examples. + + # If not None, the state of `self._parent` before the first example in + # `self._current_examples_list` was added. + # Must be None if `self._current_token_count == 0`. 
+ self._parent_sequence_start_state = None + # If not None, the state of `self._parent` before the last example in + # `self._current_examples_list` was added. + self._parent_sequence_end_state = None + + def _reach_window_limit(self) -> bool: + """Determines if we have already reached window limit.""" + return self._window_size is not None and self._index % self._window_size == 0 + + def _pop_element(self) -> Optional[dict]: + """Pops element from self._current_example_list, returns None if the list is empty.""" + # If there is no examples in current sequence, return None. + if not self._current_examples_list: + return None + concat_target_labels = np.concatenate( + [x[self._input_key] for x in self._current_examples_list], axis=-1 + ) + # Total tokens to pop could be up to self._max_len + total_tokens_to_pop = min(len(concat_target_labels), self._max_len) + self._current_token_count -= total_tokens_to_pop + assert self._current_token_count >= 0 + if self._current_token_count > 0: + self._current_examples_list = [{self._input_key: concat_target_labels[self._max_len :]}] + self._parent_sequence_start_state = self._parent_sequence_end_state + else: + self._current_examples_list = [] + self._parent_sequence_start_state = None + + # If all the concat target labels is empty, early return. + if total_tokens_to_pop == 0: + return None + + return {self._input_key: concat_target_labels[: self._max_len]} + + def __next__(self): + # Iteratively call __next__ until we yield valid examples. + while True: + # If there are still leftover tokens when we have already reached the window limit, we + # should decide whether to keep this sequence. + if self._current_token_count > 0 and self._reach_window_limit(): + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + return next_element + + # Keeps filling up the sequence until reaching the limit. + # Termination of this while loop means: + # 1. 
Reaches the sequence_length limit, and ready to output one batch. + # 2. Reaches the window limit. + while self._current_token_count < self._max_len: + self._parent_sequence_end_state = self._parent.get_state() + if not self._parent_sequence_start_state: + self._parent_sequence_start_state = self._parent_sequence_end_state + try: + example = next(self._parent) + except StopIteration as e: + next_element = self._pop_element() + if next_element is not None: + return next_element + else: + raise e + + self._current_examples_list.append(example) + + self._current_token_count += len(example[self._input_key]) + self._index += 1 + + if self._reach_window_limit(): + break + + # If there is enough token, we always return a sequence. + if self._current_token_count >= self._max_len: + return self._pop_element() + + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + # If next element is empty, that suggests that the sequence is dropped. + if next_element is not None: + return next_element + + def get_state(self) -> dict[str, Any]: + # TODO(haoshuoh, markblee): All of the parent_state thing could be wrapped in a Packer + # class. + return { + "parent_sequence_start_state": self._parent_sequence_start_state + if self._parent_sequence_start_state + else self._parent.get_state(), + "parent": self._parent.get_state(), + "index": self._index, + "current_token_count": self._current_token_count, + } + + def set_state(self, state: dict[str, Any]): + def _retrieve_packer_states(state: dict[str, Any]):
Do we need this as a separate function?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,313 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + + window_size is used for parity with windowed_packing. It will also be used if we want to pack + multimodal data which is not represented in sequence, thus naturally has a limit in how many + examples we can pack due to memory limit. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", # The key in the input examples to use for packing. + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + # Total number of tokens in `self._current_examples_list`. + self._current_token_count = 0 + # The examples in the current buffer. + self._current_examples_list = [] + # For checkpointing support, we need to maintain what exactly are the examples in current + # sequence. self._parent_sequence_start_state and self._parent_sequence_end_state are used + # to store to starting and ending state of the examples. + + # If not None, the state of `self._parent` before the first example in + # `self._current_examples_list` was added. + # Must be None if `self._current_token_count == 0`. 
+ self._parent_sequence_start_state = None + # If not None, the state of `self._parent` before the last example in + # `self._current_examples_list` was added. + self._parent_sequence_end_state = None + + def _reach_window_limit(self) -> bool: + """Determines if we have already reached window limit.""" + return self._window_size is not None and self._index % self._window_size == 0 + + def _pop_element(self) -> Optional[dict]: + """Pops element from self._current_example_list, returns None if the list is empty.""" + # If there is no examples in current sequence, return None. + if not self._current_examples_list: + return None + concat_target_labels = np.concatenate( + [x[self._input_key] for x in self._current_examples_list], axis=-1 + ) + # Total tokens to pop could be up to self._max_len + total_tokens_to_pop = min(len(concat_target_labels), self._max_len) + self._current_token_count -= total_tokens_to_pop + assert self._current_token_count >= 0 + if self._current_token_count > 0: + self._current_examples_list = [{self._input_key: concat_target_labels[self._max_len :]}] + self._parent_sequence_start_state = self._parent_sequence_end_state + else: + self._current_examples_list = [] + self._parent_sequence_start_state = None + + # If all the concat target labels is empty, early return. + if total_tokens_to_pop == 0: + return None + + return {self._input_key: concat_target_labels[: self._max_len]} + + def __next__(self): + # Iteratively call __next__ until we yield valid examples. + while True: + # If there are still leftover tokens when we have already reached the window limit, we + # should decide whether to keep this sequence. + if self._current_token_count > 0 and self._reach_window_limit(): + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + return next_element + + # Keeps filling up the sequence until reaching the limit. + # Termination of this while loop means: + # 1. 
Reaches the sequence_length limit, and ready to output one batch. + # 2. Reaches the window limit. + while self._current_token_count < self._max_len: + self._parent_sequence_end_state = self._parent.get_state() + if not self._parent_sequence_start_state: + self._parent_sequence_start_state = self._parent_sequence_end_state + try: + example = next(self._parent) + except StopIteration as e: + next_element = self._pop_element() + if next_element is not None: + return next_element + else: + raise e + + self._current_examples_list.append(example) + + self._current_token_count += len(example[self._input_key]) + self._index += 1 + + if self._reach_window_limit(): + break + + # If there is enough token, we always return a sequence. + if self._current_token_count >= self._max_len: + return self._pop_element() + + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + # If next element is empty, that suggests that the sequence is dropped. + if next_element is not None: + return next_element + + def get_state(self) -> dict[str, Any]: + # TODO(haoshuoh, markblee): All of the parent_state thing could be wrapped in a Packer + # class. + return { + "parent_sequence_start_state": self._parent_sequence_start_state + if self._parent_sequence_start_state + else self._parent.get_state(), + "parent": self._parent.get_state(), + "index": self._index, + "current_token_count": self._current_token_count, + } + + def set_state(self, state: dict[str, Any]): + def _retrieve_packer_states(state: dict[str, Any]): + """Retrieves packer states by loading all the examples from that sequence.""" + self._current_token_count = state["current_token_count"] + self._current_examples_list = [] + self._parent_sequence_start_state = None + self._parent_sequence_end_state = None + total_tokens_retrieved = 0 + + while self._parent.get_state() != state["parent"]:
Add an assert that self._parent.get_state() == state["parent"] if and only if self._current_token_count == 0 ?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,313 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + + window_size is used for parity with windowed_packing. It will also be used if we want to pack + multimodal data which is not represented in sequence, thus naturally has a limit in how many + examples we can pack due to memory limit. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", # The key in the input examples to use for packing. + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + # Total number of tokens in `self._current_examples_list`. + self._current_token_count = 0 + # The examples in the current buffer. + self._current_examples_list = [] + # For checkpointing support, we need to maintain what exactly are the examples in current + # sequence. self._parent_sequence_start_state and self._parent_sequence_end_state are used + # to store to starting and ending state of the examples. + + # If not None, the state of `self._parent` before the first example in + # `self._current_examples_list` was added. + # Must be None if `self._current_token_count == 0`. 
+ self._parent_sequence_start_state = None + # If not None, the state of `self._parent` before the last example in + # `self._current_examples_list` was added. + self._parent_sequence_end_state = None + + def _reach_window_limit(self) -> bool: + """Determines if we have already reached window limit.""" + return self._window_size is not None and self._index % self._window_size == 0 + + def _pop_element(self) -> Optional[dict]: + """Pops element from self._current_example_list, returns None if the list is empty.""" + # If there is no examples in current sequence, return None. + if not self._current_examples_list: + return None + concat_target_labels = np.concatenate( + [x[self._input_key] for x in self._current_examples_list], axis=-1 + ) + # Total tokens to pop could be up to self._max_len + total_tokens_to_pop = min(len(concat_target_labels), self._max_len) + self._current_token_count -= total_tokens_to_pop + assert self._current_token_count >= 0 + if self._current_token_count > 0: + self._current_examples_list = [{self._input_key: concat_target_labels[self._max_len :]}] + self._parent_sequence_start_state = self._parent_sequence_end_state + else: + self._current_examples_list = [] + self._parent_sequence_start_state = None + + # If all the concat target labels is empty, early return. + if total_tokens_to_pop == 0: + return None + + return {self._input_key: concat_target_labels[: self._max_len]} + + def __next__(self): + # Iteratively call __next__ until we yield valid examples. + while True: + # If there are still leftover tokens when we have already reached the window limit, we + # should decide whether to keep this sequence. + if self._current_token_count > 0 and self._reach_window_limit(): + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + return next_element + + # Keeps filling up the sequence until reaching the limit. + # Termination of this while loop means: + # 1. 
Reaches the sequence_length limit, and ready to output one batch. + # 2. Reaches the window limit. + while self._current_token_count < self._max_len: + self._parent_sequence_end_state = self._parent.get_state() + if not self._parent_sequence_start_state: + self._parent_sequence_start_state = self._parent_sequence_end_state + try: + example = next(self._parent) + except StopIteration as e: + next_element = self._pop_element() + if next_element is not None: + return next_element + else: + raise e + + self._current_examples_list.append(example) + + self._current_token_count += len(example[self._input_key]) + self._index += 1 + + if self._reach_window_limit(): + break + + # If there is enough token, we always return a sequence. + if self._current_token_count >= self._max_len: + return self._pop_element() + + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + # If next element is empty, that suggests that the sequence is dropped. + if next_element is not None: + return next_element + + def get_state(self) -> dict[str, Any]: + # TODO(haoshuoh, markblee): All of the parent_state thing could be wrapped in a Packer + # class. 
+ return { + "parent_sequence_start_state": self._parent_sequence_start_state + if self._parent_sequence_start_state + else self._parent.get_state(), + "parent": self._parent.get_state(), + "index": self._index, + "current_token_count": self._current_token_count, + } + + def set_state(self, state: dict[str, Any]): + def _retrieve_packer_states(state: dict[str, Any]): + """Retrieves packer states by loading all the examples from that sequence.""" + self._current_token_count = state["current_token_count"] + self._current_examples_list = [] + self._parent_sequence_start_state = None + self._parent_sequence_end_state = None + total_tokens_retrieved = 0 + + while self._parent.get_state() != state["parent"]: + self._parent_sequence_end_state = self._parent.get_state() + if not self._parent_sequence_start_state: + self._parent_sequence_start_state = self._parent_sequence_end_state + example = next(self._parent) + total_tokens_retrieved += len(example[self._input_key]) + self._current_examples_list.append(example) + + if total_tokens_retrieved > self._current_token_count: + # The truncation should only happens to the first example (aka rollover example). 
+ assert total_tokens_retrieved - self._current_token_count <= len( + self._current_examples_list[0][self._input_key] + ) + self._current_examples_list[0] = { + self._input_key: self._current_examples_list[0][self._input_key][ + total_tokens_retrieved - self._current_token_count : + ] + } + elif total_tokens_retrieved < self._current_token_count: + raise ValueError("Grain receives invalid states.") + + self._parent.set_state(state["parent_sequence_start_state"]) + self._index = state["index"] + _retrieve_packer_states(state) + + +class _StreamingPackingIterDataset(grain.IterDataset): + """A class that performs streaming packing.""" + + def __init__( + self, + parents, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", + ): + super().__init__(parents) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + def __str__(self) -> str: + return "StreamingPackingIterDataset" + + def __iter__(self) -> _StreamingPackingDatasetIterator: + return _StreamingPackingDatasetIterator( + self._parent.__iter__(), + max_len=self._max_len, + window_size=self._window_size, + input_key=self._input_key, + ) + + +def streaming_packing( + ds: Dataset, + *, + max_len: int, + inner: Callable, + window_size: Optional[int] = None, + input_key: str = "target_labels", + read_options: grain.ReadOptions = grain.ReadOptions(num_threads=1, prefetch_buffer_size=16), +) -> Dataset: + """Streaming packing given max_len and optional window_size. + + Given a sequence of tokens with arbitraty length, streaming packing will pack examples until it + reaches the max_len. There is an optional window_size option to make it still compatible with + windowed_packing. If window_size is None, that means there is no upper bound limit on the + window size. + + Note that the semantics of inner in this function is slightly different from the one used in + windowed_packing. In windowed_packing, we expect it to take full window of examples. 
In + streaming packing, we expect it to take examples that's within this sequence. + + Args: + ds: datasets to be packed. + max_len: Max sequence length. + inner: A processor that operates on packed examples. It should output examples of shape ... + or None if the example should be skipped. + window_size: An upper bound on the window size to use for packing. If None, no upper bound + is enforced. + input_key: The keys in the input examples to use for packing. + read_options: grain.ReadOptions which includes num_threads and prefetch_buffer_size. It is + used to convert the pipeline to grain.IterDataset. + + Returns: + A packed dataset.
Comment that it will only contain values corresponding to `input_key`?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -23,12 +24,313 @@ def __call__(self, ids: Tensor, *, max_len: int) -> Tensor: ... +_PackingFn = Callable[[Dataset, int, Callable, int, str, grain.ReadOptions], Dataset] + + +class _StreamingPackingDatasetIterator(grain.DatasetIterator): + """An iterator that yields packed examples in a streaming fashion. + + This implementation does not require maintaining a fixed buffer of `window_size` elements in + memory. Instead, it yields packed examples and flushes the buffer as soon as an example is + ready. This significantly improves the first-time read, especially for datasets which have much + higher tokens per sequence, as well as reduces the peak memory requirements for packing. + + window_size is used for parity with windowed_packing. It will also be used if we want to pack + multimodal data which is not represented in sequence, thus naturally has a limit in how many + examples we can pack due to memory limit. + """ + + def __init__( + self, + parent: grain.DatasetIterator, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", # The key in the input examples to use for packing. + ): + super().__init__(parent) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + # Index of the parent. + self._index = 0 + # Total number of tokens in `self._current_examples_list`. + self._current_token_count = 0 + # The examples in the current buffer. + self._current_examples_list = [] + # For checkpointing support, we need to maintain what exactly are the examples in current + # sequence. self._parent_sequence_start_state and self._parent_sequence_end_state are used + # to store to starting and ending state of the examples. + + # If not None, the state of `self._parent` before the first example in + # `self._current_examples_list` was added. + # Must be None if `self._current_token_count == 0`. 
+ self._parent_sequence_start_state = None + # If not None, the state of `self._parent` before the last example in + # `self._current_examples_list` was added. + self._parent_sequence_end_state = None + + def _reach_window_limit(self) -> bool: + """Determines if we have already reached window limit.""" + return self._window_size is not None and self._index % self._window_size == 0 + + def _pop_element(self) -> Optional[dict]: + """Pops element from self._current_example_list, returns None if the list is empty.""" + # If there is no examples in current sequence, return None. + if not self._current_examples_list: + return None + concat_target_labels = np.concatenate( + [x[self._input_key] for x in self._current_examples_list], axis=-1 + ) + # Total tokens to pop could be up to self._max_len + total_tokens_to_pop = min(len(concat_target_labels), self._max_len) + self._current_token_count -= total_tokens_to_pop + assert self._current_token_count >= 0 + if self._current_token_count > 0: + self._current_examples_list = [{self._input_key: concat_target_labels[self._max_len :]}] + self._parent_sequence_start_state = self._parent_sequence_end_state + else: + self._current_examples_list = [] + self._parent_sequence_start_state = None + + # If all the concat target labels is empty, early return. + if total_tokens_to_pop == 0: + return None + + return {self._input_key: concat_target_labels[: self._max_len]} + + def __next__(self): + # Iteratively call __next__ until we yield valid examples. + while True: + # If there are still leftover tokens when we have already reached the window limit, we + # should decide whether to keep this sequence. + if self._current_token_count > 0 and self._reach_window_limit(): + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + return next_element + + # Keeps filling up the sequence until reaching the limit. + # Termination of this while loop means: + # 1. 
Reaches the sequence_length limit, and ready to output one batch. + # 2. Reaches the window limit. + while self._current_token_count < self._max_len: + self._parent_sequence_end_state = self._parent.get_state() + if not self._parent_sequence_start_state: + self._parent_sequence_start_state = self._parent_sequence_end_state + try: + example = next(self._parent) + except StopIteration as e: + next_element = self._pop_element() + if next_element is not None: + return next_element + else: + raise e + + self._current_examples_list.append(example) + + self._current_token_count += len(example[self._input_key]) + self._index += 1 + + if self._reach_window_limit(): + break + + # If there is enough token, we always return a sequence. + if self._current_token_count >= self._max_len: + return self._pop_element() + + next_element = self._pop_element() + assert self._current_token_count == 0 and not self._current_examples_list + # If next element is empty, that suggests that the sequence is dropped. + if next_element is not None: + return next_element + + def get_state(self) -> dict[str, Any]: + # TODO(haoshuoh, markblee): All of the parent_state thing could be wrapped in a Packer + # class. 
+ return { + "parent_sequence_start_state": self._parent_sequence_start_state + if self._parent_sequence_start_state + else self._parent.get_state(), + "parent": self._parent.get_state(), + "index": self._index, + "current_token_count": self._current_token_count, + } + + def set_state(self, state: dict[str, Any]): + def _retrieve_packer_states(state: dict[str, Any]): + """Retrieves packer states by loading all the examples from that sequence.""" + self._current_token_count = state["current_token_count"] + self._current_examples_list = [] + self._parent_sequence_start_state = None + self._parent_sequence_end_state = None + total_tokens_retrieved = 0 + + while self._parent.get_state() != state["parent"]: + self._parent_sequence_end_state = self._parent.get_state() + if not self._parent_sequence_start_state: + self._parent_sequence_start_state = self._parent_sequence_end_state + example = next(self._parent) + total_tokens_retrieved += len(example[self._input_key]) + self._current_examples_list.append(example) + + if total_tokens_retrieved > self._current_token_count: + # The truncation should only happens to the first example (aka rollover example). 
+ assert total_tokens_retrieved - self._current_token_count <= len( + self._current_examples_list[0][self._input_key] + ) + self._current_examples_list[0] = { + self._input_key: self._current_examples_list[0][self._input_key][ + total_tokens_retrieved - self._current_token_count : + ] + } + elif total_tokens_retrieved < self._current_token_count: + raise ValueError("Grain receives invalid states.") + + self._parent.set_state(state["parent_sequence_start_state"]) + self._index = state["index"] + _retrieve_packer_states(state) + + +class _StreamingPackingIterDataset(grain.IterDataset): + """A class that performs streaming packing.""" + + def __init__( + self, + parents, + *, + max_len: int, + window_size: Optional[int] = None, + input_key: str = "target_labels", + ): + super().__init__(parents) + self._max_len = max_len + self._window_size = window_size + self._input_key = input_key + + def __str__(self) -> str: + return "StreamingPackingIterDataset" + + def __iter__(self) -> _StreamingPackingDatasetIterator: + return _StreamingPackingDatasetIterator( + self._parent.__iter__(), + max_len=self._max_len, + window_size=self._window_size, + input_key=self._input_key, + ) + + +def streaming_packing( + ds: Dataset, + *, + max_len: int, + inner: Callable, + window_size: Optional[int] = None, + input_key: str = "target_labels", + read_options: grain.ReadOptions = grain.ReadOptions(num_threads=1, prefetch_buffer_size=16), +) -> Dataset: + """Streaming packing given max_len and optional window_size. + + Given a sequence of tokens with arbitraty length, streaming packing will pack examples until it + reaches the max_len. There is an optional window_size option to make it still compatible with + windowed_packing. If window_size is None, that means there is no upper bound limit on the + window size. + + Note that the semantics of inner in this function is slightly different from the one used in + windowed_packing. In windowed_packing, we expect it to take full window of examples. 
In + streaming packing, we expect it to take examples that's within this sequence. + + Args: + ds: datasets to be packed. + max_len: Max sequence length. + inner: A processor that operates on packed examples. It should output examples of shape ... + or None if the example should be skipped. + window_size: An upper bound on the window size to use for packing. If None, no upper bound + is enforced. + input_key: The keys in the input examples to use for packing. + read_options: grain.ReadOptions which includes num_threads and prefetch_buffer_size. It is + used to convert the pipeline to grain.IterDataset. + + Returns: + A packed dataset. + """ + + def _maybe_call(example: Optional[SequenceOr[dict[str, Tensor]]], *, fn: Callable): + if example is not None: + processed_example = fn(example) + # If this example is already dropped by inner function, we skip it by marking it None. + if processed_example[input_key].size == 0: + return None + # fn returns a tensor with shape [1, ..]. We remove the first dimension. + for v in processed_example.values(): + assert v.shape[0] == 1 + return {k: v[0, :] for k, v in processed_example.items()} + return example + + # Converts dataset to IterDataset. + ds = input_grain.maybe_to_iter_dataset(ds, read_options=read_options) + ds = _StreamingPackingIterDataset( + ds, + max_len=max_len, + window_size=window_size, + input_key=input_key, + ) + # Some examples might be dropped after calling inner. Grain IterDataset will automatically + # handle it as long as we mark those examples as None. + ds = ds.map(functools.partial(_maybe_call, fn=inner)) + ds = ds.filter(lambda x: x is not None) + return ds + + +# TODO(markblee): Clean up the unused signatures. 
+def windowed_packing( + ds: Dataset, + *, + max_len: Optional[int] = None, + inner: Optional[Callable] = None, + window_size: Optional[int] = None, + input_key: str = "target_labels", + read_options: grain.ReadOptions = grain.ReadOptions(num_threads=1, prefetch_buffer_size=16), +) -> Dataset: + """Windowed packing given window_size. + + Given a sequence of tokens with arbitraty length, windowed packing will first batch the example + given window_size then unbatch given max_len. + + Args: + max_len: Max sequence length. + ds: Datasets to be packed. + inner: A processor that operates on packed examples. It should output examples of shape + [1, sequence_length] or None if the example should be skipped. + window_size: An upper bound on the window size to use for packing. + input_key: The keys in the input examples to use for packing. + read_options: grain.ReadOptions which includes num_threads and prefetch_buffer_size. It is + used to convert the pipeline to grain.IterDataset. + + Returns: + A packed dataset.
Ditto?
axlearn
github_2023
python
1,046
apple
ruomingp
@@ -107,6 +411,7 @@ def text_to_lm_training_input( window_size: int = 128, max_padding_fraction: float = 1, read_options: grain.ReadOptions = grain.ReadOptions(num_threads=1, prefetch_buffer_size=16), + packing_fn: Callable = windowed_packing,
Do we have a test that checks that the new packing function is equivalent to the old one with a finite window size?
axlearn
github_2023
python
1,047
apple
ruomingp
@@ -126,7 +127,7 @@ def typestr(self) -> str: def _ckpt_dir(self, info: ocp.type_handlers.ParamInfo) -> str: # Each worker writes its grain checkpoints under a different path. - return os.path.join(info.parent_dir, f"grain_{jax.process_index()}") + return os.path.join(info.parent_dir, f"python_{jax.process_index()}")
This will break existing checkpoints. Is that OK?
axlearn
github_2023
python
1,044
apple
markblee
@@ -0,0 +1,469 @@ +# Copyright © 2025 Apple Inc. + +"""Unit tests of job_flink.py.""" +import contextlib +import json +import logging +from typing import Optional + +from absl import flags +from absl.testing import parameterized + +from axlearn.cloud.common.bundler import Bundler +from axlearn.cloud.gcp import bundler, job, job_flink +from axlearn.cloud.gcp.bundler import ArtifactRegistryBundler, CloudBuildBundler +from axlearn.cloud.gcp.jobset_utils_test import mock_settings +from axlearn.cloud.gcp.test_utils import mock_gcp_settings +from axlearn.common.test_utils import TestCase + +expected_flink_deployment_json = """
Add a ref for how it's generated?
axlearn
github_2023
python
1,044
apple
markblee
@@ -531,6 +532,78 @@ def from_flags(cls, fv: flags.FlagValues, **kwargs): return cfg +class FlinkGKERunnerJob(GKERunnerJob): + """A GKERunnerJob that uses FlinkGKEJob.""" + + inner = FlinkTPUGKEJob + pre_provisioner = TPUNodePoolProvisioner + + def _get_status(self) -> GKERunnerJob.Status: + """ + Returns: + GKERunnerJob: + SUCCEEDED: when the job succeeded. + PENDING: if the job hasn't started yet. + READY: when the job is running. + UNKNOWN: all other cases. + Raises: + RuntimeError: when the job failed, and GKE runner will retry it. + """
nit -- fix formatting.
axlearn
github_2023
python
1,044
apple
markblee
@@ -0,0 +1,24 @@ +# Copyright © 2024 Apple Inc. +"""Utils of TPU pods.""" + +from typing import Any + + +def get_default_env(tpu_type: str, num_tpu_slices: int, job_name: str) -> dict[str, Any]:
```suggestion def get_default_env(*, tpu_type: str, num_tpu_slices: int, job_name: str) -> dict[str, Any]: ```
axlearn
github_2023
python
1,044
apple
markblee
@@ -531,6 +532,78 @@ def from_flags(cls, fv: flags.FlagValues, **kwargs): return cfg +class FlinkGKERunnerJob(GKERunnerJob): + """A GKERunnerJob that uses FlinkGKEJob.""" + + inner = FlinkTPUGKEJob + pre_provisioner = TPUNodePoolProvisioner + + def _get_status(self) -> GKERunnerJob.Status: + """ + Returns: + GKERunnerJob.Status: + SUCCEEDED: When the job succeeded. + PENDING: When the job hasn't started yet. + READY: When the job is running. + UNKNOWN: All other cases. + Raises: + RuntimeError: when the job failS, and GKE runner will retry it. + """
```suggestion """Retrieves the current status of the job. Returns: GKERunnerJob.Status: SUCCEEDED: When the job succeeded. PENDING: When the job hasn't started yet. READY: When the job is running. UNKNOWN: All other cases. Raises: RuntimeError: when the job failS, and GKE runner will retry it. """ ```
axlearn
github_2023
python
1,042
apple
markblee
@@ -1386,4 +1389,8 @@ def m_or_g(x, suffix=""): + f" Load Balancing / Dispatch): {cost_stats.get('utilization8{}')}\n" + f" Texture Units (or Rarely Used Compute Units): {cost_stats.get('utilization9{}')}" ) + else: + # Some platforms may return different format unlike CPU, TPU (v5p) and GPU (H100). + analysis_results += f"{cost_stats}\n"
nit -- Consider logging the warning anyway? Seems useful for users to know that this isn't the 'intended' output.
axlearn
github_2023
python
1,036
apple
apghml
@@ -35,6 +40,7 @@ flags.DEFINE_string("config", None, "The trainer config name.", required=True) flags.DEFINE_string("topology", None, "The TPU topology.") flags.DEFINE_integer("topology_num_slices", 1, "The number of TPU slices.") +flags.DEFINE_boolean("cpu", False, "The number of TPU slices.")
The description of this flag looks incorrect?
axlearn
github_2023
python
1,036
apple
apghml
@@ -91,6 +101,14 @@ def _compile_and_dump_programs( logging.info("Wrote serialized %s to %s", program_name, serialized_compiled_output_path) +def _get_n_devices(topology: Optional[str]) -> int: + if topology is None:
I'm not sure it makes sense to default to 1024. Can we remove the default?
axlearn
github_2023
python
1,036
apple
apghml
@@ -1283,3 +1285,66 @@ def select_mesh_config(trainer_config: SpmdTrainer.Config, *, mesh_selector: str # Override configs from ConfigModifier. mesh_rule_fn = maybe_instantiate(mesh_rule) trainer_config = mesh_rule_fn(trainer_config) + + +def aot_model_analysis(compiled: jax.stages.Compiled) -> str: + """Performs the model analysis on the AOT compiled JAX program. + + Refer to https://docs.jax.dev/en/latest/jax.stages.html#jax.stages.Compiled + + Args: + compiled (jax.stages.Compiled): The compiled JAX program. + + Returns: + memory_analysis: String, model analysis results. + """ + # e.g. _CheckifyCompiledFnWrapper doesn't have memory_analysis attribute. + if not hasattr(compiled, "memory_analysis"): + return "" + + to_mb_gb = lambda x: f"{x / (1024**2):.1f} MB / {x / (1024**3):.2f} GB"
I think this could be confusing since users might think the left number is the usage and right number is the maximum available. (e.g., they might mistake it for meaning "x mb out of y gb used"). Can we eliminate the mb?
axlearn
github_2023
python
1,036
apple
apghml
@@ -91,6 +101,14 @@ def _compile_and_dump_programs( logging.info("Wrote serialized %s to %s", program_name, serialized_compiled_output_path)
This PR still doesn't deduplicate the the memory printing code in `run_aot_compilation.py` with the new function you have in `trainer.py`?
axlearn
github_2023
python
1,036
apple
apghml
@@ -35,6 +40,7 @@ flags.DEFINE_string("config", None, "The trainer config name.", required=True) flags.DEFINE_string("topology", None, "The TPU topology.")
IIUC, this can now represent CPU topology too? Update the description and explain the format for CPU topology?
axlearn
github_2023
python
1,036
apple
kelvin-zou
@@ -1287,3 +1289,88 @@ def select_mesh_config(trainer_config: SpmdTrainer.Config, *, mesh_selector: str # Override configs from ConfigModifier. mesh_rule_fn = maybe_instantiate(mesh_rule) trainer_config = mesh_rule_fn(trainer_config) + + +def aot_model_analysis(compiled: jax.stages.Compiled) -> str: + """Performs the model analysis on the AOT compiled JAX program. + + Refer to https://docs.jax.dev/en/latest/jax.stages.html#jax.stages.Compiled + + Note: memory_analysis() and cost_analysis() are internal statistics used by the XLA compiler, + and there is no official documentation for them. + The human-readable interpretation provided here is based on best guesses from reviewing + the XLA source code. If there are any inaccuracies, please update accordingly. + * memory_analysis: + https://github.com/openxla/xla/blob/101045ad079d17701986060666feda0e70d6c4cf/xla/pjrt/pjrt_executable.h#L284 + * cost_analysis: + https://github.com/openxla/xla/blob/101045ad079d17701986060666feda0e70d6c4cf/xla/service/hlo_cost_analysis.h#L41 + + Args: + compiled: The compiled JAX program. + + Returns: + memory_analysis: String, model analysis results. + """ + # e.g. _CheckifyCompiledFnWrapper doesn't have memory_analysis attribute. + if not hasattr(compiled, "memory_analysis"): + return "" + + def m_or_g(x, suffix=""): + if x is None: + return None + m = 1024**2 + g = 1024**3 + if x > g: + return f"{x / g:.1f}G{suffix}" + else: + return f"{x / m:.1f}M{suffix}" + + mb_or_gb = lambda x: m_or_g(x, "B") + analysis_results = "" + mem_stats = compiled.memory_analysis() + # According to the doc, some platforms may not support it. 
+ if mem_stats is not None: + analysis_results += "======= Memory Analysis ==================================\n" + try: + total_hbm = ( + mem_stats.argument_size_in_bytes + + mem_stats.output_size_in_bytes + + mem_stats.temp_size_in_bytes + + mem_stats.generated_code_size_in_bytes + ) + analysis_results += ( + f"Input memory: {mb_or_gb(mem_stats.argument_size_in_bytes)}\n" + + f"Output memory: {mb_or_gb(mem_stats.output_size_in_bytes)}\n" + + f"Temp memory: {mb_or_gb(mem_stats.temp_size_in_bytes)}\n" + + f"Code memory: {mb_or_gb(mem_stats.generated_code_size_in_bytes)}\n" + + f"Total HBM memory: {mb_or_gb(total_hbm)}\n" + ) + except AttributeError: + # Some platforms may return different format. + analysis_results += f"{mem_stats}\n" + + cost_stats = compiled.cost_analysis()
Did you run regression test on GPU? One of the users is complaining about seeing some error: Wrapped call axlearn.common.trainer.SpmdTrainer.compile_train_step(trainer_state: axlearn.common.trainer.TrainerState, input_batch: dict, compiler_options: dict) File "/opt/axlearn/axlearn/common/trainer.py", line 1164, in compile_train_step logging.log_first_n([logging.INFO](http://logging.info/), aot_model_analysis(compiled), 1) File "/opt/axlearn/axlearn/common/trainer.py", line 1367, in aot_model_analysis cost_stats = cost_stats[0] KeyError: 0
axlearn
github_2023
python
1,037
apple
markblee
@@ -1072,6 +1091,150 @@ def cast(x: Union[Tensor, TensorSpec]) -> Union[Tensor, TensorSpec]: return jax.tree.map(cast, in_tree) +@runtime_checkable +class PerParamFn(Protocol[T]): + """A callable that operates on each parameter.""" + + def __call__(self, params: Union[Nested[Tensor], Nested[TensorSpec]]) -> Nested[T]: + """This protocol requires a callable that accepts either a nested Tensor or + a nested TensorSpec as input and returns a processed value for each parameter. + Args:
Looks like some docstring format got lost from copy/paste?
axlearn
github_2023
python
1,029
apple
ruomingp
@@ -822,65 +837,93 @@ def make_gda(x, partition_spec): return jax.tree.map(make_gda, host_arrays, partition_specs) +# TODO(markblee): Remove partition arg. def global_to_host_array( - global_arrays: NestedTensor, *, partition: DataPartitionType = DataPartitionType.FULL -) -> NestedTensor: - """Extracts host addressable rows from each Tensor in `global_arrays`. + global_arrays: Nested[Tensor], + *, + partition: Optional[DataPartitionType] = DataPartitionType.FULL, +) -> Nested[Tensor]: + """Extracts host addressable data from each Tensor in `global_arrays`. Args: - global_arrays: A NestedTensor. - Each leaf Tensor must have shape [global_batch_size, ...] with identical - global_batch_size across tensors. - The tensors must be partitioned in the same way and can be partitioned only along the - batch axis. - partition: How the global array should be partitioned. + global_arrays: A nested Tensor. + Each leaf Tensor must be uniformly partitioned across each dim. + partition: Deprecated. Returns: - A NestedTensor with the same structure as `global_array`. Each leaf Tensor will have shape - [host_batch_size, ...] where `host_batch_size` will be equal to `global_batch_size` if the - global Tensors are replicated or `global_batch_size // process_count` if the global Tensors - are partitioned across hosts. + A nested Tensor with the same structure as `global_array`. Each leaf Tensor will have shape + `process_shape` where `process_shape` will be equal to `global_shape` if the global Tensors + are replicated. If the global Tensors are partitioned across hosts, the `process_shape` will + represent the host-local portion. """ + if partition is not None: + logging.log_first_n(logging.WARNING, "Specifying partition is deprecated.", n=1) - def sort_global_shards(global_shards: list[jax.Shard]) -> list[jax.Shard]: - # We should sort jax.Array.global_shards by using this function to guarantee - # round-trip equality of host_to_global_device_array and global_to_host_array. 
- # Shards are sorted in-place. - global_shards.sort(key=lambda shard: shard.index) - return global_shards - - global_array_items = flatten_items(global_arrays) - if not global_array_items: - return global_arrays # no leaf Tensor. - first_path, first_value = global_array_items[0] - sorted_first_value_shards = sort_global_shards(first_value.global_shards) - first_value_shard_is_local = [shard.data is not None for shard in sorted_first_value_shards] - batch_size = first_value.shape[0] - - def get_local_array(path: str, value: Tensor) -> Tensor: - if value.shape[0] != batch_size: - raise ValueError( - f"Value batch size mismatch: {batch_size} @ {first_path} vs. " - f"{value.shape[0]} @ {path} of {shapes(global_arrays)}" - ) - sorted_value_shards = sort_global_shards(value.global_shards) - value_shard_is_local = [shard.data is not None for shard in sorted_value_shards] - if value_shard_is_local != first_value_shard_is_local: - raise ValueError( - f"Value shard mismatch: {first_value_shard_is_local} @ {first_path} vs. " - f"{value_shard_is_local} @ {path}" - ) - local_data = [shard.data for shard in sorted_value_shards if shard.data is not None] - if not local_data: - raise ValueError(f"No local shard found: {sorted_value_shards}.") - if partition == DataPartitionType.FULL: - return np.concatenate(local_data, axis=0) - elif partition == DataPartitionType.REPLICATED: - return local_data[0] - else: - raise NotImplementedError(f"Unsupported partition: {partition}") + def index_to_shard( + shards: list[jax.Shard], global_shape: Sequence[int] + ) -> dict[tuple, jax.Shard]: + """Returns a mapping from (sorted) indices to shards. - return jax.tree.map(get_local_array, tree_paths(global_arrays), global_arrays) + Each key is a tuple of length `len(global_shape)`. + Each element of the tuple is a `(start, limit)` tuple, specifying the start and limit + indices of the shard along the global shape dim. 
+ """ + index_to_shard = [] + for shard in shards: + index = tuple( + (s.start or 0, s.stop or global_shape[dim]) for dim, s in enumerate(shard.index)
Nit: maybe `slice` is more readable than `tuple`?
axlearn
github_2023
python
1,028
apple
changlan
@@ -513,29 +519,15 @@ def _bool_value(self) -> Optional[Tensor]: Shape: [batch, target_len, source_len]. Raises: - NotImplementedError. If `target_positions.ndim not in [1,2]`. + ValueError. If `(target|source)_positions.ndim not == 2`. """ - target_positions, source_positions = jnp.indices(self.shape, sparse=True) - # Shape: [1, target_len, 1], [1, 1, source_len]. - target_positions, source_positions = target_positions[None], source_positions[None] - if self.target_positions is not None: - target_positions = self.target_positions - if target_positions.ndim not in [1, 2]: - raise NotImplementedError(f"Shape of target_positions: {target_positions.shape}.") - if target_positions.ndim == 1: - # Shape: [batch, 1] + [target_len] = [batch, target_len] - # pylint: disable-next=unsubscriptable-object - target_positions = target_positions[:, None] + jnp.arange(self.shape[0]) - elif target_positions.ndim == 2: - shape_with_batch_dim = (1, *self.shape) - # Raise an exception if shapes aren't compatible. We don't use the output. - jnp.broadcast_shapes( - (target_positions.shape[0], 1, target_positions.shape[1]), shape_with_batch_dim - ) - else: - raise NotImplementedError(f"Invalid value {target_positions.ndim=}.") - target_positions = target_positions[..., None] # Shape: [batch, target_len, 1]. - + target_positions, source_positions = self.target_positions, self.source_positions + if target_positions.ndim != source_positions.ndim != 2: + raise ValueError( + f"{target_positions.shape=} or {source_positions.shape=} is not rank 2." + ) + target_positions = einops.rearrange(target_positions, "b t -> b t 1")
fly-by comment: I wonder if we should do the same without introducing extra `einops` dependency.
axlearn
github_2023
python
1,020
apple
markblee
@@ -256,6 +256,14 @@ def define_flags(cls, fv: flags.FlagValues): "not all TPU types support this flag.", **common_kwargs, ) + # Currently only supported in specific clusters with PriorityClass setup. + # TODO(ethanli): infer it from the JobMetadata.priority, and support it on all clusters.
```suggestion # Only supported in clusters with PriorityClass setup. # TODO(ethanli): infer it from the JobMetadata.priority. ``` What's the behavior when specified and not supported? Is it silently ignored or raises an error?
axlearn
github_2023
python
1,018
apple
chunyang-wen
@@ -205,7 +205,10 @@ class BiasAndResidual(BaseAttentionBias, Generic[B]): residual: BaseAttentionBias def _value(self) -> Optional[Tensor]: - return CompositeAttentionBias([self.bias, self.residual]).value() + biases = [self.residual] + if self.bias is not None:
```python biases = [self.bias] if self.bias is not None else [] biases.append(self.residual) ```
axlearn
github_2023
python
1,009
apple
changlan
@@ -701,6 +701,11 @@ def sliding_window_causal_mask(sliding_window_size: int) -> MaskFn: """Returns a causal MaskFn for sliding window attentions of a given window size. Implements the `MaskFn` protocol. + + Note: Setting sliding_window_size = 8 results in a window size of 9, including itself.
nit: Maybe explicitly note that it attends to itself and sliding_window_size tokens on the left. "window size" is not a well defined term.
axlearn
github_2023
python
1,009
apple
changlan
@@ -730,8 +736,12 @@ def make_causal_biases(seq_len: int) -> Tensor: def make_sliding_window_causal_biases(seq_len: int, sliding_window_size: int) -> Tensor: """Generates attention logit biases for sliding window attention. + Note: Setting sliding_window_size = 8 results in attending to 9 tokens - it attends to itself
```suggestion Note: Each token attends to itself and sliding_window_size tokens to the left (i.e. sliding_window_size + 1 tokens). ```
axlearn
github_2023
python
1,009
apple
changlan
@@ -701,6 +701,12 @@ def sliding_window_causal_mask(sliding_window_size: int) -> MaskFn: """Returns a causal MaskFn for sliding window attentions of a given window size. Implements the `MaskFn` protocol. + + Note: Setting sliding_window_size = 8 results in attending to 9 tokens - it attends to itself
```suggestion Note: Each token attends to itself and sliding_window_size tokens to the left (i.e. sliding_window_size + 1 tokens). ```
axlearn
github_2023
python
995
apple
ruomingp
@@ -735,34 +937,31 @@ def init_states( # If `kv_state` is provided externally, we do not have to maintain key/value in cache. # Otherwise, initialize the cache from provided query, key, value. if kv_state is None: + kv_shape = dict(num_kv_heads=self.num_kv_heads, per_head_dim=cfg.per_head_dim) if key is None: - batch, max_len = query.shape[:2] + kv_shape.update(batch_size=query.shape[0], kv_len=query.shape[1]) else: - batch, max_len = key.shape[:2] chex.assert_equal_shape((key, value)) - - # NB: key and value in init_state are transposed so that source_length is in the last - # dimension as a TPU fusion optimization. - # Reference: - # https://github.com/google-research/t5x/blob/4d94d8bf41230d492e15e255c9888b5bfd9a5ee8/t5x/examples/t5/layers.py#L215 - init_state.update( - key=jnp.zeros( - shape=(batch, self.num_kv_heads, cfg.per_head_dim, max_len), - dtype=dtype, - ), - value=jnp.zeros( - shape=(batch, self.num_kv_heads, cfg.per_head_dim, max_len), - dtype=dtype, - ), - ) + kv_shape.update(batch_size=key.shape[0], kv_len=key.shape[1]) + kv_shape = KVCache.Shape(**kv_shape) + if query.dtype is None: + dtype = jnp.float32 + logging.warning("init_states dtype is not set, so fallback to float32")
When does this happen? Can we raise a ValueError instead?
axlearn
github_2023
python
995
apple
ruomingp
@@ -843,44 +1042,32 @@ def extend_step( query_positions += time_step[:, None] # Project inputs to key, value and query. Each has shape [B, steps, N, H]. - q_proj, k_proj, v_proj = self.forward(query, **kv_kwargs, query_positions=query_positions) + q_proj, k_proj, v_proj, unused_q_pos, unused_k_pos = self.forward( + query, query_positions=query_positions, **kv_kwargs + ) updated_state = dict(time_step=time_step + num_query_steps) - if kv_state is None: - # Update the cache via one-hot broadcast and addition. - # NB: Cache updates can also be done via dynamic slice update. However it was observed - # that RLHF training got stuck in some cases. - # TODO(ds-hwang): Investigate the root cause. - cached_key = cached_states["key"] - cached_value = cached_states["value"] - - source_len = cached_key.shape[-1] - - # [B, T, N, H] --> [B, N, H, T]. - k_proj = jnp.einsum("btnh->bnht", k_proj) - v_proj = jnp.einsum("btnh->bnht", v_proj) - - # Create a dispatch matrix of shape [B, T=step, S]. - oh_indices = jax.nn.one_hot( - time_step[:, None] + jnp.arange(num_query_steps), source_len, dtype=cached_key.dtype + with child_context("kv_cache_extend_step", module=self.kv_cache):
Do we need this child_context?
axlearn
github_2023
python
994
apple
Ethanlm
@@ -334,6 +334,15 @@ class _SystemCharacteristics: "16x20x28", 2240, "tpu-v5p-slice", "ct5p-hightpu-4t", 4, AcceleratorType["TPU"], "v5p-17920" ), # v5litepod + "v5litepod-8": _SystemCharacteristics( + "2x4", + 2, + "tpu-v5-lite-podslice", + "ct5lp-hightpu-4t", + 8,
Should this be 4 instead?
axlearn
github_2023
python
820
apple
ruomingp
@@ -0,0 +1,796 @@ +# Copyright © 2024 Apple Inc. + +"""Implements Orbax emergency checkpointing and provide utilities for correct store. + +See the docstring of `OrbaxEmergencyCheckpointer` for more details. +""" + +import copy +import functools +import hashlib +import multiprocessing as mp +import os +import time +from concurrent.futures import ThreadPoolExecutor +from contextlib import contextmanager +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import jax +import jax.lib +import orbax.checkpoint as ocp +import orbax.checkpoint.experimental.emergency.checkpoint_manager as oecp +import tensorflow as tf +from absl import flags, logging +from jax._src.distributed import global_state +from jax._src.mesh import thread_resources +from jax.experimental.array_serialization import serialization + +from axlearn.common import file_system as fs +from axlearn.common import utils, utils_spmd +from axlearn.common.checkpointer import ( + STEP_NUM_DIGITS, + STEP_PREFIX, + BaseCheckpointer, + Checkpointer, + CheckpointPolicy, + CheckpointValidationType, + InstantiableConfig, + StateStorage, + StateStorageCommitCallback, + async_save_tf_savables, + check_state_structure, + config_for_function, + every_n_steps_policy, + multihost_utils, + parse_step_from_dir, + read_index_file, + restore_tf_savables, + write_index_file, +) +from axlearn.common.config import REQUIRED, Required, config_class +from axlearn.common.module import Module +from axlearn.common.utils import Nested, Tensor, TensorSpec + +FLAGS = flags.FLAGS + + +@contextmanager +def setup(spec: str): + """Setups FLAGS.process_id and FLAGS.distributed_coordinator as required by Orbax. + + See the docstring of `get_consistent_proc_info` for more details. + + Args: + spec: Key=Value pairs separated by comma. Key must be one of ("local_address", + "barrier_timeout_seconds", "local_ckpt_dir"). See the docstring of + `get_consistent_proc_info`. 
+ """ + parsed_args = {} + allowed_fields = ["local_address", "barrier_timeout_seconds", "local_ckpt_dir"] + for field in spec.split(","): + k, v = field.split("=") + if k not in allowed_fields: + raise ValueError(f"Expected key in {allowed_fields}, got key={k}.") + parsed_args[k] = v + if "barrier_timeout_seconds" in parsed_args: + parsed_args["barrier_timeout_seconds"] = int(parsed_args["barrier_timeout_seconds"]) + if "local_ckpt_dir" not in parsed_args: + raise ValueError("local_ckpt_dir must be specified.") + # pylint: disable-next=missing-kwoa + info = get_consistent_proc_info( + **parsed_args, + trainer_dir=FLAGS.trainer_dir, + distributed_coordinator=FLAGS.distributed_coordinator, + num_processes=FLAGS.num_processes, + process_id=FLAGS.process_id, + jax_backend=FLAGS.jax_backend, + initialization_timeout=FLAGS.initialization_timeout, + ) + FLAGS.process_id = info.inv_proc_id + FLAGS.distributed_coordinator = info.address + FLAGS.experimental_orbax_use_distributed_process_id = True + yield + + +class _TFSavablesStateStorage(StateStorage): + """A StateStorage implementation that only saves the index file and tf savables.""" + + @config_class + class Config(StateStorage.Config): + timeout_secs: int = 300 + + def __init__(self, cfg: Config): + super().__init__(cfg) + # One thread is sufficient because `async_save_tf_savables` only creates one future. 
+ self._executor = ThreadPoolExecutor(1) + self._manager = serialization.AsyncManager(timeout_secs=cfg.timeout_secs) + + def _get_spec(self, *, step: int, state: Nested[Any]) -> Nested[Any]: + spec = {"index": [("step", int(step))], "tf_ckpt_map": {}} + for path, value in utils.flatten_items(state): + if isinstance(value, (Tensor, TensorSpec)): + dtype = getattr(value.dtype, "dtype", value.dtype) + spec["index"].append( + (path, {"dtype": str(dtype), "shape": str(tuple(value.shape))}) + ) + elif isinstance(value, tf.data.Iterator): + spec["index"].append((path, str(type(value)))) + spec["tf_ckpt_map"][path] = value + else: + spec["index"].append((path, value)) + logging.log_first_n(logging.INFO, "TF savables spec: %s", 1, str(spec)) + return spec + + def save_to_dir( + self, + *, + step: int, + state: Nested[Tensor], + ckpt_dir: str, + on_commit_callback: StateStorageCommitCallback = write_index_file, + ): + start_time = time.perf_counter() + # We write data files directly to `ckpt_dir`. `index` is written into `ckpt_dir` in + # `on_commit_callback` to finalize the checkpoint. 
+ spec = self._get_spec(step=step, state=state) + self.wait_until_finished() + + save_tf_future = async_save_tf_savables( + spec["tf_ckpt_map"], + executor=self._executor, + dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}"), + ) + + def commit(): + on_commit_callback(ckpt_dir=ckpt_dir, index=spec["index"]) + logging.info( + "Serialization of TF savables to %s completed in %s seconds.", + ckpt_dir, + time.perf_counter() - start_time, + ) + + # pylint: disable=protected-access + self._manager._add_futures([save_tf_future]) + self._manager._start_async_commit(commit) + + def wait_until_finished(self): + self._manager.wait_until_finished() + + def restore_from_dir( + self, + step: int, + state: Union[Nested[Tensor], Nested[TensorSpec]], + *, + ckpt_dir: str, + validation: CheckpointValidationType = CheckpointValidationType.EXACT, + ) -> Nested[Tensor]: + spec = self._get_spec(step=step, state=state) + logging.info("Restoring TF savables from directory %s", ckpt_dir) + check_state_structure( + read_index_file(ckpt_dir), target_structure=spec["index"], validation=validation + ) + restore_tf_savables( + spec["tf_ckpt_map"], dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}") + ) + multihost_utils.sync_global_devices(ckpt_dir) + return state + + def stop(self): + self._executor.shutdown(wait=True) + + +_PROCESS_ID_FILE_NAME: str = "process_id.txt" + + +@dataclass +class _ProcessInfo: + """Records the process id and address information for this node. + + Attributes: + address: The global coordinator address. This is set during the first run and stays and + stays the same unless process 0 failed. + inv_proc_id: The invariant process id of this node. This process id is set during the first + run and stays the same for all subsequent runs unless this node failed. + cur_proc_id: Internal field. The new process id assigned externally after failover. Used + during ID negotiation after failover. + key: Internal field. Key used during ID negotiation after failover. 
+ num_proc_per_slice: Internal field. Used to calculate slice ID for TPU. + """ + + address: str + inv_proc_id: int + cur_proc_id: int + key: Optional[str] = None + num_proc_per_slice: Optional[int] = None + + def to_string(self): + return "|".join(str(x) for x in [self.address, self.inv_proc_id, self.cur_proc_id]) + + @property + def prev_slice_id(self): + assert self.num_proc_per_slice is not None + return self.inv_proc_id // self.num_proc_per_slice + + @property + def cur_slice_id(self): + assert self.num_proc_per_slice is not None + return self.cur_proc_id // self.num_proc_per_slice + + @classmethod + def from_string( + cls, data: str, *, key: Optional[str] = None, num_proc_per_slice: Optional[int] = None + ): + ls = data.split("|") + assert len(ls) == 3 + return cls(ls[0], int(ls[1]), int(ls[2]), key=key, num_proc_per_slice=num_proc_per_slice) + + +def _get_previous_process_info(local_dir: str, *, trainer_dir: str) -> _ProcessInfo: + """Gets process info from local checkpoint directory.""" + path = os.path.join(local_dir, _get_unique_id(trainer_dir), _PROCESS_ID_FILE_NAME) + if not fs.exists(path): + return _ProcessInfo(address="", inv_proc_id=-1, cur_proc_id=-1) + + with fs.open(path) as f: + return _ProcessInfo.from_string(f.read()) + + +def _dump_process_info(local_dir: str, *, trainer_dir: str, proc_info: _ProcessInfo): + """Dumps process info to local checkpoint directory.""" + local_dir = os.path.join(local_dir, _get_unique_id(trainer_dir)) + fs.makedirs(local_dir) + process_id_file = os.path.join(local_dir, _PROCESS_ID_FILE_NAME) + with fs.open(process_id_file, "w") as f: + f.write(proc_info.to_string()) + + +def _get_unique_id(trainer_dir: str) -> str: + return hashlib.sha256(trainer_dir.encode(), usedforsecurity=False).hexdigest() + + +def _logger_init(): + """Init logger in spawned processes that don't inherit parent's logger.""" + logging.set_verbosity(logging.INFO) + logging.use_absl_handler() + + +def _init_consistent_proc_ids( + *, + 
local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +): + """Exchanges id info through jax coordinator and dumps to local file. + + During failover, healthy nodes will read their locally stored process id file, but failed nodes + will lost their process ids. To assign ids that are free in the global id range (i.e. 0 to + num_processes - 1), we let each node report its process id (-1 if missing) to rank 0, and rank + 0 will figure out suitable IDs to assign to each failed node. We reuse Jax's distributed client + to avoid writing our own coordinator. + """ + _logger_init() + + jax_backend = setup_kwargs["jax_backend"] + timeout_ms = barrier_timeout_seconds * 1000 + utils_spmd.setup(**setup_kwargs) + client: jax.lib.xla_extension.DistributedRuntimeClient = global_state.client + local_proc_info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + key_prefix = "axlearn/id_reassign" + # Local key just needs to be unique for each process. + local_proc_info.key = f"{key_prefix}/{jax.process_index()}" + + if jax_backend == "tpu": + worker_hostnames = os.environ["TPU_WORKER_HOSTNAMES"].split(",") + num_slices = int(os.environ["MEGASCALE_NUM_SLICES"]) + num_proc_per_slice = len(worker_hostnames) + worker_id = int(os.environ["TPU_WORKER_ID"]) + + # Coordinator port for TPU is hardcoded. Reference: + # https://github.com/jax-ml/jax/blob/1aa5de66a8f3c910115cac2fbe118e0facd7a3be/jax/_src/clusters/cloud_tpu_cluster.py#L29 + local_proc_info.address = f"{worker_hostnames[worker_id]}:8476" + # Note: cannot use jax.process_index() here because it may be different from the + # distributed id. This is a jax problem. + local_proc_info.cur_proc_id = ( + int(os.environ["MEGASCALE_SLICE_ID"]) * num_proc_per_slice + worker_id + ) + elif jax_backend == "gpu": + if local_address is None: + raise ValueError( + "local_address must be set for GPU when using in-memory checkpointing." 
+ ) + local_proc_info.address = local_address + local_proc_info.cur_proc_id = setup_kwargs["process_id"] + else: + raise RuntimeError(f"Unsupported backend {jax_backend}.") + + # Every worker reports its proc info to rank 0. + client.key_value_set(local_proc_info.key, local_proc_info.to_string()) + client.wait_at_barrier("axlearn/id-reassign-gather-id", timeout_in_ms=timeout_ms) + + # Then, rank 0 assigns inv_proc_id for worker that's missing their inv_proc_id and find the + # coordinator address. + if local_proc_info.cur_proc_id == 0: + ids = client.key_value_dir_get(key_prefix) + proc_infos: list[_ProcessInfo] = [] + + def first_run_assign_fn(info: _ProcessInfo): + info.inv_proc_id = info.cur_proc_id + + inv_id_assign_fn = first_run_assign_fn + if jax_backend == "tpu": + # For TPUs, we have the additional requirement that process ids in slice id X must be + # in range [X * num_processes_per_slice, (X + 1) * num_processes_per_slice). Therefore, + # we first identify the healthy slices' ids and then figure out the slice ids to assign + # to failed slices. Each process in the failed slice will then get id `new_slice_id * + # num_proc_per_slice + cur_proc_id % num_proc_per_slice`. After id assignment, the + # address of process that's assigned with id=0 will be broadcasted to every worker. + + # Mapping from new slice ids to assigned slice ids forfailed slices. + failed_slices_new_ids = {} + for k, data in ids: + info = _ProcessInfo.from_string(data, key=k, num_proc_per_slice=num_proc_per_slice) + proc_infos.append(info) + if info.inv_proc_id == -1: + failed_slices_new_ids[info.cur_slice_id] = -1 + + already_assigned_slice_ids = set() + for info in proc_infos: + if info.cur_slice_id not in failed_slices_new_ids: + already_assigned_slice_ids.add(info.prev_slice_id) + + # If there're no assigned slice ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. 
+ if already_assigned_slice_ids: + to_be_assigned_slice_ids = set(range(num_slices)) - already_assigned_slice_ids + assert len(to_be_assigned_slice_ids) == len(failed_slices_new_ids) + for k, new_id in zip(failed_slices_new_ids.keys(), to_be_assigned_slice_ids): + failed_slices_new_ids[k] = new_id + + def assign_fn(info: _ProcessInfo): + proc_id = info.inv_proc_id + if (new_slice_id := failed_slices_new_ids.get(info.cur_slice_id)) is not None: + proc_id = ( + new_slice_id * num_proc_per_slice + + info.cur_proc_id % num_proc_per_slice + ) + info.inv_proc_id = proc_id + + inv_id_assign_fn = assign_fn + + elif jax_backend == "gpu": + num_processes = setup_kwargs["num_processes"] + # For GPU backend, failed nodes are assigned with ids that are missing in the global id + # range with arbitrary order. + assigned_ids = set() + for key, data in ids: + info = _ProcessInfo.from_string(data, key=key) + proc_infos.append(info) + assigned_ids.add(info.inv_proc_id) + + # If there're no assigned ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. + if assigned_ids: + to_be_assigned_ids = iter(set(range(num_processes)) - assigned_ids) + + def assign_fn(info: _ProcessInfo): + if info.inv_proc_id == -1: + info.inv_proc_id = next(to_be_assigned_ids) + + inv_id_assign_fn = assign_fn + + coordinator_address = None + for info in proc_infos: + inv_id_assign_fn(info) + if info.inv_proc_id == 0: + coordinator_address = info.address + assert coordinator_address is not None + for info in proc_infos: + info.address = coordinator_address + client.key_value_set(info.key + "/get", info.to_string()) + + new_info = _ProcessInfo.from_string( + client.blocking_key_value_get(local_proc_info.key + "/get", timeout_in_ms=timeout_ms) + ) + logging.info( + "Previous proc id: %d. Assigned proc id: %d. 
Global coordinator address: %s.", + local_proc_info.inv_proc_id, + new_info.inv_proc_id, + new_info.address, + ) + _dump_process_info(local_ckpt_dir, trainer_dir=trainer_dir, proc_info=new_info) + # Block to avoid coordinator exiting too early. + client.wait_at_barrier("axlearn/id-reassign-finalize", timeout_in_ms=timeout_ms) + jax.distributed.shutdown() + + +def get_consistent_proc_info( + *, + local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +) -> _ProcessInfo: + """Gets the invariant process id of the current process and global coordinator's address. + + This function guarantees process id <-> node mapping stays the same for healthy nodes after a + failover. This is required to preserve shard order for in-memory checkpoint recovery. For GPU + training, all healthy nodes will have their process id unchanged. For TPU, all nodes in the + healthy slices will have their process id unchanged. See docstring of + `_init_consistent_proc_ids` for implementation details. + + Args: + local_address: A IP:Port that can be used as the coordinator if this rank is elected. + This Port must be free in the coordinator pod and IP:Port must be reachable from all + other processes. + barrier_timeout_seconds: Timeout in seconds for the barrier and key_value_set operations. + trainer_dir: Path to the trainer dir. + local_ckpt_dir: Path to the local checkpoint dir. + **setup_kwargs: Args to `utils_spmd.setup()`. + + Returns: + A _ProcessInfo whose `inv_proc_id` should be used as the process id and `address` should be + used as the global coordinator address. + """ + platform = os.environ.get("JAX_PLATFORMS", "") + try: + start_t = time.perf_counter() + # Patch platform so the process doesn't waste time initializing accelerators. 
+ os.environ["JAX_PLATFORMS"] = "cpu" + proc = mp.get_context("spawn").Process( + target=_init_consistent_proc_ids, + kwargs=dict( + local_address=local_address, + barrier_timeout_seconds=barrier_timeout_seconds, + trainer_dir=trainer_dir, + local_ckpt_dir=local_ckpt_dir, + **setup_kwargs, + ), + ) + proc.start() + proc.join() + if proc.exitcode != 0: + raise RuntimeError( + "Expects id assignment process to finish normally. " + f"Got exit code {proc.exitcode}. Please check the log above for errors." + ) + + info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + if info.inv_proc_id == -1: + raise RuntimeError("Expects inv process id != -1, but got -1.") + logging.info( + "Successfully finished process ID assignment in %fs", time.perf_counter() - start_t + ) + return info + finally: + # Restore previous platform settings. + if platform != "": + os.environ["JAX_PLATFORMS"] = platform + else: + del os.environ["JAX_PLATFORMS"] + + +class OrbaxEmergencyCheckpointer(BaseCheckpointer):
How does this overlap with https://github.com/apple/axlearn/blob/140a18f33d10178dc695469e36d39d7879bd1ed4/axlearn/common/checkpointer_orbax.py#L169?
axlearn
github_2023
python
820
apple
ruomingp
@@ -0,0 +1,814 @@ +# Copyright © 2024 Apple Inc. + +"""Implements Orbax emergency checkpointing and provide utilities for correct store. + +See the docstring of `OrbaxEmergencyCheckpointer` for more details. +""" + +import copy +import functools +import hashlib +import multiprocessing as mp +import os +import time +from concurrent.futures import ThreadPoolExecutor +from contextlib import contextmanager +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import jax +import jax.lib +import orbax.checkpoint as ocp +import orbax.checkpoint.experimental.emergency.checkpoint_manager as oecp +import tensorflow as tf +from absl import flags, logging +from jax._src.distributed import global_state +from jax._src.mesh import thread_resources +from jax.experimental.array_serialization import serialization + +from axlearn.common import file_system as fs +from axlearn.common import utils, utils_spmd +from axlearn.common.checkpointer import ( + STEP_NUM_DIGITS, + STEP_PREFIX, + BaseCheckpointer, + Checkpointer, + CheckpointPolicy, + CheckpointValidationType, + InstantiableConfig, + StateStorage, + StateStorageCommitCallback, + async_save_tf_savables, + check_state_structure, + config_for_function, + every_n_steps_policy, + multihost_utils, + parse_step_from_dir, + read_index_file, + restore_tf_savables, + write_index_file, +) +from axlearn.common.config import REQUIRED, Required, config_class +from axlearn.common.module import Module +from axlearn.common.utils import Nested, Tensor, TensorSpec + +FLAGS = flags.FLAGS + + +@contextmanager +def setup(spec: str): + """Setups FLAGS.process_id and FLAGS.distributed_coordinator as required by Orbax. + + See the docstring of `get_consistent_proc_info` for more details. + + Args: + spec: Key=Value pairs separated by comma. Key must be one of ("local_address", + "barrier_timeout_seconds", "local_ckpt_dir"). See the docstring of + `get_consistent_proc_info`. 
+ """ + parsed_args = {} + allowed_fields = ["local_address", "barrier_timeout_seconds", "local_ckpt_dir"] + for field in spec.split(","): + k, v = field.split("=") + if k not in allowed_fields: + raise ValueError(f"Expected key in {allowed_fields}, got key={k}.") + parsed_args[k] = v + if "barrier_timeout_seconds" in parsed_args: + parsed_args["barrier_timeout_seconds"] = int(parsed_args["barrier_timeout_seconds"]) + if "local_ckpt_dir" not in parsed_args: + raise ValueError("local_ckpt_dir must be specified.") + # pylint: disable-next=missing-kwoa + info = get_consistent_proc_info( + **parsed_args, + trainer_dir=FLAGS.trainer_dir, + distributed_coordinator=FLAGS.distributed_coordinator, + num_processes=FLAGS.num_processes, + process_id=FLAGS.process_id, + jax_backend=FLAGS.jax_backend, + initialization_timeout=FLAGS.initialization_timeout, + ) + FLAGS.process_id = info.inv_proc_id + FLAGS.distributed_coordinator = info.address + FLAGS.experimental_orbax_use_distributed_process_id = True + yield + + +class _TFSavablesStateStorage(StateStorage): + """A StateStorage implementation that only saves the index file and tf savables.""" + + @config_class + class Config(StateStorage.Config): + timeout_secs: int = 300 + + def __init__(self, cfg: Config): + super().__init__(cfg) + # One thread is sufficient because `async_save_tf_savables` only creates one future. 
+ self._executor = ThreadPoolExecutor(1) + self._manager = serialization.AsyncManager(timeout_secs=cfg.timeout_secs) + + def _get_spec(self, *, step: int, state: Nested[Any]) -> Nested[Any]: + spec = {"index": [("step", int(step))], "tf_ckpt_map": {}} + for path, value in utils.flatten_items(state): + if isinstance(value, (Tensor, TensorSpec)): + dtype = getattr(value.dtype, "dtype", value.dtype) + spec["index"].append( + (path, {"dtype": str(dtype), "shape": str(tuple(value.shape))}) + ) + elif isinstance(value, tf.data.Iterator): + spec["index"].append((path, str(type(value)))) + spec["tf_ckpt_map"][path] = value + else: + spec["index"].append((path, value)) + logging.log_first_n(logging.INFO, "TF savables spec: %s", 1, str(spec)) + return spec + + def save_to_dir( + self, + *, + step: int, + state: Nested[Tensor], + ckpt_dir: str, + on_commit_callback: StateStorageCommitCallback = write_index_file, + ): + start_time = time.perf_counter() + # We write data files directly to `ckpt_dir`. `index` is written into `ckpt_dir` in + # `on_commit_callback` to finalize the checkpoint. 
+ spec = self._get_spec(step=step, state=state) + self.wait_until_finished() + + save_tf_future = async_save_tf_savables( + spec["tf_ckpt_map"], + executor=self._executor, + dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}"), + ) + + def commit(): + on_commit_callback(ckpt_dir=ckpt_dir, index=spec["index"]) + logging.info( + "Serialization of TF savables to %s completed in %s seconds.", + ckpt_dir, + time.perf_counter() - start_time, + ) + + # pylint: disable=protected-access + self._manager._add_futures([save_tf_future]) + self._manager._start_async_commit(commit) + + def wait_until_finished(self): + self._manager.wait_until_finished() + + def restore_from_dir( + self, + step: int, + state: Union[Nested[Tensor], Nested[TensorSpec]], + *, + ckpt_dir: str, + validation: CheckpointValidationType = CheckpointValidationType.EXACT, + ) -> Nested[Tensor]: + spec = self._get_spec(step=step, state=state) + logging.info("Restoring TF savables from directory %s", ckpt_dir) + check_state_structure( + read_index_file(ckpt_dir), target_structure=spec["index"], validation=validation + ) + restore_tf_savables( + spec["tf_ckpt_map"], dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}") + ) + multihost_utils.sync_global_devices(ckpt_dir) + return state + + def stop(self): + self._executor.shutdown(wait=True) + + +_PROCESS_ID_FILE_NAME: str = "process_id.txt" + + +@dataclass +class _ProcessInfo: + """Records the process id and address information for this node. + + Attributes: + address: The global coordinator address. This is set during the first run and stays and + stays the same unless process 0 failed. + inv_proc_id: The invariant process id of this node. This process id is set during the first + run and stays the same for all subsequent runs unless this node failed. + cur_proc_id: Internal field. The new process id assigned externally after failover. Used + during ID negotiation after failover. + key: Internal field. Key used during ID negotiation after failover. 
+ num_proc_per_slice: Internal field. Used to calculate slice ID for TPU. + """ + + address: str + inv_proc_id: int + cur_proc_id: int + key: Optional[str] = None + num_proc_per_slice: Optional[int] = None + + def to_string(self): + return "|".join(str(x) for x in [self.address, self.inv_proc_id, self.cur_proc_id]) + + @property + def prev_slice_id(self): + assert self.num_proc_per_slice is not None + return self.inv_proc_id // self.num_proc_per_slice + + @property + def cur_slice_id(self): + assert self.num_proc_per_slice is not None + return self.cur_proc_id // self.num_proc_per_slice + + @classmethod + def from_string( + cls, data: str, *, key: Optional[str] = None, num_proc_per_slice: Optional[int] = None + ): + ls = data.split("|") + assert len(ls) == 3 + return cls(ls[0], int(ls[1]), int(ls[2]), key=key, num_proc_per_slice=num_proc_per_slice) + + +def _get_previous_process_info(local_dir: str, *, trainer_dir: str) -> _ProcessInfo: + """Gets process info from local checkpoint directory.""" + path = os.path.join(local_dir, _get_unique_id(trainer_dir), _PROCESS_ID_FILE_NAME) + if not fs.exists(path): + return _ProcessInfo(address="", inv_proc_id=-1, cur_proc_id=-1) + + with fs.open(path) as f: + return _ProcessInfo.from_string(f.read()) + + +def _dump_process_info(local_dir: str, *, trainer_dir: str, proc_info: _ProcessInfo): + """Dumps process info to local checkpoint directory.""" + local_dir = os.path.join(local_dir, _get_unique_id(trainer_dir)) + fs.makedirs(local_dir) + process_id_file = os.path.join(local_dir, _PROCESS_ID_FILE_NAME) + with fs.open(process_id_file, "w") as f: + f.write(proc_info.to_string()) + + +def _get_unique_id(trainer_dir: str) -> str: + return hashlib.sha256(trainer_dir.encode(), usedforsecurity=False).hexdigest() + + +def _logger_init(): + """Init logger in spawned processes that don't inherit parent's logger.""" + logging.set_verbosity(logging.INFO) + logging.use_absl_handler() + + +def _init_consistent_proc_ids( + *, + 
local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +): + """Exchanges id info through jax coordinator and dumps to local file. + + During failover, healthy nodes will read their locally stored process id file, but failed nodes + will lost their process ids. To assign ids that are free in the global id range (i.e. 0 to + num_processes - 1), we let each node report its process id (-1 if missing) to rank 0, and rank + 0 will figure out suitable IDs to assign to each failed node. We reuse Jax's distributed client + to avoid writing our own coordinator. + """ + _logger_init() + + jax_backend = setup_kwargs["jax_backend"] + timeout_ms = barrier_timeout_seconds * 1000 + utils_spmd.setup(**setup_kwargs) + client: jax.lib.xla_extension.DistributedRuntimeClient = global_state.client + local_proc_info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + key_prefix = "axlearn/id_reassign" + # Local key just needs to be unique for each process. + local_proc_info.key = f"{key_prefix}/{jax.process_index()}" + + if jax_backend == "tpu": + worker_hostnames = os.environ["TPU_WORKER_HOSTNAMES"].split(",") + num_slices = int(os.environ["MEGASCALE_NUM_SLICES"]) + num_proc_per_slice = len(worker_hostnames) + worker_id = int(os.environ["TPU_WORKER_ID"]) + + # Coordinator port for TPU is hardcoded. Reference: + # https://github.com/jax-ml/jax/blob/1aa5de66a8f3c910115cac2fbe118e0facd7a3be/jax/_src/clusters/cloud_tpu_cluster.py#L29 + local_proc_info.address = f"{worker_hostnames[worker_id]}:8476" + # Note: cannot use jax.process_index() here because it may be different from the + # distributed id. This is a jax problem. + local_proc_info.cur_proc_id = ( + int(os.environ["MEGASCALE_SLICE_ID"]) * num_proc_per_slice + worker_id + ) + elif jax_backend == "gpu": + if local_address is None: + raise ValueError( + "local_address must be set for GPU when using in-memory checkpointing." 
+ ) + local_proc_info.address = local_address + local_proc_info.cur_proc_id = setup_kwargs["process_id"] + else: + raise RuntimeError(f"Unsupported backend {jax_backend}.") + + # Every worker reports its proc info to rank 0. + client.key_value_set(local_proc_info.key, local_proc_info.to_string()) + client.wait_at_barrier("axlearn/id-reassign-gather-id", timeout_in_ms=timeout_ms) + + # Then, rank 0 assigns inv_proc_id for worker that's missing their inv_proc_id and find the + # coordinator address. + if local_proc_info.cur_proc_id == 0: + ids = client.key_value_dir_get(key_prefix) + proc_infos: list[_ProcessInfo] = [] + + def first_run_assign_fn(info: _ProcessInfo): + info.inv_proc_id = info.cur_proc_id + + inv_id_assign_fn = first_run_assign_fn + if jax_backend == "tpu": + # For TPUs, we have the additional requirement that process ids in slice id X must be + # in range [X * num_processes_per_slice, (X + 1) * num_processes_per_slice). Therefore, + # we first identify the healthy slices' ids and then figure out the slice ids to assign + # to failed slices. Each process in the failed slice will then get id `new_slice_id * + # num_proc_per_slice + cur_proc_id % num_proc_per_slice`. After id assignment, the + # address of process that's assigned with id=0 will be broadcasted to every worker. + + # Mapping from new slice ids to assigned slice ids forfailed slices. + failed_slices_new_ids = {} + for k, data in ids: + info = _ProcessInfo.from_string(data, key=k, num_proc_per_slice=num_proc_per_slice) + proc_infos.append(info) + if info.inv_proc_id == -1: + failed_slices_new_ids[info.cur_slice_id] = -1 + + already_assigned_slice_ids = set() + for info in proc_infos: + if info.cur_slice_id not in failed_slices_new_ids: + already_assigned_slice_ids.add(info.prev_slice_id) + + # If there're no assigned slice ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. 
+ if already_assigned_slice_ids: + to_be_assigned_slice_ids = set(range(num_slices)) - already_assigned_slice_ids + assert len(to_be_assigned_slice_ids) == len(failed_slices_new_ids) + for k, new_id in zip(failed_slices_new_ids.keys(), to_be_assigned_slice_ids): + failed_slices_new_ids[k] = new_id + + def assign_fn(info: _ProcessInfo): + proc_id = info.inv_proc_id + if (new_slice_id := failed_slices_new_ids.get(info.cur_slice_id)) is not None: + proc_id = ( + new_slice_id * num_proc_per_slice + + info.cur_proc_id % num_proc_per_slice + ) + info.inv_proc_id = proc_id + + inv_id_assign_fn = assign_fn + + elif jax_backend == "gpu": + num_processes = setup_kwargs["num_processes"] + # For GPU backend, failed nodes are assigned with ids that are missing in the global id + # range with arbitrary order. + assigned_ids = set() + for key, data in ids: + info = _ProcessInfo.from_string(data, key=key) + proc_infos.append(info) + assigned_ids.add(info.inv_proc_id) + + # If there're no assigned ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. + if assigned_ids: + to_be_assigned_ids = iter(set(range(num_processes)) - assigned_ids) + + def assign_fn(info: _ProcessInfo): + if info.inv_proc_id == -1: + info.inv_proc_id = next(to_be_assigned_ids) + + inv_id_assign_fn = assign_fn + + coordinator_address = None + for info in proc_infos: + inv_id_assign_fn(info) + if info.inv_proc_id == 0: + coordinator_address = info.address + assert coordinator_address is not None + for info in proc_infos: + info.address = coordinator_address + client.key_value_set(info.key + "/get", info.to_string()) + + new_info = _ProcessInfo.from_string( + client.blocking_key_value_get(local_proc_info.key + "/get", timeout_in_ms=timeout_ms) + ) + logging.info( + "Previous proc id: %d. Assigned proc id: %d. 
Global coordinator address: %s.", + local_proc_info.inv_proc_id, + new_info.inv_proc_id, + new_info.address, + ) + _dump_process_info(local_ckpt_dir, trainer_dir=trainer_dir, proc_info=new_info) + # Block to avoid coordinator exiting too early. + client.wait_at_barrier("axlearn/id-reassign-finalize", timeout_in_ms=timeout_ms) + jax.distributed.shutdown() + + +def get_consistent_proc_info( + *, + local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +) -> _ProcessInfo: + """Gets the invariant process id of the current process and global coordinator's address. + + This function guarantees process id <-> node mapping stays the same for healthy nodes after a + failover. This is required to preserve shard order for in-memory checkpoint recovery. For GPU + training, all healthy nodes will have their process id unchanged. For TPU, all nodes in the + healthy slices will have their process id unchanged. See docstring of + `_init_consistent_proc_ids` for implementation details. + + Args: + local_address: A IP:Port that can be used as the coordinator if this rank is elected. + This Port must be free in the coordinator pod and IP:Port must be reachable from all + other processes. + barrier_timeout_seconds: Timeout in seconds for the barrier and key_value_set operations. + trainer_dir: Path to the trainer dir. + local_ckpt_dir: Path to the local checkpoint dir. + **setup_kwargs: Args to `utils_spmd.setup()`. + + Returns: + A _ProcessInfo whose `inv_proc_id` should be used as the process id and `address` should be + used as the global coordinator address. + """ + platform = os.environ.get("JAX_PLATFORMS", "") + try: + start_t = time.perf_counter() + # Patch platform so the process doesn't waste time initializing accelerators. 
+ os.environ["JAX_PLATFORMS"] = "cpu" + proc = mp.get_context("spawn").Process( + target=_init_consistent_proc_ids, + kwargs=dict( + local_address=local_address, + barrier_timeout_seconds=barrier_timeout_seconds, + trainer_dir=trainer_dir, + local_ckpt_dir=local_ckpt_dir, + **setup_kwargs, + ), + ) + proc.start() + proc.join() + if proc.exitcode != 0: + raise RuntimeError( + "Expects id assignment process to finish normally. " + f"Got exit code {proc.exitcode}. Please check the log above for errors." + ) + + info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + if info.inv_proc_id == -1: + raise RuntimeError("Expects inv process id != -1, but got -1.") + logging.info( + "Successfully finished process ID assignment in %fs", time.perf_counter() - start_t + ) + return info + finally: + # Restore previous platform settings. + if platform != "": + os.environ["JAX_PLATFORMS"] = platform + else: + del os.environ["JAX_PLATFORMS"] + + +class OrbaxEmergencyCheckpointer(BaseCheckpointer): + """Checkpointer implementation that uses Orbax emergency checkpoint. + + ## Summary: + + This checkpointer is designed to improve the goodput of large multi-slice training jobs that + use data-parallelism across slices. At least two data-parallel slices are required. For other + use cases where this is not applicable or ultimate goodput is not required, please use + `OrbaxCheckpointer`. + + Why it can improve goodput: + 1. It can save to a local path (usually backed by a ramdisk) more frequently, so the progress + lost during restart can be reduced. This is in contrast with saving to remote filesystem + such as GCS directly, which has limited bandwidth to support frequent checkpointing. + 2. During restart, checkpoint can be broadcasted through network, which is faster than reading + from a remote filesystem. 
+ + To use the checkpointer, besides configuring it properly, it also requires + `get_consistent_proc_info` to be called and pass `inv_proc_id` and `address` as + `process_id` and `coordinator_address` to `jax.distributed.initialize`. + + ## How it works under the hood + + This checkpointer is intended for multi-slice training that uses data-parallelism across + slices. Orbax emergency checkpoint works by exploiting the following properties: + 1. Tensors are replicated across data-parallel replicas. + 2. When a slice fails in a multi-slice training and failover is started, only nodes + corresponding to the non-healthy slice may be restarted. Healthy nodes from healthy slices + will not restart. + + Hence, all slices can write checkpoints to node's memory or disk, providing us with redundancy + when there's a failure. This checkpoint frequency can be much higher than remote filesystem, + which has limited bandwidth to support high frequency saving. Checkpoints on nodes are referred + as local checkpoints. Checkpoints on remote filesystem are referred as persistent checkpoints. + + When a failure occurs, Orbax checkpointer will find the latest step from all local and + persistent checkpoints. If the checkpoint is local, the slice on which that checkpoint is + stored will read the checkpoint and broadcast the read values to other slices. Since local + checkpoints are scattered across different hosts, the process id, which determines the shard id + of locally stored shards, must stay the same for nodes in the healthy replicas to guarantee a + correct restore. We provide an utility function `get_consistent_proc_info` that returns the + process id and global coordinator address. They must be passed to `jax.distributed.initialize`. + + However, the above procedure doesn't apply to some non-tensor states such as data iterators. + Data iterators are unique across jax processes, and thus cannot be stored on nodes. 
Orbax + emergency checkpointer doesn't support non-tensor states. Therefore, we reuse axlearn
RE: "Orbax emergency checkpointer doesn't support non-tensor states" While this is true, it seems that it can be extended to support non-tensor states. Specifically, the local checkpointer manager already writes process metadata: https://github.com/google/orbax/blob/6e80ecc27581a413b1a481d4740e61df7316a4f4/checkpoint/orbax/checkpoint/experimental/emergency/checkpoint_manager.py#L569-L574 which is implemented by writing json strings to a file: https://github.com/google/orbax/blob/6e80ecc27581a413b1a481d4740e61df7316a4f4/checkpoint/orbax/checkpoint/experimental/emergency/mesh_consistency.py#L74-L104 Maybe it can be extended to take `user_process_metadata`? Would this work? Will the Orbax team be receptive to this idea?
axlearn
github_2023
python
820
apple
ruomingp
@@ -0,0 +1,814 @@ +# Copyright © 2024 Apple Inc. + +"""Implements Orbax emergency checkpointing and provide utilities for correct store. + +See the docstring of `OrbaxEmergencyCheckpointer` for more details. +""" + +import copy +import functools +import hashlib +import multiprocessing as mp +import os +import time +from concurrent.futures import ThreadPoolExecutor +from contextlib import contextmanager +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import jax +import jax.lib +import orbax.checkpoint as ocp +import orbax.checkpoint.experimental.emergency.checkpoint_manager as oecp +import tensorflow as tf +from absl import flags, logging +from jax._src.distributed import global_state +from jax._src.mesh import thread_resources +from jax.experimental.array_serialization import serialization + +from axlearn.common import file_system as fs +from axlearn.common import utils, utils_spmd +from axlearn.common.checkpointer import ( + STEP_NUM_DIGITS, + STEP_PREFIX, + BaseCheckpointer, + Checkpointer, + CheckpointPolicy, + CheckpointValidationType, + InstantiableConfig, + StateStorage, + StateStorageCommitCallback, + async_save_tf_savables, + check_state_structure, + config_for_function, + every_n_steps_policy, + multihost_utils, + parse_step_from_dir, + read_index_file, + restore_tf_savables, + write_index_file, +) +from axlearn.common.config import REQUIRED, Required, config_class +from axlearn.common.module import Module +from axlearn.common.utils import Nested, Tensor, TensorSpec + +FLAGS = flags.FLAGS + + +@contextmanager +def setup(spec: str): + """Setups FLAGS.process_id and FLAGS.distributed_coordinator as required by Orbax. + + See the docstring of `get_consistent_proc_info` for more details. + + Args: + spec: Key=Value pairs separated by comma. Key must be one of ("local_address", + "barrier_timeout_seconds", "local_ckpt_dir"). See the docstring of + `get_consistent_proc_info`. 
+ """ + parsed_args = {} + allowed_fields = ["local_address", "barrier_timeout_seconds", "local_ckpt_dir"] + for field in spec.split(","): + k, v = field.split("=") + if k not in allowed_fields: + raise ValueError(f"Expected key in {allowed_fields}, got key={k}.") + parsed_args[k] = v + if "barrier_timeout_seconds" in parsed_args: + parsed_args["barrier_timeout_seconds"] = int(parsed_args["barrier_timeout_seconds"]) + if "local_ckpt_dir" not in parsed_args: + raise ValueError("local_ckpt_dir must be specified.") + # pylint: disable-next=missing-kwoa + info = get_consistent_proc_info( + **parsed_args, + trainer_dir=FLAGS.trainer_dir, + distributed_coordinator=FLAGS.distributed_coordinator, + num_processes=FLAGS.num_processes, + process_id=FLAGS.process_id, + jax_backend=FLAGS.jax_backend, + initialization_timeout=FLAGS.initialization_timeout, + ) + FLAGS.process_id = info.inv_proc_id + FLAGS.distributed_coordinator = info.address + FLAGS.experimental_orbax_use_distributed_process_id = True + yield + + +class _TFSavablesStateStorage(StateStorage): + """A StateStorage implementation that only saves the index file and tf savables.""" + + @config_class + class Config(StateStorage.Config): + timeout_secs: int = 300 + + def __init__(self, cfg: Config): + super().__init__(cfg) + # One thread is sufficient because `async_save_tf_savables` only creates one future. 
+ self._executor = ThreadPoolExecutor(1) + self._manager = serialization.AsyncManager(timeout_secs=cfg.timeout_secs) + + def _get_spec(self, *, step: int, state: Nested[Any]) -> Nested[Any]: + spec = {"index": [("step", int(step))], "tf_ckpt_map": {}} + for path, value in utils.flatten_items(state): + if isinstance(value, (Tensor, TensorSpec)): + dtype = getattr(value.dtype, "dtype", value.dtype) + spec["index"].append( + (path, {"dtype": str(dtype), "shape": str(tuple(value.shape))}) + ) + elif isinstance(value, tf.data.Iterator): + spec["index"].append((path, str(type(value)))) + spec["tf_ckpt_map"][path] = value + else: + spec["index"].append((path, value)) + logging.log_first_n(logging.INFO, "TF savables spec: %s", 1, str(spec)) + return spec + + def save_to_dir( + self, + *, + step: int, + state: Nested[Tensor], + ckpt_dir: str, + on_commit_callback: StateStorageCommitCallback = write_index_file, + ): + start_time = time.perf_counter() + # We write data files directly to `ckpt_dir`. `index` is written into `ckpt_dir` in + # `on_commit_callback` to finalize the checkpoint. 
+ spec = self._get_spec(step=step, state=state) + self.wait_until_finished() + + save_tf_future = async_save_tf_savables( + spec["tf_ckpt_map"], + executor=self._executor, + dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}"), + ) + + def commit(): + on_commit_callback(ckpt_dir=ckpt_dir, index=spec["index"]) + logging.info( + "Serialization of TF savables to %s completed in %s seconds.", + ckpt_dir, + time.perf_counter() - start_time, + ) + + # pylint: disable=protected-access + self._manager._add_futures([save_tf_future]) + self._manager._start_async_commit(commit) + + def wait_until_finished(self): + self._manager.wait_until_finished() + + def restore_from_dir( + self, + step: int, + state: Union[Nested[Tensor], Nested[TensorSpec]], + *, + ckpt_dir: str, + validation: CheckpointValidationType = CheckpointValidationType.EXACT, + ) -> Nested[Tensor]: + spec = self._get_spec(step=step, state=state) + logging.info("Restoring TF savables from directory %s", ckpt_dir) + check_state_structure( + read_index_file(ckpt_dir), target_structure=spec["index"], validation=validation + ) + restore_tf_savables( + spec["tf_ckpt_map"], dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}") + ) + multihost_utils.sync_global_devices(ckpt_dir) + return state + + def stop(self): + self._executor.shutdown(wait=True) + + +_PROCESS_ID_FILE_NAME: str = "process_id.txt" + + +@dataclass +class _ProcessInfo: + """Records the process id and address information for this node. + + Attributes: + address: The global coordinator address. This is set during the first run and stays and + stays the same unless process 0 failed. + inv_proc_id: The invariant process id of this node. This process id is set during the first + run and stays the same for all subsequent runs unless this node failed. + cur_proc_id: Internal field. The new process id assigned externally after failover. Used + during ID negotiation after failover. + key: Internal field. Key used during ID negotiation after failover. 
+ num_proc_per_slice: Internal field. Used to calculate slice ID for TPU. + """ + + address: str + inv_proc_id: int + cur_proc_id: int + key: Optional[str] = None + num_proc_per_slice: Optional[int] = None + + def to_string(self): + return "|".join(str(x) for x in [self.address, self.inv_proc_id, self.cur_proc_id]) + + @property + def prev_slice_id(self): + assert self.num_proc_per_slice is not None + return self.inv_proc_id // self.num_proc_per_slice + + @property + def cur_slice_id(self): + assert self.num_proc_per_slice is not None + return self.cur_proc_id // self.num_proc_per_slice + + @classmethod + def from_string( + cls, data: str, *, key: Optional[str] = None, num_proc_per_slice: Optional[int] = None + ): + ls = data.split("|") + assert len(ls) == 3 + return cls(ls[0], int(ls[1]), int(ls[2]), key=key, num_proc_per_slice=num_proc_per_slice) + + +def _get_previous_process_info(local_dir: str, *, trainer_dir: str) -> _ProcessInfo: + """Gets process info from local checkpoint directory.""" + path = os.path.join(local_dir, _get_unique_id(trainer_dir), _PROCESS_ID_FILE_NAME) + if not fs.exists(path): + return _ProcessInfo(address="", inv_proc_id=-1, cur_proc_id=-1) + + with fs.open(path) as f: + return _ProcessInfo.from_string(f.read()) + + +def _dump_process_info(local_dir: str, *, trainer_dir: str, proc_info: _ProcessInfo): + """Dumps process info to local checkpoint directory.""" + local_dir = os.path.join(local_dir, _get_unique_id(trainer_dir)) + fs.makedirs(local_dir) + process_id_file = os.path.join(local_dir, _PROCESS_ID_FILE_NAME) + with fs.open(process_id_file, "w") as f: + f.write(proc_info.to_string()) + + +def _get_unique_id(trainer_dir: str) -> str: + return hashlib.sha256(trainer_dir.encode(), usedforsecurity=False).hexdigest() + + +def _logger_init(): + """Init logger in spawned processes that don't inherit parent's logger.""" + logging.set_verbosity(logging.INFO) + logging.use_absl_handler() + + +def _init_consistent_proc_ids( + *, + 
local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +): + """Exchanges id info through jax coordinator and dumps to local file. + + During failover, healthy nodes will read their locally stored process id file, but failed nodes + will lost their process ids. To assign ids that are free in the global id range (i.e. 0 to + num_processes - 1), we let each node report its process id (-1 if missing) to rank 0, and rank + 0 will figure out suitable IDs to assign to each failed node. We reuse Jax's distributed client + to avoid writing our own coordinator. + """ + _logger_init() + + jax_backend = setup_kwargs["jax_backend"] + timeout_ms = barrier_timeout_seconds * 1000 + utils_spmd.setup(**setup_kwargs) + client: jax.lib.xla_extension.DistributedRuntimeClient = global_state.client + local_proc_info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + key_prefix = "axlearn/id_reassign" + # Local key just needs to be unique for each process. + local_proc_info.key = f"{key_prefix}/{jax.process_index()}" + + if jax_backend == "tpu": + worker_hostnames = os.environ["TPU_WORKER_HOSTNAMES"].split(",") + num_slices = int(os.environ["MEGASCALE_NUM_SLICES"]) + num_proc_per_slice = len(worker_hostnames) + worker_id = int(os.environ["TPU_WORKER_ID"]) + + # Coordinator port for TPU is hardcoded. Reference: + # https://github.com/jax-ml/jax/blob/1aa5de66a8f3c910115cac2fbe118e0facd7a3be/jax/_src/clusters/cloud_tpu_cluster.py#L29 + local_proc_info.address = f"{worker_hostnames[worker_id]}:8476" + # Note: cannot use jax.process_index() here because it may be different from the + # distributed id. This is a jax problem. + local_proc_info.cur_proc_id = ( + int(os.environ["MEGASCALE_SLICE_ID"]) * num_proc_per_slice + worker_id + ) + elif jax_backend == "gpu": + if local_address is None: + raise ValueError( + "local_address must be set for GPU when using in-memory checkpointing." 
+ ) + local_proc_info.address = local_address + local_proc_info.cur_proc_id = setup_kwargs["process_id"] + else: + raise RuntimeError(f"Unsupported backend {jax_backend}.") + + # Every worker reports its proc info to rank 0. + client.key_value_set(local_proc_info.key, local_proc_info.to_string()) + client.wait_at_barrier("axlearn/id-reassign-gather-id", timeout_in_ms=timeout_ms) + + # Then, rank 0 assigns inv_proc_id for worker that's missing their inv_proc_id and find the + # coordinator address. + if local_proc_info.cur_proc_id == 0: + ids = client.key_value_dir_get(key_prefix) + proc_infos: list[_ProcessInfo] = [] + + def first_run_assign_fn(info: _ProcessInfo): + info.inv_proc_id = info.cur_proc_id + + inv_id_assign_fn = first_run_assign_fn + if jax_backend == "tpu": + # For TPUs, we have the additional requirement that process ids in slice id X must be + # in range [X * num_processes_per_slice, (X + 1) * num_processes_per_slice). Therefore, + # we first identify the healthy slices' ids and then figure out the slice ids to assign + # to failed slices. Each process in the failed slice will then get id `new_slice_id * + # num_proc_per_slice + cur_proc_id % num_proc_per_slice`. After id assignment, the + # address of process that's assigned with id=0 will be broadcasted to every worker. + + # Mapping from new slice ids to assigned slice ids forfailed slices. + failed_slices_new_ids = {} + for k, data in ids: + info = _ProcessInfo.from_string(data, key=k, num_proc_per_slice=num_proc_per_slice) + proc_infos.append(info) + if info.inv_proc_id == -1: + failed_slices_new_ids[info.cur_slice_id] = -1 + + already_assigned_slice_ids = set() + for info in proc_infos: + if info.cur_slice_id not in failed_slices_new_ids: + already_assigned_slice_ids.add(info.prev_slice_id) + + # If there're no assigned slice ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. 
+ if already_assigned_slice_ids: + to_be_assigned_slice_ids = set(range(num_slices)) - already_assigned_slice_ids + assert len(to_be_assigned_slice_ids) == len(failed_slices_new_ids) + for k, new_id in zip(failed_slices_new_ids.keys(), to_be_assigned_slice_ids): + failed_slices_new_ids[k] = new_id + + def assign_fn(info: _ProcessInfo): + proc_id = info.inv_proc_id + if (new_slice_id := failed_slices_new_ids.get(info.cur_slice_id)) is not None: + proc_id = ( + new_slice_id * num_proc_per_slice + + info.cur_proc_id % num_proc_per_slice + ) + info.inv_proc_id = proc_id + + inv_id_assign_fn = assign_fn + + elif jax_backend == "gpu": + num_processes = setup_kwargs["num_processes"] + # For GPU backend, failed nodes are assigned with ids that are missing in the global id + # range with arbitrary order. + assigned_ids = set() + for key, data in ids: + info = _ProcessInfo.from_string(data, key=key) + proc_infos.append(info) + assigned_ids.add(info.inv_proc_id) + + # If there're no assigned ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. + if assigned_ids: + to_be_assigned_ids = iter(set(range(num_processes)) - assigned_ids) + + def assign_fn(info: _ProcessInfo): + if info.inv_proc_id == -1: + info.inv_proc_id = next(to_be_assigned_ids) + + inv_id_assign_fn = assign_fn + + coordinator_address = None + for info in proc_infos: + inv_id_assign_fn(info) + if info.inv_proc_id == 0: + coordinator_address = info.address + assert coordinator_address is not None + for info in proc_infos: + info.address = coordinator_address + client.key_value_set(info.key + "/get", info.to_string()) + + new_info = _ProcessInfo.from_string( + client.blocking_key_value_get(local_proc_info.key + "/get", timeout_in_ms=timeout_ms) + ) + logging.info( + "Previous proc id: %d. Assigned proc id: %d. 
Global coordinator address: %s.", + local_proc_info.inv_proc_id, + new_info.inv_proc_id, + new_info.address, + ) + _dump_process_info(local_ckpt_dir, trainer_dir=trainer_dir, proc_info=new_info) + # Block to avoid coordinator exiting too early. + client.wait_at_barrier("axlearn/id-reassign-finalize", timeout_in_ms=timeout_ms) + jax.distributed.shutdown() + + +def get_consistent_proc_info( + *, + local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +) -> _ProcessInfo: + """Gets the invariant process id of the current process and global coordinator's address. + + This function guarantees process id <-> node mapping stays the same for healthy nodes after a + failover. This is required to preserve shard order for in-memory checkpoint recovery. For GPU + training, all healthy nodes will have their process id unchanged. For TPU, all nodes in the + healthy slices will have their process id unchanged. See docstring of + `_init_consistent_proc_ids` for implementation details. + + Args: + local_address: A IP:Port that can be used as the coordinator if this rank is elected. + This Port must be free in the coordinator pod and IP:Port must be reachable from all + other processes. + barrier_timeout_seconds: Timeout in seconds for the barrier and key_value_set operations. + trainer_dir: Path to the trainer dir. + local_ckpt_dir: Path to the local checkpoint dir. + **setup_kwargs: Args to `utils_spmd.setup()`. + + Returns: + A _ProcessInfo whose `inv_proc_id` should be used as the process id and `address` should be + used as the global coordinator address. + """ + platform = os.environ.get("JAX_PLATFORMS", "") + try: + start_t = time.perf_counter() + # Patch platform so the process doesn't waste time initializing accelerators. 
+ os.environ["JAX_PLATFORMS"] = "cpu" + proc = mp.get_context("spawn").Process( + target=_init_consistent_proc_ids, + kwargs=dict( + local_address=local_address, + barrier_timeout_seconds=barrier_timeout_seconds, + trainer_dir=trainer_dir, + local_ckpt_dir=local_ckpt_dir, + **setup_kwargs, + ), + ) + proc.start() + proc.join() + if proc.exitcode != 0: + raise RuntimeError( + "Expects id assignment process to finish normally. " + f"Got exit code {proc.exitcode}. Please check the log above for errors." + ) + + info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + if info.inv_proc_id == -1: + raise RuntimeError("Expects inv process id != -1, but got -1.") + logging.info( + "Successfully finished process ID assignment in %fs", time.perf_counter() - start_t + ) + return info + finally: + # Restore previous platform settings. + if platform != "": + os.environ["JAX_PLATFORMS"] = platform + else: + del os.environ["JAX_PLATFORMS"] + + +class OrbaxEmergencyCheckpointer(BaseCheckpointer): + """Checkpointer implementation that uses Orbax emergency checkpoint. + + ## Summary: + + This checkpointer is designed to improve the goodput of large multi-slice training jobs that + use data-parallelism across slices. At least two data-parallel slices are required. For other + use cases where this is not applicable or ultimate goodput is not required, please use + `OrbaxCheckpointer`. + + Why it can improve goodput: + 1. It can save to a local path (usually backed by a ramdisk) more frequently, so the progress + lost during restart can be reduced. This is in contrast with saving to remote filesystem + such as GCS directly, which has limited bandwidth to support frequent checkpointing. + 2. During restart, checkpoint can be broadcasted through network, which is faster than reading + from a remote filesystem. 
+ + To use the checkpointer, besides configuring it properly, it also requires + `get_consistent_proc_info` to be called and pass `inv_proc_id` and `address` as + `process_id` and `coordinator_address` to `jax.distributed.initialize`. + + ## How it works under the hood + + This checkpointer is intended for multi-slice training that uses data-parallelism across + slices. Orbax emergency checkpoint works by exploiting the following properties: + 1. Tensors are replicated across data-parallel replicas. + 2. When a slice fails in a multi-slice training and failover is started, only nodes + corresponding to the non-healthy slice may be restarted. Healthy nodes from healthy slices + will not restart. + + Hence, all slices can write checkpoints to node's memory or disk, providing us with redundancy + when there's a failure. This checkpoint frequency can be much higher than remote filesystem, + which has limited bandwidth to support high frequency saving. Checkpoints on nodes are referred + as local checkpoints. Checkpoints on remote filesystem are referred as persistent checkpoints. + + When a failure occurs, Orbax checkpointer will find the latest step from all local and + persistent checkpoints. If the checkpoint is local, the slice on which that checkpoint is + stored will read the checkpoint and broadcast the read values to other slices. Since local + checkpoints are scattered across different hosts, the process id, which determines the shard id + of locally stored shards, must stay the same for nodes in the healthy replicas to guarantee a + correct restore. We provide an utility function `get_consistent_proc_info` that returns the + process id and global coordinator address. They must be passed to `jax.distributed.initialize`. + + However, the above procedure doesn't apply to some non-tensor states such as data iterators. + Data iterators are unique across jax processes, and thus cannot be stored on nodes. 
Orbax + emergency checkpointer doesn't support non-tensor states. Therefore, we reuse axlearn + Checkpointer to save, restore and garbage collect those states, which include the index file + and tf iterators. These non-tensor states will be saved whenever local or persistent checkpoint + need to be saved. As the result, the persistent checkpoint structure looks like this: + + ``` + ├── path_prefix + │ ├── non-tensors + │ │ └── step_00000010 + │ │ ├── index + │ │ └── tf_xxx + │ └── tensors + │ └── step_00000010 + │ └── orbax_files_xxx + ``` + + A persistent training checkpoint `step_xxx` is commited when `non-tensors/step_xxx/index` + exists and `tensors/step_xxx` is commited by Orbax. Refer to the docstring of + `OrbaxCheckpointer` for Orbax's commit criteria. + + To abstract the details of the checkpoint layout, the `checkpoint_steps` API returns all steps + for which both Tensor and non-Tensor states have been fully committed. + """ + + _NON_TENSORS_PREFIX: str = "non-tensors" + _TENSORS_PREFIX: str = "tensors" + + @config_class + class Config(BaseCheckpointer.Config): + """Configures OrbaxEmergencyCheckpointer. + + Attributes: + keep_last_n: Keep this many past ckpts. + keep_every_n_steps: If > 0, keeps at least one persistent checkpoint every N steps. + local_keep_last_n: Keep this many past ckpts in local storage (e.g. node memory). + This should almost always set to 1 to avoid OOM. + local_dir: Ckpt base path for local storage. The content in this path must persist + across pod restarts unless the restart is caused by node failure. `local_dir` must + be the same for all processes or processes may hang. + trainer_dir: A string that's unique for the current run. Typically, this is set to + trainer_dir. Local checkpoint will be stored in local_dir/sha256(trainer_dir). + During init, all other folders in local_dir will be removed to prevent unexpected + memory usage. + save_policy: Save policy for persistent checkpoints. 
+ local_save_policy: Save policy for local checkpoints. This should be more frequent than + `save_policy`. Note that data iterator will be saved with either `save_policy` or + `local_save_policy` indicate we should save. + non_tensor_async_timeout_secs: Timeout for async barrier in seconds when saving + non-tensor states. + async_timeout_secs: Timeout for async barrier in seconds when saving tensors. + replica_axis_index: The index of the "data" axis. + """ + + keep_last_n: int = 1 + keep_every_n_steps: Optional[int] = None + local_keep_last_n: int = 1 + local_save_policy: InstantiableConfig[CheckpointPolicy] = config_for_function( + every_n_steps_policy + ).set(n=10) + local_dir: str = "/host-tmp/checkpoints" + trainer_dir: Required[str] = REQUIRED + non_tensor_async_timeout_secs: int = 300 + async_timeout_secs: int = 3600 + replica_axis_index: Required[int] = REQUIRED + + @classmethod + def checkpoint_paths(cls, base_dir: str) -> List[str]: + """See `BaseCheckpointer.checkpointer_paths`. + + Only persistent checkpoint paths are returned. There's no guarantee that the paths returned + have committed TF savables. Use `checkpoint_steps` to get steps with both tensors and + committed TF savables. + """ + logging.log_first_n( + logging.WARNING, + msg="checkpoint_paths is deprecated. Use checkpoint_steps instead.", + n=1, + ) + tensors_dir = os.path.join(base_dir, cls._TENSORS_PREFIX) + return [str(path) for path in ocp.utils.checkpoint_steps_paths(tensors_dir)] + + @classmethod + def checkpoint_steps(cls, base_dir) -> list[int]: + """See `BaseCheckpointer.checkpointer_steps`. + + Only persistent checkpoint steps are returned. 
+ """ + return list( + set( + ocp.utils.checkpoint_steps(os.path.join(base_dir, cls._TENSORS_PREFIX)) + ).intersection( + set(Checkpointer.checkpoint_steps(os.path.join(base_dir, cls._NON_TENSORS_PREFIX))) + ) + ) + + def __init__(self, cfg: Config, *, parent: Optional[Module]): + super().__init__(cfg, parent=parent) + cfg: OrbaxEmergencyCheckpointer.Config = self.config + self._name_format = ocp.step.standard_name_format( + step_prefix=STEP_PREFIX, + step_format_fixed_length=STEP_NUM_DIGITS, + ) + if jax.process_index() == 0: + fs.makedirs(os.path.join(cfg.dir, self._NON_TENSORS_PREFIX)) + fs.makedirs(os.path.join(cfg.dir, self._TENSORS_PREFIX)) + # Cleanup local checkpoints from different runs. + unique_id = _get_unique_id(cfg.trainer_dir) + for fd in fs.listdir(cfg.local_dir): + if not fd.startswith(".") and fd != unique_id: + fs.rmtree(os.path.join(cfg.local_dir, fd)) + self._local_dir = os.path.join(cfg.local_dir, unique_id) + fs.makedirs(self._local_dir) + # Orbax emergency ckpt requires this function to be called prior to checkpointer + # operations. This function also serves as a barrier. + ocp.multihost.initialize_runtime_to_distributed_ids() + ocp.multihost.initialize_distributed_to_device_ids() + ckpt_cfg: Checkpointer.Config = Checkpointer.default_config() + # TODO(hanzhi-zhou): this `keep_last_n` may not be what users expect since non-tensor + # states will save when either local or persistent checkpoint will save. 
+ ckpt_cfg.keep_last_n = cfg.keep_last_n + ckpt_cfg.keep_every_n_steps = cfg.keep_every_n_steps + ckpt_cfg.storage = _TFSavablesStateStorage.default_config() + ckpt_cfg.storage.timeout_secs = cfg.non_tensor_async_timeout_secs + ckpt_cfg.dir = os.path.join(cfg.dir, self._NON_TENSORS_PREFIX) + ckpt_cfg.name = "non-tensors-checkpointer" + + save_policy = cfg.save_policy.instantiate() + local_save_policy = cfg.local_save_policy.instantiate() + + # Non-tensor states must save when either local or persistent ckpt needs to be saved for + # restore from either to succeed. + def _composite_save_policy(*, step: int, evaler_summaries: dict[str, Any]): + return ( + save_policy(step=step, evaler_summaries=evaler_summaries) + or local_save_policy(step=step, evaler_summaries=evaler_summaries) + or self._reached_preemption + ) + + ckpt_cfg.save_policy = config_for_function(lambda: _composite_save_policy) + self._non_tensor_manager: Checkpointer = ckpt_cfg.instantiate(parent=self) + self._tensor_manager: Optional[oecp.CheckpointManager] = None + # See comments of _eval_summaries in `OrbaxCheckpointer`. + self._eval_summaries = None + self._reached_preemption = False + + # pylint: disable-next=redefined-builtin + def ckpt_dir(self, step: int, dir: Optional[str] = None) -> str: + """Obtains the checkpoint dir for the given step.""" + if dir is None: + dir = self._non_tensor_manager.directory + return str(ocp.step.build_step_path(dir, self._name_format, step)) + + def _get_abstract_state( + self, state_with_tensors: Nested[Tensor] + ) -> Nested[jax.ShapeDtypeStruct]: + """Generate the abstract states required by the Orbax emergency checkpointer.""" + return jax.tree.map( + lambda x: jax.ShapeDtypeStruct(x.shape, x.dtype, sharding=x.sharding), + state_with_tensors, + ) + + def _get_tensor_manager(self, state_with_tensors: Nested[Tensor]) -> oecp.CheckpointManager: + """Creates the emergency checkpoint manager if not exists. 
+ + We defer the creation of this checkpoint manager because it requires the state dict, + which is not present during __init__. + """ + cfg: OrbaxEmergencyCheckpointer.Config = self.config + if self._tensor_manager is not None: + return self._tensor_manager + + save_policy = cfg.save_policy.instantiate() + local_save_policy = cfg.local_save_policy.instantiate() + + def _orbax_save_fn( + step: int, last_saved_step: Optional[int], wrapped_save_policy: CheckpointPolicy + ) -> bool: + del last_saved_step + return wrapped_save_policy(step=step, evaler_summaries=self._eval_summaries) + + # For meaning of these options, refer to + # https://github.com/google/orbax/blob/95be2c021bc8cbf4badd83a053ff57b7a9f9b314/checkpoint/orbax/checkpoint/experimental/emergency/checkpoint_manager.py#L277 + self._tensor_manager = oecp.CheckpointManager( + self._local_dir, + persistent_directory=os.path.join(cfg.dir, self._TENSORS_PREFIX), + global_mesh=thread_resources.env.physical_mesh, + abstract_state=self._get_abstract_state(state_with_tensors), + options=oecp.CheckpointManagerOptions( + local=oecp.LocalCheckpointOptions( + should_save_fn=functools.partial( + _orbax_save_fn, wrapped_save_policy=local_save_policy + ), + max_to_keep=cfg.local_keep_last_n, + ), + persistent=oecp.PersistentCheckpointOptions( + should_save_fn=functools.partial( + _orbax_save_fn, wrapped_save_policy=save_policy + ), + max_to_keep=cfg.keep_last_n, + ), + replica_axis_index=cfg.replica_axis_index, + async_options=oecp.checkpoint_manager.AsyncOptions( + timeout_secs=cfg.async_timeout_secs + ), + step_name_format=self._name_format, + cleanup_tmp_directories=True, + enable_async_checkpointing=True, + ), + ) + return self._tensor_manager + + def save( + self, *, step: int, state: Nested[Tensor], evaler_summaries: Optional[Dict[str, Any]] = None + ): + """See `BaseCheckpointer.save` for details.""" + assert self._eval_summaries is None, self._eval_summaries + self._eval_summaries = copy.deepcopy(evaler_summaries or 
{}) + self._reached_preemption = self._tensor_manager.reached_preemption(step) + + start_t = time.perf_counter() + state_with_tensors = jax.tree.map( + lambda x: x if isinstance(x, (Tensor, TensorSpec)) else None, state + ) + # Note that save() waits for prior serialization to finish. + self._non_tensor_manager.save(step=step, state=state) + self._get_tensor_manager(state_with_tensors).save( + step=step, args=ocp.args.PyTreeSave(item=state_with_tensors) + )
How do we mark the completion of a checkpoint? It should happen only when both tensor and non-tensor states are saved. How is this ensured? Please add a comment.
axlearn
github_2023
python
820
apple
ruomingp
@@ -0,0 +1,814 @@ +# Copyright © 2024 Apple Inc. + +"""Implements Orbax emergency checkpointing and provide utilities for correct store. + +See the docstring of `OrbaxEmergencyCheckpointer` for more details. +""" + +import copy +import functools +import hashlib +import multiprocessing as mp +import os +import time +from concurrent.futures import ThreadPoolExecutor +from contextlib import contextmanager +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import jax +import jax.lib +import orbax.checkpoint as ocp +import orbax.checkpoint.experimental.emergency.checkpoint_manager as oecp +import tensorflow as tf +from absl import flags, logging +from jax._src.distributed import global_state +from jax._src.mesh import thread_resources +from jax.experimental.array_serialization import serialization + +from axlearn.common import file_system as fs +from axlearn.common import utils, utils_spmd +from axlearn.common.checkpointer import ( + STEP_NUM_DIGITS, + STEP_PREFIX, + BaseCheckpointer, + Checkpointer, + CheckpointPolicy, + CheckpointValidationType, + InstantiableConfig, + StateStorage, + StateStorageCommitCallback, + async_save_tf_savables, + check_state_structure, + config_for_function, + every_n_steps_policy, + multihost_utils, + parse_step_from_dir, + read_index_file, + restore_tf_savables, + write_index_file, +) +from axlearn.common.config import REQUIRED, Required, config_class +from axlearn.common.module import Module +from axlearn.common.utils import Nested, Tensor, TensorSpec + +FLAGS = flags.FLAGS + + +@contextmanager +def setup(spec: str): + """Setups FLAGS.process_id and FLAGS.distributed_coordinator as required by Orbax. + + See the docstring of `get_consistent_proc_info` for more details. + + Args: + spec: Key=Value pairs separated by comma. Key must be one of ("local_address", + "barrier_timeout_seconds", "local_ckpt_dir"). See the docstring of + `get_consistent_proc_info`. 
+ """ + parsed_args = {} + allowed_fields = ["local_address", "barrier_timeout_seconds", "local_ckpt_dir"] + for field in spec.split(","): + k, v = field.split("=") + if k not in allowed_fields: + raise ValueError(f"Expected key in {allowed_fields}, got key={k}.") + parsed_args[k] = v + if "barrier_timeout_seconds" in parsed_args: + parsed_args["barrier_timeout_seconds"] = int(parsed_args["barrier_timeout_seconds"]) + if "local_ckpt_dir" not in parsed_args: + raise ValueError("local_ckpt_dir must be specified.") + # pylint: disable-next=missing-kwoa + info = get_consistent_proc_info( + **parsed_args, + trainer_dir=FLAGS.trainer_dir, + distributed_coordinator=FLAGS.distributed_coordinator, + num_processes=FLAGS.num_processes, + process_id=FLAGS.process_id, + jax_backend=FLAGS.jax_backend, + initialization_timeout=FLAGS.initialization_timeout, + ) + FLAGS.process_id = info.inv_proc_id + FLAGS.distributed_coordinator = info.address + FLAGS.experimental_orbax_use_distributed_process_id = True + yield + + +class _TFSavablesStateStorage(StateStorage): + """A StateStorage implementation that only saves the index file and tf savables.""" + + @config_class + class Config(StateStorage.Config): + timeout_secs: int = 300 + + def __init__(self, cfg: Config): + super().__init__(cfg) + # One thread is sufficient because `async_save_tf_savables` only creates one future. 
+ self._executor = ThreadPoolExecutor(1) + self._manager = serialization.AsyncManager(timeout_secs=cfg.timeout_secs) + + def _get_spec(self, *, step: int, state: Nested[Any]) -> Nested[Any]: + spec = {"index": [("step", int(step))], "tf_ckpt_map": {}} + for path, value in utils.flatten_items(state): + if isinstance(value, (Tensor, TensorSpec)): + dtype = getattr(value.dtype, "dtype", value.dtype) + spec["index"].append( + (path, {"dtype": str(dtype), "shape": str(tuple(value.shape))}) + ) + elif isinstance(value, tf.data.Iterator): + spec["index"].append((path, str(type(value)))) + spec["tf_ckpt_map"][path] = value + else: + spec["index"].append((path, value)) + logging.log_first_n(logging.INFO, "TF savables spec: %s", 1, str(spec)) + return spec + + def save_to_dir( + self, + *, + step: int, + state: Nested[Tensor], + ckpt_dir: str, + on_commit_callback: StateStorageCommitCallback = write_index_file, + ): + start_time = time.perf_counter() + # We write data files directly to `ckpt_dir`. `index` is written into `ckpt_dir` in + # `on_commit_callback` to finalize the checkpoint. 
+ spec = self._get_spec(step=step, state=state) + self.wait_until_finished() + + save_tf_future = async_save_tf_savables( + spec["tf_ckpt_map"], + executor=self._executor, + dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}"), + ) + + def commit(): + on_commit_callback(ckpt_dir=ckpt_dir, index=spec["index"]) + logging.info( + "Serialization of TF savables to %s completed in %s seconds.", + ckpt_dir, + time.perf_counter() - start_time, + ) + + # pylint: disable=protected-access + self._manager._add_futures([save_tf_future]) + self._manager._start_async_commit(commit) + + def wait_until_finished(self): + self._manager.wait_until_finished() + + def restore_from_dir( + self, + step: int, + state: Union[Nested[Tensor], Nested[TensorSpec]], + *, + ckpt_dir: str, + validation: CheckpointValidationType = CheckpointValidationType.EXACT, + ) -> Nested[Tensor]: + spec = self._get_spec(step=step, state=state) + logging.info("Restoring TF savables from directory %s", ckpt_dir) + check_state_structure( + read_index_file(ckpt_dir), target_structure=spec["index"], validation=validation + ) + restore_tf_savables( + spec["tf_ckpt_map"], dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}") + ) + multihost_utils.sync_global_devices(ckpt_dir) + return state + + def stop(self): + self._executor.shutdown(wait=True) + + +_PROCESS_ID_FILE_NAME: str = "process_id.txt" + + +@dataclass +class _ProcessInfo: + """Records the process id and address information for this node. + + Attributes: + address: The global coordinator address. This is set during the first run and stays and + stays the same unless process 0 failed. + inv_proc_id: The invariant process id of this node. This process id is set during the first + run and stays the same for all subsequent runs unless this node failed. + cur_proc_id: Internal field. The new process id assigned externally after failover. Used + during ID negotiation after failover. + key: Internal field. Key used during ID negotiation after failover. 
+ num_proc_per_slice: Internal field. Used to calculate slice ID for TPU. + """ + + address: str + inv_proc_id: int + cur_proc_id: int + key: Optional[str] = None + num_proc_per_slice: Optional[int] = None + + def to_string(self): + return "|".join(str(x) for x in [self.address, self.inv_proc_id, self.cur_proc_id]) + + @property + def prev_slice_id(self): + assert self.num_proc_per_slice is not None + return self.inv_proc_id // self.num_proc_per_slice + + @property + def cur_slice_id(self): + assert self.num_proc_per_slice is not None + return self.cur_proc_id // self.num_proc_per_slice + + @classmethod + def from_string( + cls, data: str, *, key: Optional[str] = None, num_proc_per_slice: Optional[int] = None + ): + ls = data.split("|") + assert len(ls) == 3 + return cls(ls[0], int(ls[1]), int(ls[2]), key=key, num_proc_per_slice=num_proc_per_slice) + + +def _get_previous_process_info(local_dir: str, *, trainer_dir: str) -> _ProcessInfo: + """Gets process info from local checkpoint directory.""" + path = os.path.join(local_dir, _get_unique_id(trainer_dir), _PROCESS_ID_FILE_NAME) + if not fs.exists(path): + return _ProcessInfo(address="", inv_proc_id=-1, cur_proc_id=-1) + + with fs.open(path) as f: + return _ProcessInfo.from_string(f.read()) + + +def _dump_process_info(local_dir: str, *, trainer_dir: str, proc_info: _ProcessInfo): + """Dumps process info to local checkpoint directory.""" + local_dir = os.path.join(local_dir, _get_unique_id(trainer_dir)) + fs.makedirs(local_dir) + process_id_file = os.path.join(local_dir, _PROCESS_ID_FILE_NAME) + with fs.open(process_id_file, "w") as f: + f.write(proc_info.to_string()) + + +def _get_unique_id(trainer_dir: str) -> str: + return hashlib.sha256(trainer_dir.encode(), usedforsecurity=False).hexdigest() + + +def _logger_init(): + """Init logger in spawned processes that don't inherit parent's logger.""" + logging.set_verbosity(logging.INFO) + logging.use_absl_handler() + + +def _init_consistent_proc_ids( + *, + 
local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +): + """Exchanges id info through jax coordinator and dumps to local file. + + During failover, healthy nodes will read their locally stored process id file, but failed nodes + will lost their process ids. To assign ids that are free in the global id range (i.e. 0 to + num_processes - 1), we let each node report its process id (-1 if missing) to rank 0, and rank + 0 will figure out suitable IDs to assign to each failed node. We reuse Jax's distributed client + to avoid writing our own coordinator. + """ + _logger_init() + + jax_backend = setup_kwargs["jax_backend"] + timeout_ms = barrier_timeout_seconds * 1000 + utils_spmd.setup(**setup_kwargs) + client: jax.lib.xla_extension.DistributedRuntimeClient = global_state.client + local_proc_info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + key_prefix = "axlearn/id_reassign" + # Local key just needs to be unique for each process. + local_proc_info.key = f"{key_prefix}/{jax.process_index()}" + + if jax_backend == "tpu": + worker_hostnames = os.environ["TPU_WORKER_HOSTNAMES"].split(",") + num_slices = int(os.environ["MEGASCALE_NUM_SLICES"]) + num_proc_per_slice = len(worker_hostnames) + worker_id = int(os.environ["TPU_WORKER_ID"]) + + # Coordinator port for TPU is hardcoded. Reference: + # https://github.com/jax-ml/jax/blob/1aa5de66a8f3c910115cac2fbe118e0facd7a3be/jax/_src/clusters/cloud_tpu_cluster.py#L29 + local_proc_info.address = f"{worker_hostnames[worker_id]}:8476" + # Note: cannot use jax.process_index() here because it may be different from the + # distributed id. This is a jax problem. + local_proc_info.cur_proc_id = ( + int(os.environ["MEGASCALE_SLICE_ID"]) * num_proc_per_slice + worker_id + ) + elif jax_backend == "gpu": + if local_address is None: + raise ValueError( + "local_address must be set for GPU when using in-memory checkpointing." 
+ ) + local_proc_info.address = local_address + local_proc_info.cur_proc_id = setup_kwargs["process_id"] + else: + raise RuntimeError(f"Unsupported backend {jax_backend}.") + + # Every worker reports its proc info to rank 0. + client.key_value_set(local_proc_info.key, local_proc_info.to_string()) + client.wait_at_barrier("axlearn/id-reassign-gather-id", timeout_in_ms=timeout_ms) + + # Then, rank 0 assigns inv_proc_id for worker that's missing their inv_proc_id and find the + # coordinator address. + if local_proc_info.cur_proc_id == 0: + ids = client.key_value_dir_get(key_prefix) + proc_infos: list[_ProcessInfo] = [] + + def first_run_assign_fn(info: _ProcessInfo): + info.inv_proc_id = info.cur_proc_id + + inv_id_assign_fn = first_run_assign_fn + if jax_backend == "tpu": + # For TPUs, we have the additional requirement that process ids in slice id X must be + # in range [X * num_processes_per_slice, (X + 1) * num_processes_per_slice). Therefore, + # we first identify the healthy slices' ids and then figure out the slice ids to assign + # to failed slices. Each process in the failed slice will then get id `new_slice_id * + # num_proc_per_slice + cur_proc_id % num_proc_per_slice`. After id assignment, the + # address of process that's assigned with id=0 will be broadcasted to every worker. + + # Mapping from new slice ids to assigned slice ids forfailed slices. + failed_slices_new_ids = {} + for k, data in ids: + info = _ProcessInfo.from_string(data, key=k, num_proc_per_slice=num_proc_per_slice) + proc_infos.append(info) + if info.inv_proc_id == -1: + failed_slices_new_ids[info.cur_slice_id] = -1 + + already_assigned_slice_ids = set() + for info in proc_infos: + if info.cur_slice_id not in failed_slices_new_ids: + already_assigned_slice_ids.add(info.prev_slice_id) + + # If there're no assigned slice ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. 
+ if already_assigned_slice_ids: + to_be_assigned_slice_ids = set(range(num_slices)) - already_assigned_slice_ids + assert len(to_be_assigned_slice_ids) == len(failed_slices_new_ids) + for k, new_id in zip(failed_slices_new_ids.keys(), to_be_assigned_slice_ids): + failed_slices_new_ids[k] = new_id + + def assign_fn(info: _ProcessInfo): + proc_id = info.inv_proc_id + if (new_slice_id := failed_slices_new_ids.get(info.cur_slice_id)) is not None: + proc_id = ( + new_slice_id * num_proc_per_slice + + info.cur_proc_id % num_proc_per_slice + ) + info.inv_proc_id = proc_id + + inv_id_assign_fn = assign_fn + + elif jax_backend == "gpu": + num_processes = setup_kwargs["num_processes"] + # For GPU backend, failed nodes are assigned with ids that are missing in the global id + # range with arbitrary order. + assigned_ids = set() + for key, data in ids: + info = _ProcessInfo.from_string(data, key=key) + proc_infos.append(info) + assigned_ids.add(info.inv_proc_id) + + # If there're no assigned ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. + if assigned_ids: + to_be_assigned_ids = iter(set(range(num_processes)) - assigned_ids) + + def assign_fn(info: _ProcessInfo): + if info.inv_proc_id == -1: + info.inv_proc_id = next(to_be_assigned_ids) + + inv_id_assign_fn = assign_fn + + coordinator_address = None + for info in proc_infos: + inv_id_assign_fn(info) + if info.inv_proc_id == 0: + coordinator_address = info.address + assert coordinator_address is not None + for info in proc_infos: + info.address = coordinator_address + client.key_value_set(info.key + "/get", info.to_string()) + + new_info = _ProcessInfo.from_string( + client.blocking_key_value_get(local_proc_info.key + "/get", timeout_in_ms=timeout_ms) + ) + logging.info( + "Previous proc id: %d. Assigned proc id: %d. 
Global coordinator address: %s.", + local_proc_info.inv_proc_id, + new_info.inv_proc_id, + new_info.address, + ) + _dump_process_info(local_ckpt_dir, trainer_dir=trainer_dir, proc_info=new_info) + # Block to avoid coordinator exiting too early. + client.wait_at_barrier("axlearn/id-reassign-finalize", timeout_in_ms=timeout_ms) + jax.distributed.shutdown() + + +def get_consistent_proc_info( + *, + local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +) -> _ProcessInfo: + """Gets the invariant process id of the current process and global coordinator's address. + + This function guarantees process id <-> node mapping stays the same for healthy nodes after a + failover. This is required to preserve shard order for in-memory checkpoint recovery. For GPU + training, all healthy nodes will have their process id unchanged. For TPU, all nodes in the + healthy slices will have their process id unchanged. See docstring of + `_init_consistent_proc_ids` for implementation details. + + Args: + local_address: A IP:Port that can be used as the coordinator if this rank is elected. + This Port must be free in the coordinator pod and IP:Port must be reachable from all + other processes. + barrier_timeout_seconds: Timeout in seconds for the barrier and key_value_set operations. + trainer_dir: Path to the trainer dir. + local_ckpt_dir: Path to the local checkpoint dir. + **setup_kwargs: Args to `utils_spmd.setup()`. + + Returns: + A _ProcessInfo whose `inv_proc_id` should be used as the process id and `address` should be + used as the global coordinator address. + """ + platform = os.environ.get("JAX_PLATFORMS", "") + try: + start_t = time.perf_counter() + # Patch platform so the process doesn't waste time initializing accelerators. 
+ os.environ["JAX_PLATFORMS"] = "cpu" + proc = mp.get_context("spawn").Process( + target=_init_consistent_proc_ids, + kwargs=dict( + local_address=local_address, + barrier_timeout_seconds=barrier_timeout_seconds, + trainer_dir=trainer_dir, + local_ckpt_dir=local_ckpt_dir, + **setup_kwargs, + ), + ) + proc.start() + proc.join() + if proc.exitcode != 0: + raise RuntimeError( + "Expects id assignment process to finish normally. " + f"Got exit code {proc.exitcode}. Please check the log above for errors." + ) + + info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + if info.inv_proc_id == -1: + raise RuntimeError("Expects inv process id != -1, but got -1.") + logging.info( + "Successfully finished process ID assignment in %fs", time.perf_counter() - start_t + ) + return info + finally: + # Restore previous platform settings. + if platform != "": + os.environ["JAX_PLATFORMS"] = platform + else: + del os.environ["JAX_PLATFORMS"] + + +class OrbaxEmergencyCheckpointer(BaseCheckpointer): + """Checkpointer implementation that uses Orbax emergency checkpoint. + + ## Summary: + + This checkpointer is designed to improve the goodput of large multi-slice training jobs that + use data-parallelism across slices. At least two data-parallel slices are required. For other + use cases where this is not applicable or ultimate goodput is not required, please use + `OrbaxCheckpointer`. + + Why it can improve goodput: + 1. It can save to a local path (usually backed by a ramdisk) more frequently, so the progress + lost during restart can be reduced. This is in contrast with saving to remote filesystem + such as GCS directly, which has limited bandwidth to support frequent checkpointing. + 2. During restart, checkpoint can be broadcasted through network, which is faster than reading + from a remote filesystem. 
+ + To use the checkpointer, besides configuring it properly, it also requires + `get_consistent_proc_info` to be called and pass `inv_proc_id` and `address` as + `process_id` and `coordinator_address` to `jax.distributed.initialize`. + + ## How it works under the hood + + This checkpointer is intended for multi-slice training that uses data-parallelism across + slices. Orbax emergency checkpoint works by exploiting the following properties: + 1. Tensors are replicated across data-parallel replicas. + 2. When a slice fails in a multi-slice training and failover is started, only nodes + corresponding to the non-healthy slice may be restarted. Healthy nodes from healthy slices + will not restart. + + Hence, all slices can write checkpoints to node's memory or disk, providing us with redundancy + when there's a failure. This checkpoint frequency can be much higher than remote filesystem, + which has limited bandwidth to support high frequency saving. Checkpoints on nodes are referred + as local checkpoints. Checkpoints on remote filesystem are referred as persistent checkpoints. + + When a failure occurs, Orbax checkpointer will find the latest step from all local and + persistent checkpoints. If the checkpoint is local, the slice on which that checkpoint is + stored will read the checkpoint and broadcast the read values to other slices. Since local + checkpoints are scattered across different hosts, the process id, which determines the shard id + of locally stored shards, must stay the same for nodes in the healthy replicas to guarantee a + correct restore. We provide an utility function `get_consistent_proc_info` that returns the + process id and global coordinator address. They must be passed to `jax.distributed.initialize`. + + However, the above procedure doesn't apply to some non-tensor states such as data iterators. + Data iterators are unique across jax processes, and thus cannot be stored on nodes. 
Orbax + emergency checkpointer doesn't support non-tensor states. Therefore, we reuse axlearn + Checkpointer to save, restore and garbage collect those states, which include the index file + and tf iterators. These non-tensor states will be saved whenever local or persistent checkpoint + need to be saved. As the result, the persistent checkpoint structure looks like this: + + ``` + ├── path_prefix + │ ├── non-tensors + │ │ └── step_00000010 + │ │ ├── index + │ │ └── tf_xxx + │ └── tensors + │ └── step_00000010 + │ └── orbax_files_xxx + ``` + + A persistent training checkpoint `step_xxx` is commited when `non-tensors/step_xxx/index` + exists and `tensors/step_xxx` is commited by Orbax. Refer to the docstring of + `OrbaxCheckpointer` for Orbax's commit criteria. + + To abstract the details of the checkpoint layout, the `checkpoint_steps` API returns all steps + for which both Tensor and non-Tensor states have been fully committed. + """ + + _NON_TENSORS_PREFIX: str = "non-tensors" + _TENSORS_PREFIX: str = "tensors" + + @config_class + class Config(BaseCheckpointer.Config): + """Configures OrbaxEmergencyCheckpointer. + + Attributes: + keep_last_n: Keep this many past ckpts. + keep_every_n_steps: If > 0, keeps at least one persistent checkpoint every N steps. + local_keep_last_n: Keep this many past ckpts in local storage (e.g. node memory). + This should almost always set to 1 to avoid OOM. + local_dir: Ckpt base path for local storage. The content in this path must persist + across pod restarts unless the restart is caused by node failure. `local_dir` must + be the same for all processes or processes may hang. + trainer_dir: A string that's unique for the current run. Typically, this is set to + trainer_dir. Local checkpoint will be stored in local_dir/sha256(trainer_dir). + During init, all other folders in local_dir will be removed to prevent unexpected + memory usage. + save_policy: Save policy for persistent checkpoints. 
+ local_save_policy: Save policy for local checkpoints. This should be more frequent than + `save_policy`. Note that data iterator will be saved with either `save_policy` or + `local_save_policy` indicate we should save. + non_tensor_async_timeout_secs: Timeout for async barrier in seconds when saving + non-tensor states. + async_timeout_secs: Timeout for async barrier in seconds when saving tensors. + replica_axis_index: The index of the "data" axis. + """ + + keep_last_n: int = 1 + keep_every_n_steps: Optional[int] = None + local_keep_last_n: int = 1 + local_save_policy: InstantiableConfig[CheckpointPolicy] = config_for_function( + every_n_steps_policy + ).set(n=10) + local_dir: str = "/host-tmp/checkpoints" + trainer_dir: Required[str] = REQUIRED + non_tensor_async_timeout_secs: int = 300 + async_timeout_secs: int = 3600 + replica_axis_index: Required[int] = REQUIRED + + @classmethod + def checkpoint_paths(cls, base_dir: str) -> List[str]: + """See `BaseCheckpointer.checkpointer_paths`. + + Only persistent checkpoint paths are returned. There's no guarantee that the paths returned + have committed TF savables. Use `checkpoint_steps` to get steps with both tensors and + committed TF savables. + """ + logging.log_first_n( + logging.WARNING, + msg="checkpoint_paths is deprecated. Use checkpoint_steps instead.", + n=1, + ) + tensors_dir = os.path.join(base_dir, cls._TENSORS_PREFIX) + return [str(path) for path in ocp.utils.checkpoint_steps_paths(tensors_dir)] + + @classmethod + def checkpoint_steps(cls, base_dir) -> list[int]: + """See `BaseCheckpointer.checkpointer_steps`. + + Only persistent checkpoint steps are returned. 
+ """ + return list( + set( + ocp.utils.checkpoint_steps(os.path.join(base_dir, cls._TENSORS_PREFIX)) + ).intersection( + set(Checkpointer.checkpoint_steps(os.path.join(base_dir, cls._NON_TENSORS_PREFIX))) + ) + ) + + def __init__(self, cfg: Config, *, parent: Optional[Module]): + super().__init__(cfg, parent=parent) + cfg: OrbaxEmergencyCheckpointer.Config = self.config + self._name_format = ocp.step.standard_name_format( + step_prefix=STEP_PREFIX, + step_format_fixed_length=STEP_NUM_DIGITS, + ) + if jax.process_index() == 0: + fs.makedirs(os.path.join(cfg.dir, self._NON_TENSORS_PREFIX)) + fs.makedirs(os.path.join(cfg.dir, self._TENSORS_PREFIX)) + # Cleanup local checkpoints from different runs. + unique_id = _get_unique_id(cfg.trainer_dir) + for fd in fs.listdir(cfg.local_dir): + if not fd.startswith(".") and fd != unique_id: + fs.rmtree(os.path.join(cfg.local_dir, fd)) + self._local_dir = os.path.join(cfg.local_dir, unique_id) + fs.makedirs(self._local_dir) + # Orbax emergency ckpt requires this function to be called prior to checkpointer + # operations. This function also serves as a barrier. + ocp.multihost.initialize_runtime_to_distributed_ids() + ocp.multihost.initialize_distributed_to_device_ids() + ckpt_cfg: Checkpointer.Config = Checkpointer.default_config() + # TODO(hanzhi-zhou): this `keep_last_n` may not be what users expect since non-tensor + # states will save when either local or persistent checkpoint will save. 
+ ckpt_cfg.keep_last_n = cfg.keep_last_n + ckpt_cfg.keep_every_n_steps = cfg.keep_every_n_steps + ckpt_cfg.storage = _TFSavablesStateStorage.default_config() + ckpt_cfg.storage.timeout_secs = cfg.non_tensor_async_timeout_secs + ckpt_cfg.dir = os.path.join(cfg.dir, self._NON_TENSORS_PREFIX) + ckpt_cfg.name = "non-tensors-checkpointer" + + save_policy = cfg.save_policy.instantiate() + local_save_policy = cfg.local_save_policy.instantiate() + + # Non-tensor states must save when either local or persistent ckpt needs to be saved for + # restore from either to succeed. + def _composite_save_policy(*, step: int, evaler_summaries: dict[str, Any]): + return ( + save_policy(step=step, evaler_summaries=evaler_summaries) + or local_save_policy(step=step, evaler_summaries=evaler_summaries) + or self._reached_preemption + ) + + ckpt_cfg.save_policy = config_for_function(lambda: _composite_save_policy) + self._non_tensor_manager: Checkpointer = ckpt_cfg.instantiate(parent=self) + self._tensor_manager: Optional[oecp.CheckpointManager] = None + # See comments of _eval_summaries in `OrbaxCheckpointer`. + self._eval_summaries = None + self._reached_preemption = False + + # pylint: disable-next=redefined-builtin + def ckpt_dir(self, step: int, dir: Optional[str] = None) -> str: + """Obtains the checkpoint dir for the given step.""" + if dir is None: + dir = self._non_tensor_manager.directory + return str(ocp.step.build_step_path(dir, self._name_format, step)) + + def _get_abstract_state( + self, state_with_tensors: Nested[Tensor] + ) -> Nested[jax.ShapeDtypeStruct]: + """Generate the abstract states required by the Orbax emergency checkpointer.""" + return jax.tree.map( + lambda x: jax.ShapeDtypeStruct(x.shape, x.dtype, sharding=x.sharding), + state_with_tensors, + ) + + def _get_tensor_manager(self, state_with_tensors: Nested[Tensor]) -> oecp.CheckpointManager: + """Creates the emergency checkpoint manager if not exists. 
+ + We defer the creation of this checkpoint manager because it requires the state dict, + which is not present during __init__. + """ + cfg: OrbaxEmergencyCheckpointer.Config = self.config + if self._tensor_manager is not None: + return self._tensor_manager + + save_policy = cfg.save_policy.instantiate() + local_save_policy = cfg.local_save_policy.instantiate() + + def _orbax_save_fn( + step: int, last_saved_step: Optional[int], wrapped_save_policy: CheckpointPolicy + ) -> bool: + del last_saved_step + return wrapped_save_policy(step=step, evaler_summaries=self._eval_summaries) + + # For meaning of these options, refer to + # https://github.com/google/orbax/blob/95be2c021bc8cbf4badd83a053ff57b7a9f9b314/checkpoint/orbax/checkpoint/experimental/emergency/checkpoint_manager.py#L277 + self._tensor_manager = oecp.CheckpointManager( + self._local_dir, + persistent_directory=os.path.join(cfg.dir, self._TENSORS_PREFIX), + global_mesh=thread_resources.env.physical_mesh, + abstract_state=self._get_abstract_state(state_with_tensors), + options=oecp.CheckpointManagerOptions( + local=oecp.LocalCheckpointOptions(
Shall we expose `oecp.LocalCheckpointOptions` to users as `Config.local_checkpoint_options`? User can set it to None to disable local checkpoints. We can provide a helper function for users to construct `should_save_fn` from their policy.
axlearn
github_2023
python
820
apple
ruomingp
@@ -0,0 +1,814 @@ +# Copyright © 2024 Apple Inc. + +"""Implements Orbax emergency checkpointing and provide utilities for correct store. + +See the docstring of `OrbaxEmergencyCheckpointer` for more details. +""" + +import copy +import functools +import hashlib +import multiprocessing as mp +import os +import time +from concurrent.futures import ThreadPoolExecutor +from contextlib import contextmanager +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import jax +import jax.lib +import orbax.checkpoint as ocp +import orbax.checkpoint.experimental.emergency.checkpoint_manager as oecp +import tensorflow as tf +from absl import flags, logging +from jax._src.distributed import global_state +from jax._src.mesh import thread_resources +from jax.experimental.array_serialization import serialization + +from axlearn.common import file_system as fs +from axlearn.common import utils, utils_spmd +from axlearn.common.checkpointer import ( + STEP_NUM_DIGITS, + STEP_PREFIX, + BaseCheckpointer, + Checkpointer, + CheckpointPolicy, + CheckpointValidationType, + InstantiableConfig, + StateStorage, + StateStorageCommitCallback, + async_save_tf_savables, + check_state_structure, + config_for_function, + every_n_steps_policy, + multihost_utils, + parse_step_from_dir, + read_index_file, + restore_tf_savables, + write_index_file, +) +from axlearn.common.config import REQUIRED, Required, config_class +from axlearn.common.module import Module +from axlearn.common.utils import Nested, Tensor, TensorSpec + +FLAGS = flags.FLAGS + + +@contextmanager +def setup(spec: str): + """Setups FLAGS.process_id and FLAGS.distributed_coordinator as required by Orbax. + + See the docstring of `get_consistent_proc_info` for more details. + + Args: + spec: Key=Value pairs separated by comma. Key must be one of ("local_address", + "barrier_timeout_seconds", "local_ckpt_dir"). See the docstring of + `get_consistent_proc_info`. 
+ """ + parsed_args = {} + allowed_fields = ["local_address", "barrier_timeout_seconds", "local_ckpt_dir"] + for field in spec.split(","): + k, v = field.split("=") + if k not in allowed_fields: + raise ValueError(f"Expected key in {allowed_fields}, got key={k}.") + parsed_args[k] = v + if "barrier_timeout_seconds" in parsed_args: + parsed_args["barrier_timeout_seconds"] = int(parsed_args["barrier_timeout_seconds"]) + if "local_ckpt_dir" not in parsed_args: + raise ValueError("local_ckpt_dir must be specified.") + # pylint: disable-next=missing-kwoa + info = get_consistent_proc_info( + **parsed_args, + trainer_dir=FLAGS.trainer_dir, + distributed_coordinator=FLAGS.distributed_coordinator, + num_processes=FLAGS.num_processes, + process_id=FLAGS.process_id, + jax_backend=FLAGS.jax_backend, + initialization_timeout=FLAGS.initialization_timeout, + ) + FLAGS.process_id = info.inv_proc_id + FLAGS.distributed_coordinator = info.address + FLAGS.experimental_orbax_use_distributed_process_id = True + yield + + +class _TFSavablesStateStorage(StateStorage): + """A StateStorage implementation that only saves the index file and tf savables.""" + + @config_class + class Config(StateStorage.Config): + timeout_secs: int = 300 + + def __init__(self, cfg: Config): + super().__init__(cfg) + # One thread is sufficient because `async_save_tf_savables` only creates one future. 
+ self._executor = ThreadPoolExecutor(1) + self._manager = serialization.AsyncManager(timeout_secs=cfg.timeout_secs) + + def _get_spec(self, *, step: int, state: Nested[Any]) -> Nested[Any]: + spec = {"index": [("step", int(step))], "tf_ckpt_map": {}} + for path, value in utils.flatten_items(state): + if isinstance(value, (Tensor, TensorSpec)): + dtype = getattr(value.dtype, "dtype", value.dtype) + spec["index"].append( + (path, {"dtype": str(dtype), "shape": str(tuple(value.shape))}) + ) + elif isinstance(value, tf.data.Iterator): + spec["index"].append((path, str(type(value)))) + spec["tf_ckpt_map"][path] = value + else: + spec["index"].append((path, value)) + logging.log_first_n(logging.INFO, "TF savables spec: %s", 1, str(spec)) + return spec + + def save_to_dir( + self, + *, + step: int, + state: Nested[Tensor], + ckpt_dir: str, + on_commit_callback: StateStorageCommitCallback = write_index_file, + ): + start_time = time.perf_counter() + # We write data files directly to `ckpt_dir`. `index` is written into `ckpt_dir` in + # `on_commit_callback` to finalize the checkpoint. 
+ spec = self._get_spec(step=step, state=state) + self.wait_until_finished() + + save_tf_future = async_save_tf_savables( + spec["tf_ckpt_map"], + executor=self._executor, + dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}"), + ) + + def commit(): + on_commit_callback(ckpt_dir=ckpt_dir, index=spec["index"]) + logging.info( + "Serialization of TF savables to %s completed in %s seconds.", + ckpt_dir, + time.perf_counter() - start_time, + ) + + # pylint: disable=protected-access + self._manager._add_futures([save_tf_future]) + self._manager._start_async_commit(commit) + + def wait_until_finished(self): + self._manager.wait_until_finished() + + def restore_from_dir( + self, + step: int, + state: Union[Nested[Tensor], Nested[TensorSpec]], + *, + ckpt_dir: str, + validation: CheckpointValidationType = CheckpointValidationType.EXACT, + ) -> Nested[Tensor]: + spec = self._get_spec(step=step, state=state) + logging.info("Restoring TF savables from directory %s", ckpt_dir) + check_state_structure( + read_index_file(ckpt_dir), target_structure=spec["index"], validation=validation + ) + restore_tf_savables( + spec["tf_ckpt_map"], dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}") + ) + multihost_utils.sync_global_devices(ckpt_dir) + return state + + def stop(self): + self._executor.shutdown(wait=True) + + +_PROCESS_ID_FILE_NAME: str = "process_id.txt" + + +@dataclass +class _ProcessInfo: + """Records the process id and address information for this node. + + Attributes: + address: The global coordinator address. This is set during the first run and stays and + stays the same unless process 0 failed. + inv_proc_id: The invariant process id of this node. This process id is set during the first + run and stays the same for all subsequent runs unless this node failed. + cur_proc_id: Internal field. The new process id assigned externally after failover. Used + during ID negotiation after failover. + key: Internal field. Key used during ID negotiation after failover. 
+ num_proc_per_slice: Internal field. Used to calculate slice ID for TPU. + """ + + address: str + inv_proc_id: int + cur_proc_id: int + key: Optional[str] = None + num_proc_per_slice: Optional[int] = None + + def to_string(self): + return "|".join(str(x) for x in [self.address, self.inv_proc_id, self.cur_proc_id]) + + @property + def prev_slice_id(self): + assert self.num_proc_per_slice is not None + return self.inv_proc_id // self.num_proc_per_slice + + @property + def cur_slice_id(self): + assert self.num_proc_per_slice is not None + return self.cur_proc_id // self.num_proc_per_slice + + @classmethod + def from_string( + cls, data: str, *, key: Optional[str] = None, num_proc_per_slice: Optional[int] = None + ): + ls = data.split("|") + assert len(ls) == 3 + return cls(ls[0], int(ls[1]), int(ls[2]), key=key, num_proc_per_slice=num_proc_per_slice) + + +def _get_previous_process_info(local_dir: str, *, trainer_dir: str) -> _ProcessInfo: + """Gets process info from local checkpoint directory.""" + path = os.path.join(local_dir, _get_unique_id(trainer_dir), _PROCESS_ID_FILE_NAME) + if not fs.exists(path): + return _ProcessInfo(address="", inv_proc_id=-1, cur_proc_id=-1) + + with fs.open(path) as f: + return _ProcessInfo.from_string(f.read()) + + +def _dump_process_info(local_dir: str, *, trainer_dir: str, proc_info: _ProcessInfo): + """Dumps process info to local checkpoint directory.""" + local_dir = os.path.join(local_dir, _get_unique_id(trainer_dir)) + fs.makedirs(local_dir) + process_id_file = os.path.join(local_dir, _PROCESS_ID_FILE_NAME) + with fs.open(process_id_file, "w") as f: + f.write(proc_info.to_string()) + + +def _get_unique_id(trainer_dir: str) -> str: + return hashlib.sha256(trainer_dir.encode(), usedforsecurity=False).hexdigest() + + +def _logger_init(): + """Init logger in spawned processes that don't inherit parent's logger.""" + logging.set_verbosity(logging.INFO) + logging.use_absl_handler() + + +def _init_consistent_proc_ids( + *, + 
local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +): + """Exchanges id info through jax coordinator and dumps to local file. + + During failover, healthy nodes will read their locally stored process id file, but failed nodes + will lost their process ids. To assign ids that are free in the global id range (i.e. 0 to + num_processes - 1), we let each node report its process id (-1 if missing) to rank 0, and rank + 0 will figure out suitable IDs to assign to each failed node. We reuse Jax's distributed client + to avoid writing our own coordinator. + """ + _logger_init() + + jax_backend = setup_kwargs["jax_backend"] + timeout_ms = barrier_timeout_seconds * 1000 + utils_spmd.setup(**setup_kwargs) + client: jax.lib.xla_extension.DistributedRuntimeClient = global_state.client + local_proc_info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + key_prefix = "axlearn/id_reassign" + # Local key just needs to be unique for each process. + local_proc_info.key = f"{key_prefix}/{jax.process_index()}" + + if jax_backend == "tpu": + worker_hostnames = os.environ["TPU_WORKER_HOSTNAMES"].split(",") + num_slices = int(os.environ["MEGASCALE_NUM_SLICES"]) + num_proc_per_slice = len(worker_hostnames) + worker_id = int(os.environ["TPU_WORKER_ID"]) + + # Coordinator port for TPU is hardcoded. Reference: + # https://github.com/jax-ml/jax/blob/1aa5de66a8f3c910115cac2fbe118e0facd7a3be/jax/_src/clusters/cloud_tpu_cluster.py#L29 + local_proc_info.address = f"{worker_hostnames[worker_id]}:8476" + # Note: cannot use jax.process_index() here because it may be different from the + # distributed id. This is a jax problem. + local_proc_info.cur_proc_id = ( + int(os.environ["MEGASCALE_SLICE_ID"]) * num_proc_per_slice + worker_id + ) + elif jax_backend == "gpu": + if local_address is None: + raise ValueError( + "local_address must be set for GPU when using in-memory checkpointing." 
+ ) + local_proc_info.address = local_address + local_proc_info.cur_proc_id = setup_kwargs["process_id"] + else: + raise RuntimeError(f"Unsupported backend {jax_backend}.") + + # Every worker reports its proc info to rank 0. + client.key_value_set(local_proc_info.key, local_proc_info.to_string()) + client.wait_at_barrier("axlearn/id-reassign-gather-id", timeout_in_ms=timeout_ms) + + # Then, rank 0 assigns inv_proc_id for worker that's missing their inv_proc_id and find the + # coordinator address. + if local_proc_info.cur_proc_id == 0: + ids = client.key_value_dir_get(key_prefix) + proc_infos: list[_ProcessInfo] = [] + + def first_run_assign_fn(info: _ProcessInfo): + info.inv_proc_id = info.cur_proc_id + + inv_id_assign_fn = first_run_assign_fn + if jax_backend == "tpu": + # For TPUs, we have the additional requirement that process ids in slice id X must be + # in range [X * num_processes_per_slice, (X + 1) * num_processes_per_slice). Therefore, + # we first identify the healthy slices' ids and then figure out the slice ids to assign + # to failed slices. Each process in the failed slice will then get id `new_slice_id * + # num_proc_per_slice + cur_proc_id % num_proc_per_slice`. After id assignment, the + # address of process that's assigned with id=0 will be broadcasted to every worker. + + # Mapping from new slice ids to assigned slice ids forfailed slices. + failed_slices_new_ids = {} + for k, data in ids: + info = _ProcessInfo.from_string(data, key=k, num_proc_per_slice=num_proc_per_slice) + proc_infos.append(info) + if info.inv_proc_id == -1: + failed_slices_new_ids[info.cur_slice_id] = -1 + + already_assigned_slice_ids = set() + for info in proc_infos: + if info.cur_slice_id not in failed_slices_new_ids: + already_assigned_slice_ids.add(info.prev_slice_id) + + # If there're no assigned slice ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. 
+ if already_assigned_slice_ids: + to_be_assigned_slice_ids = set(range(num_slices)) - already_assigned_slice_ids + assert len(to_be_assigned_slice_ids) == len(failed_slices_new_ids) + for k, new_id in zip(failed_slices_new_ids.keys(), to_be_assigned_slice_ids): + failed_slices_new_ids[k] = new_id + + def assign_fn(info: _ProcessInfo): + proc_id = info.inv_proc_id + if (new_slice_id := failed_slices_new_ids.get(info.cur_slice_id)) is not None: + proc_id = ( + new_slice_id * num_proc_per_slice + + info.cur_proc_id % num_proc_per_slice + ) + info.inv_proc_id = proc_id + + inv_id_assign_fn = assign_fn + + elif jax_backend == "gpu": + num_processes = setup_kwargs["num_processes"] + # For GPU backend, failed nodes are assigned with ids that are missing in the global id + # range with arbitrary order. + assigned_ids = set() + for key, data in ids: + info = _ProcessInfo.from_string(data, key=key) + proc_infos.append(info) + assigned_ids.add(info.inv_proc_id) + + # If there're no assigned ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. + if assigned_ids: + to_be_assigned_ids = iter(set(range(num_processes)) - assigned_ids) + + def assign_fn(info: _ProcessInfo): + if info.inv_proc_id == -1: + info.inv_proc_id = next(to_be_assigned_ids) + + inv_id_assign_fn = assign_fn + + coordinator_address = None + for info in proc_infos: + inv_id_assign_fn(info) + if info.inv_proc_id == 0: + coordinator_address = info.address + assert coordinator_address is not None + for info in proc_infos: + info.address = coordinator_address + client.key_value_set(info.key + "/get", info.to_string()) + + new_info = _ProcessInfo.from_string( + client.blocking_key_value_get(local_proc_info.key + "/get", timeout_in_ms=timeout_ms) + ) + logging.info( + "Previous proc id: %d. Assigned proc id: %d. 
Global coordinator address: %s.", + local_proc_info.inv_proc_id, + new_info.inv_proc_id, + new_info.address, + ) + _dump_process_info(local_ckpt_dir, trainer_dir=trainer_dir, proc_info=new_info) + # Block to avoid coordinator exiting too early. + client.wait_at_barrier("axlearn/id-reassign-finalize", timeout_in_ms=timeout_ms) + jax.distributed.shutdown() + + +def get_consistent_proc_info( + *, + local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +) -> _ProcessInfo: + """Gets the invariant process id of the current process and global coordinator's address. + + This function guarantees process id <-> node mapping stays the same for healthy nodes after a + failover. This is required to preserve shard order for in-memory checkpoint recovery. For GPU + training, all healthy nodes will have their process id unchanged. For TPU, all nodes in the + healthy slices will have their process id unchanged. See docstring of + `_init_consistent_proc_ids` for implementation details. + + Args: + local_address: A IP:Port that can be used as the coordinator if this rank is elected. + This Port must be free in the coordinator pod and IP:Port must be reachable from all + other processes. + barrier_timeout_seconds: Timeout in seconds for the barrier and key_value_set operations. + trainer_dir: Path to the trainer dir. + local_ckpt_dir: Path to the local checkpoint dir. + **setup_kwargs: Args to `utils_spmd.setup()`. + + Returns: + A _ProcessInfo whose `inv_proc_id` should be used as the process id and `address` should be + used as the global coordinator address. + """ + platform = os.environ.get("JAX_PLATFORMS", "") + try: + start_t = time.perf_counter() + # Patch platform so the process doesn't waste time initializing accelerators. 
+ os.environ["JAX_PLATFORMS"] = "cpu" + proc = mp.get_context("spawn").Process( + target=_init_consistent_proc_ids, + kwargs=dict( + local_address=local_address, + barrier_timeout_seconds=barrier_timeout_seconds, + trainer_dir=trainer_dir, + local_ckpt_dir=local_ckpt_dir, + **setup_kwargs, + ), + ) + proc.start() + proc.join() + if proc.exitcode != 0: + raise RuntimeError( + "Expects id assignment process to finish normally. " + f"Got exit code {proc.exitcode}. Please check the log above for errors." + ) + + info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + if info.inv_proc_id == -1: + raise RuntimeError("Expects inv process id != -1, but got -1.") + logging.info( + "Successfully finished process ID assignment in %fs", time.perf_counter() - start_t + ) + return info + finally: + # Restore previous platform settings. + if platform != "": + os.environ["JAX_PLATFORMS"] = platform + else: + del os.environ["JAX_PLATFORMS"] + + +class OrbaxEmergencyCheckpointer(BaseCheckpointer): + """Checkpointer implementation that uses Orbax emergency checkpoint. + + ## Summary: + + This checkpointer is designed to improve the goodput of large multi-slice training jobs that + use data-parallelism across slices. At least two data-parallel slices are required. For other + use cases where this is not applicable or ultimate goodput is not required, please use + `OrbaxCheckpointer`. + + Why it can improve goodput: + 1. It can save to a local path (usually backed by a ramdisk) more frequently, so the progress + lost during restart can be reduced. This is in contrast with saving to remote filesystem + such as GCS directly, which has limited bandwidth to support frequent checkpointing. + 2. During restart, checkpoint can be broadcasted through network, which is faster than reading + from a remote filesystem. 
+ + To use the checkpointer, besides configuring it properly, it also requires + `get_consistent_proc_info` to be called and pass `inv_proc_id` and `address` as + `process_id` and `coordinator_address` to `jax.distributed.initialize`. + + ## How it works under the hood + + This checkpointer is intended for multi-slice training that uses data-parallelism across + slices. Orbax emergency checkpoint works by exploiting the following properties: + 1. Tensors are replicated across data-parallel replicas. + 2. When a slice fails in a multi-slice training and failover is started, only nodes + corresponding to the non-healthy slice may be restarted. Healthy nodes from healthy slices + will not restart. + + Hence, all slices can write checkpoints to node's memory or disk, providing us with redundancy + when there's a failure. This checkpoint frequency can be much higher than remote filesystem, + which has limited bandwidth to support high frequency saving. Checkpoints on nodes are referred + as local checkpoints. Checkpoints on remote filesystem are referred as persistent checkpoints. + + When a failure occurs, Orbax checkpointer will find the latest step from all local and + persistent checkpoints. If the checkpoint is local, the slice on which that checkpoint is + stored will read the checkpoint and broadcast the read values to other slices. Since local + checkpoints are scattered across different hosts, the process id, which determines the shard id + of locally stored shards, must stay the same for nodes in the healthy replicas to guarantee a + correct restore. We provide an utility function `get_consistent_proc_info` that returns the + process id and global coordinator address. They must be passed to `jax.distributed.initialize`. + + However, the above procedure doesn't apply to some non-tensor states such as data iterators. + Data iterators are unique across jax processes, and thus cannot be stored on nodes. 
Orbax + emergency checkpointer doesn't support non-tensor states. Therefore, we reuse axlearn + Checkpointer to save, restore and garbage collect those states, which include the index file + and tf iterators. These non-tensor states will be saved whenever local or persistent checkpoint + need to be saved. As the result, the persistent checkpoint structure looks like this: + + ``` + ├── path_prefix + │ ├── non-tensors + │ │ └── step_00000010 + │ │ ├── index + │ │ └── tf_xxx + │ └── tensors + │ └── step_00000010 + │ └── orbax_files_xxx + ``` + + A persistent training checkpoint `step_xxx` is commited when `non-tensors/step_xxx/index` + exists and `tensors/step_xxx` is commited by Orbax. Refer to the docstring of + `OrbaxCheckpointer` for Orbax's commit criteria. + + To abstract the details of the checkpoint layout, the `checkpoint_steps` API returns all steps + for which both Tensor and non-Tensor states have been fully committed. + """ + + _NON_TENSORS_PREFIX: str = "non-tensors" + _TENSORS_PREFIX: str = "tensors" + + @config_class + class Config(BaseCheckpointer.Config): + """Configures OrbaxEmergencyCheckpointer. + + Attributes: + keep_last_n: Keep this many past ckpts. + keep_every_n_steps: If > 0, keeps at least one persistent checkpoint every N steps. + local_keep_last_n: Keep this many past ckpts in local storage (e.g. node memory). + This should almost always set to 1 to avoid OOM. + local_dir: Ckpt base path for local storage. The content in this path must persist + across pod restarts unless the restart is caused by node failure. `local_dir` must + be the same for all processes or processes may hang. + trainer_dir: A string that's unique for the current run. Typically, this is set to + trainer_dir. Local checkpoint will be stored in local_dir/sha256(trainer_dir). + During init, all other folders in local_dir will be removed to prevent unexpected + memory usage. + save_policy: Save policy for persistent checkpoints. 
+ local_save_policy: Save policy for local checkpoints. This should be more frequent than + `save_policy`. Note that data iterator will be saved with either `save_policy` or + `local_save_policy` indicate we should save. + non_tensor_async_timeout_secs: Timeout for async barrier in seconds when saving + non-tensor states. + async_timeout_secs: Timeout for async barrier in seconds when saving tensors. + replica_axis_index: The index of the "data" axis. + """ + + keep_last_n: int = 1 + keep_every_n_steps: Optional[int] = None + local_keep_last_n: int = 1 + local_save_policy: InstantiableConfig[CheckpointPolicy] = config_for_function( + every_n_steps_policy + ).set(n=10) + local_dir: str = "/host-tmp/checkpoints" + trainer_dir: Required[str] = REQUIRED + non_tensor_async_timeout_secs: int = 300 + async_timeout_secs: int = 3600 + replica_axis_index: Required[int] = REQUIRED + + @classmethod + def checkpoint_paths(cls, base_dir: str) -> List[str]: + """See `BaseCheckpointer.checkpointer_paths`. + + Only persistent checkpoint paths are returned. There's no guarantee that the paths returned + have committed TF savables. Use `checkpoint_steps` to get steps with both tensors and + committed TF savables. + """ + logging.log_first_n( + logging.WARNING, + msg="checkpoint_paths is deprecated. Use checkpoint_steps instead.", + n=1, + ) + tensors_dir = os.path.join(base_dir, cls._TENSORS_PREFIX) + return [str(path) for path in ocp.utils.checkpoint_steps_paths(tensors_dir)] + + @classmethod + def checkpoint_steps(cls, base_dir) -> list[int]: + """See `BaseCheckpointer.checkpointer_steps`. + + Only persistent checkpoint steps are returned. 
+ """ + return list( + set( + ocp.utils.checkpoint_steps(os.path.join(base_dir, cls._TENSORS_PREFIX)) + ).intersection( + set(Checkpointer.checkpoint_steps(os.path.join(base_dir, cls._NON_TENSORS_PREFIX))) + ) + ) + + def __init__(self, cfg: Config, *, parent: Optional[Module]): + super().__init__(cfg, parent=parent) + cfg: OrbaxEmergencyCheckpointer.Config = self.config + self._name_format = ocp.step.standard_name_format( + step_prefix=STEP_PREFIX, + step_format_fixed_length=STEP_NUM_DIGITS, + ) + if jax.process_index() == 0: + fs.makedirs(os.path.join(cfg.dir, self._NON_TENSORS_PREFIX)) + fs.makedirs(os.path.join(cfg.dir, self._TENSORS_PREFIX)) + # Cleanup local checkpoints from different runs. + unique_id = _get_unique_id(cfg.trainer_dir) + for fd in fs.listdir(cfg.local_dir): + if not fd.startswith(".") and fd != unique_id: + fs.rmtree(os.path.join(cfg.local_dir, fd)) + self._local_dir = os.path.join(cfg.local_dir, unique_id) + fs.makedirs(self._local_dir) + # Orbax emergency ckpt requires this function to be called prior to checkpointer + # operations. This function also serves as a barrier. + ocp.multihost.initialize_runtime_to_distributed_ids() + ocp.multihost.initialize_distributed_to_device_ids() + ckpt_cfg: Checkpointer.Config = Checkpointer.default_config() + # TODO(hanzhi-zhou): this `keep_last_n` may not be what users expect since non-tensor + # states will save when either local or persistent checkpoint will save. 
+ ckpt_cfg.keep_last_n = cfg.keep_last_n + ckpt_cfg.keep_every_n_steps = cfg.keep_every_n_steps + ckpt_cfg.storage = _TFSavablesStateStorage.default_config() + ckpt_cfg.storage.timeout_secs = cfg.non_tensor_async_timeout_secs + ckpt_cfg.dir = os.path.join(cfg.dir, self._NON_TENSORS_PREFIX) + ckpt_cfg.name = "non-tensors-checkpointer" + + save_policy = cfg.save_policy.instantiate() + local_save_policy = cfg.local_save_policy.instantiate() + + # Non-tensor states must save when either local or persistent ckpt needs to be saved for + # restore from either to succeed. + def _composite_save_policy(*, step: int, evaler_summaries: dict[str, Any]): + return ( + save_policy(step=step, evaler_summaries=evaler_summaries) + or local_save_policy(step=step, evaler_summaries=evaler_summaries) + or self._reached_preemption + ) + + ckpt_cfg.save_policy = config_for_function(lambda: _composite_save_policy) + self._non_tensor_manager: Checkpointer = ckpt_cfg.instantiate(parent=self) + self._tensor_manager: Optional[oecp.CheckpointManager] = None + # See comments of _eval_summaries in `OrbaxCheckpointer`. + self._eval_summaries = None + self._reached_preemption = False + + # pylint: disable-next=redefined-builtin + def ckpt_dir(self, step: int, dir: Optional[str] = None) -> str: + """Obtains the checkpoint dir for the given step.""" + if dir is None: + dir = self._non_tensor_manager.directory + return str(ocp.step.build_step_path(dir, self._name_format, step)) + + def _get_abstract_state( + self, state_with_tensors: Nested[Tensor] + ) -> Nested[jax.ShapeDtypeStruct]: + """Generate the abstract states required by the Orbax emergency checkpointer.""" + return jax.tree.map( + lambda x: jax.ShapeDtypeStruct(x.shape, x.dtype, sharding=x.sharding), + state_with_tensors, + ) + + def _get_tensor_manager(self, state_with_tensors: Nested[Tensor]) -> oecp.CheckpointManager: + """Creates the emergency checkpoint manager if not exists. 
+ + We defer the creation of this checkpoint manager because it requires the state dict, + which is not present during __init__. + """ + cfg: OrbaxEmergencyCheckpointer.Config = self.config + if self._tensor_manager is not None: + return self._tensor_manager + + save_policy = cfg.save_policy.instantiate() + local_save_policy = cfg.local_save_policy.instantiate() + + def _orbax_save_fn( + step: int, last_saved_step: Optional[int], wrapped_save_policy: CheckpointPolicy + ) -> bool: + del last_saved_step + return wrapped_save_policy(step=step, evaler_summaries=self._eval_summaries) + + # For meaning of these options, refer to + # https://github.com/google/orbax/blob/95be2c021bc8cbf4badd83a053ff57b7a9f9b314/checkpoint/orbax/checkpoint/experimental/emergency/checkpoint_manager.py#L277 + self._tensor_manager = oecp.CheckpointManager( + self._local_dir, + persistent_directory=os.path.join(cfg.dir, self._TENSORS_PREFIX), + global_mesh=thread_resources.env.physical_mesh, + abstract_state=self._get_abstract_state(state_with_tensors), + options=oecp.CheckpointManagerOptions( + local=oecp.LocalCheckpointOptions( + should_save_fn=functools.partial( + _orbax_save_fn, wrapped_save_policy=local_save_policy + ), + max_to_keep=cfg.local_keep_last_n, + ), + persistent=oecp.PersistentCheckpointOptions( + should_save_fn=functools.partial( + _orbax_save_fn, wrapped_save_policy=save_policy + ), + max_to_keep=cfg.keep_last_n, + ), + replica_axis_index=cfg.replica_axis_index, + async_options=oecp.checkpoint_manager.AsyncOptions( + timeout_secs=cfg.async_timeout_secs + ), + step_name_format=self._name_format, + cleanup_tmp_directories=True, + enable_async_checkpointing=True, + ), + ) + return self._tensor_manager + + def save( + self, *, step: int, state: Nested[Tensor], evaler_summaries: Optional[Dict[str, Any]] = None + ): + """See `BaseCheckpointer.save` for details.""" + assert self._eval_summaries is None, self._eval_summaries + self._eval_summaries = copy.deepcopy(evaler_summaries or 
{}) + self._reached_preemption = self._tensor_manager.reached_preemption(step) + + start_t = time.perf_counter() + state_with_tensors = jax.tree.map( + lambda x: x if isinstance(x, (Tensor, TensorSpec)) else None, state + ) + # Note that save() waits for prior serialization to finish. + self._non_tensor_manager.save(step=step, state=state) + self._get_tensor_manager(state_with_tensors).save( + step=step, args=ocp.args.PyTreeSave(item=state_with_tensors) + ) + self._eval_summaries = None + if (time_diff := time.perf_counter() - start_t) > 0.5: + logging.info("In-mem ckpt blocking time is %fs.", time_diff) + if self._reached_preemption: + self.wait_until_finished() + raise SystemExit(f"Exiting after saving checkpoint at {step=} due to pre-emption.") + + def restore( + self, + *, + step: Optional[int] = None, + state: Union[Nested[Tensor], Nested[TensorSpec]], + ) -> Tuple[Optional[int], Nested[Tensor]]: + """See `BaseCheckpointer.restore` for details.""" + start_t = time.perf_counter() + cfg: OrbaxEmergencyCheckpointer.Config = self.config + state_with_tensors = jax.tree.map( + lambda x: x if isinstance(x, (Tensor, TensorSpec)) else None, state + ) + tensor_manager = self._get_tensor_manager(state_with_tensors) + if step is None: + # Find the intersection of the checkpoint steps managed by tensor and non-tensor + # manager, and then use the latest step in the intersection for restore. `all_steps` + # from tensor manager contains both local and persistent checkpoints.
Consider refactoring this logic to a separate function so that it can be tested directly?
axlearn
github_2023
python
820
apple
ruomingp
@@ -0,0 +1,829 @@ +# Copyright © 2024 Apple Inc. + +"""Implements Orbax emergency checkpointing and provide utilities for correct store. + +See the docstring of `OrbaxEmergencyCheckpointer` for more details. +""" + +import copy +import functools +import hashlib +import multiprocessing as mp +import os +import time +from concurrent.futures import ThreadPoolExecutor +from contextlib import contextmanager +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import jax +import jax.lib +import orbax.checkpoint as ocp +import orbax.checkpoint.experimental.emergency.checkpoint_manager as oecp +import tensorflow as tf +from absl import flags, logging +from jax._src.distributed import global_state +from jax._src.mesh import thread_resources +from jax.experimental.array_serialization import serialization + +from axlearn.common import file_system as fs +from axlearn.common import utils, utils_spmd +from axlearn.common.checkpointer import ( + STEP_NUM_DIGITS, + STEP_PREFIX, + BaseCheckpointer, + Checkpointer, + CheckpointPolicy, + CheckpointValidationType, + InstantiableConfig, + StateStorage, + StateStorageCommitCallback, + async_save_tf_savables, + check_state_structure, + config_for_function, + every_n_steps_policy, + multihost_utils, + parse_step_from_dir, + read_index_file, + restore_tf_savables, + write_index_file, +) +from axlearn.common.config import REQUIRED, Required, config_class +from axlearn.common.module import Module +from axlearn.common.utils import Nested, Tensor, TensorSpec + +FLAGS = flags.FLAGS + + +@contextmanager +def setup(spec: str): + """Setups FLAGS.process_id and FLAGS.distributed_coordinator as required by Orbax. + + See the docstring of `get_consistent_proc_info` for more details. + + Args: + spec: Key=Value pairs separated by comma. Key must be one of ("local_address", + "barrier_timeout_seconds", "local_ckpt_dir"). See the docstring of + `get_consistent_proc_info`. 
+ """ + parsed_args = {} + allowed_fields = ["local_address", "barrier_timeout_seconds", "local_ckpt_dir"] + for field in spec.split(","): + k, v = field.split("=") + if k not in allowed_fields: + raise ValueError(f"Expected key in {allowed_fields}, got key={k}.") + parsed_args[k] = v + if "barrier_timeout_seconds" in parsed_args: + parsed_args["barrier_timeout_seconds"] = int(parsed_args["barrier_timeout_seconds"]) + if "local_ckpt_dir" not in parsed_args: + raise ValueError("local_ckpt_dir must be specified.") + # pylint: disable-next=missing-kwoa + info = get_consistent_proc_info( + **parsed_args, + trainer_dir=FLAGS.trainer_dir, + distributed_coordinator=FLAGS.distributed_coordinator, + num_processes=FLAGS.num_processes, + process_id=FLAGS.process_id, + jax_backend=FLAGS.jax_backend, + initialization_timeout=FLAGS.initialization_timeout, + ) + FLAGS.process_id = info.inv_proc_id + FLAGS.distributed_coordinator = info.address + FLAGS.experimental_orbax_use_distributed_process_id = True + yield + + +class _TFSavablesStateStorage(StateStorage): + """A StateStorage implementation that only saves the index file and tf savables.""" + + @config_class + class Config(StateStorage.Config): + timeout_secs: int = 300 + + def __init__(self, cfg: Config): + super().__init__(cfg) + # One thread is sufficient because `async_save_tf_savables` only creates one future. 
+ self._executor = ThreadPoolExecutor(1) + self._manager = serialization.AsyncManager(timeout_secs=cfg.timeout_secs) + + def _get_spec(self, *, step: int, state: Nested[Any]) -> Nested[Any]: + spec = {"index": [("step", int(step))], "tf_ckpt_map": {}} + for path, value in utils.flatten_items(state): + if isinstance(value, (Tensor, TensorSpec)): + dtype = getattr(value.dtype, "dtype", value.dtype) + spec["index"].append( + (path, {"dtype": str(dtype), "shape": str(tuple(value.shape))}) + ) + elif isinstance(value, tf.data.Iterator): + spec["index"].append((path, str(type(value)))) + spec["tf_ckpt_map"][path] = value + else: + spec["index"].append((path, value)) + logging.log_first_n(logging.INFO, "TF savables spec: %s", 1, str(spec)) + return spec + + def save_to_dir( + self, + *, + step: int, + state: Nested[Tensor], + ckpt_dir: str, + on_commit_callback: StateStorageCommitCallback = write_index_file, + ): + start_time = time.perf_counter() + # We write data files directly to `ckpt_dir`. `index` is written into `ckpt_dir` in + # `on_commit_callback` to finalize the checkpoint. 
+ spec = self._get_spec(step=step, state=state) + self.wait_until_finished() + jax.block_until_ready(state) + + save_tf_future = async_save_tf_savables( + spec["tf_ckpt_map"], + executor=self._executor, + dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}"), + ) + + def commit(): + on_commit_callback(ckpt_dir=ckpt_dir, index=spec["index"]) + logging.info( + "Serialization of TF savables to %s completed in %s seconds.", + ckpt_dir, + time.perf_counter() - start_time, + ) + + # pylint: disable=protected-access + self._manager._add_futures([save_tf_future]) + self._manager._start_async_commit(commit) + + def wait_until_finished(self): + self._manager.wait_until_finished() + + def restore_from_dir( + self, + step: int, + state: Union[Nested[Tensor], Nested[TensorSpec]], + *, + ckpt_dir: str, + validation: CheckpointValidationType = CheckpointValidationType.EXACT, + ) -> Nested[Tensor]: + spec = self._get_spec(step=step, state=state) + logging.info("Restoring TF savables from directory %s", ckpt_dir) + check_state_structure( + read_index_file(ckpt_dir), target_structure=spec["index"], validation=validation + ) + restore_tf_savables( + spec["tf_ckpt_map"], dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}") + ) + multihost_utils.sync_global_devices(ckpt_dir) + return state + + def stop(self): + self._executor.shutdown(wait=True) + + +_PROCESS_ID_FILE_NAME: str = "process_id.txt" + + +@dataclass +class _ProcessInfo: + """Records the process id and address information for this node. + + Attributes: + address: The global coordinator address. This is set during the first run and stays and + stays the same unless process 0 failed. + inv_proc_id: The invariant process id of this node. This process id is set during the first + run and stays the same for all subsequent runs unless this node failed. + cur_proc_id: Internal field. The new process id assigned externally after failover. Used + during ID negotiation after failover. + key: Internal field. 
Key used during ID negotiation after failover. + num_proc_per_slice: Internal field. Used to calculate slice ID for TPU. + """ + + address: str + inv_proc_id: int + cur_proc_id: int + key: Optional[str] = None + num_proc_per_slice: Optional[int] = None + + def to_string(self): + return "|".join(str(x) for x in [self.address, self.inv_proc_id, self.cur_proc_id]) + + @property + def prev_slice_id(self): + assert self.num_proc_per_slice is not None + return self.inv_proc_id // self.num_proc_per_slice + + @property + def cur_slice_id(self): + assert self.num_proc_per_slice is not None + return self.cur_proc_id // self.num_proc_per_slice + + @classmethod + def from_string( + cls, data: str, *, key: Optional[str] = None, num_proc_per_slice: Optional[int] = None + ): + ls = data.split("|") + assert len(ls) == 3 + return cls(ls[0], int(ls[1]), int(ls[2]), key=key, num_proc_per_slice=num_proc_per_slice) + + +def _get_previous_process_info(local_dir: str, *, trainer_dir: str) -> _ProcessInfo: + """Gets process info from local checkpoint directory.""" + path = os.path.join(local_dir, _get_unique_id(trainer_dir), _PROCESS_ID_FILE_NAME) + if not fs.exists(path): + return _ProcessInfo(address="", inv_proc_id=-1, cur_proc_id=-1) + + with fs.open(path) as f: + return _ProcessInfo.from_string(f.read()) + + +def _dump_process_info(local_dir: str, *, trainer_dir: str, proc_info: _ProcessInfo): + """Dumps process info to local checkpoint directory.""" + local_dir = os.path.join(local_dir, _get_unique_id(trainer_dir)) + fs.makedirs(local_dir) + process_id_file = os.path.join(local_dir, _PROCESS_ID_FILE_NAME) + with fs.open(process_id_file, "w") as f: + f.write(proc_info.to_string()) + + +def _get_unique_id(trainer_dir: str) -> str: + return hashlib.sha256(trainer_dir.encode(), usedforsecurity=False).hexdigest() + + +def _logger_init(): + """Init logger in spawned processes that don't inherit parent's logger.""" + logging.set_verbosity(logging.INFO) + logging.use_absl_handler() + + +def 
_init_consistent_proc_ids( + *, + local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +): + """Exchanges id info through jax coordinator and dumps to local file. + + During failover, healthy nodes will read their locally stored process id file, but failed nodes + will lost their process ids. To assign ids that are free in the global id range (i.e. 0 to + num_processes - 1), we let each node report its process id (-1 if missing) to rank 0, and rank + 0 will figure out suitable IDs to assign to each failed node. We reuse Jax's distributed client + to avoid writing our own coordinator. + """ + _logger_init() + + jax_backend = setup_kwargs["jax_backend"] + timeout_ms = barrier_timeout_seconds * 1000 + utils_spmd.setup(**setup_kwargs) + client: jax.lib.xla_extension.DistributedRuntimeClient = global_state.client + local_proc_info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + key_prefix = "axlearn/id_reassign" + # Local key just needs to be unique for each process. + local_proc_info.key = f"{key_prefix}/{jax.process_index()}" + + if jax_backend == "tpu": + worker_hostnames = os.environ["TPU_WORKER_HOSTNAMES"].split(",") + num_slices = int(os.environ["MEGASCALE_NUM_SLICES"]) + num_proc_per_slice = len(worker_hostnames) + worker_id = int(os.environ["TPU_WORKER_ID"]) + + # Coordinator port for TPU is hardcoded. Reference: + # https://github.com/jax-ml/jax/blob/1aa5de66a8f3c910115cac2fbe118e0facd7a3be/jax/_src/clusters/cloud_tpu_cluster.py#L29 + local_proc_info.address = f"{worker_hostnames[worker_id]}:8476" + # Note: cannot use jax.process_index() here because it may be different from the + # distributed id. This is a jax problem. 
+ local_proc_info.cur_proc_id = ( + int(os.environ["MEGASCALE_SLICE_ID"]) * num_proc_per_slice + worker_id + ) + elif jax_backend == "gpu": + if local_address is None: + raise ValueError( + "local_address must be set for GPU when using in-memory checkpointing." + ) + local_proc_info.address = local_address + local_proc_info.cur_proc_id = setup_kwargs["process_id"] + else: + raise RuntimeError(f"Unsupported backend {jax_backend}.") + + # Every worker reports its proc info to rank 0. + client.key_value_set(local_proc_info.key, local_proc_info.to_string()) + client.wait_at_barrier("axlearn/id-reassign-gather-id", timeout_in_ms=timeout_ms) + + # Then, rank 0 assigns inv_proc_id for worker that's missing their inv_proc_id and find the + # coordinator address. + if local_proc_info.cur_proc_id == 0: + ids = client.key_value_dir_get(key_prefix) + proc_infos: list[_ProcessInfo] = [] + + def first_run_assign_fn(info: _ProcessInfo): + info.inv_proc_id = info.cur_proc_id + + inv_id_assign_fn = first_run_assign_fn + if jax_backend == "tpu": + # For TPUs, we have the additional requirement that process ids in slice id X must be + # in range [X * num_processes_per_slice, (X + 1) * num_processes_per_slice). Therefore, + # we first identify the healthy slices' ids and then figure out the slice ids to assign + # to failed slices. Each process in the failed slice will then get id `new_slice_id * + # num_proc_per_slice + cur_proc_id % num_proc_per_slice`. After id assignment, the + # address of process that's assigned with id=0 will be broadcasted to every worker. + + # Mapping from new slice ids to assigned slice ids forfailed slices. 
+ failed_slices_new_ids = {} + for k, data in ids: + info = _ProcessInfo.from_string(data, key=k, num_proc_per_slice=num_proc_per_slice) + proc_infos.append(info) + if info.inv_proc_id == -1: + failed_slices_new_ids[info.cur_slice_id] = -1 + + already_assigned_slice_ids = set() + for info in proc_infos: + if info.cur_slice_id not in failed_slices_new_ids: + already_assigned_slice_ids.add(info.prev_slice_id) + + # If there're no assigned slice ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. + if already_assigned_slice_ids: + to_be_assigned_slice_ids = set(range(num_slices)) - already_assigned_slice_ids + assert len(to_be_assigned_slice_ids) == len(failed_slices_new_ids) + for k, new_id in zip(failed_slices_new_ids.keys(), to_be_assigned_slice_ids): + failed_slices_new_ids[k] = new_id + + def assign_fn(info: _ProcessInfo): + proc_id = info.inv_proc_id + if (new_slice_id := failed_slices_new_ids.get(info.cur_slice_id)) is not None: + proc_id = ( + new_slice_id * num_proc_per_slice + + info.cur_proc_id % num_proc_per_slice + ) + info.inv_proc_id = proc_id + + inv_id_assign_fn = assign_fn + + elif jax_backend == "gpu": + num_processes = setup_kwargs["num_processes"] + # For GPU backend, failed nodes are assigned with ids that are missing in the global id + # range with arbitrary order. + assigned_ids = set() + for key, data in ids: + info = _ProcessInfo.from_string(data, key=key) + proc_infos.append(info) + assigned_ids.add(info.inv_proc_id) + + # If there're no assigned ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. 
+ if assigned_ids: + to_be_assigned_ids = iter(set(range(num_processes)) - assigned_ids) + + def assign_fn(info: _ProcessInfo): + if info.inv_proc_id == -1: + info.inv_proc_id = next(to_be_assigned_ids) + + inv_id_assign_fn = assign_fn + + coordinator_address = None + for info in proc_infos: + inv_id_assign_fn(info) + if info.inv_proc_id == 0: + coordinator_address = info.address + assert coordinator_address is not None + for info in proc_infos: + info.address = coordinator_address + client.key_value_set(info.key + "/get", info.to_string()) + + new_info = _ProcessInfo.from_string( + client.blocking_key_value_get(local_proc_info.key + "/get", timeout_in_ms=timeout_ms) + ) + logging.info( + "Previous proc id: %d. Assigned proc id: %d. Global coordinator address: %s.", + local_proc_info.inv_proc_id, + new_info.inv_proc_id, + new_info.address, + ) + _dump_process_info(local_ckpt_dir, trainer_dir=trainer_dir, proc_info=new_info) + # Block to avoid coordinator exiting too early. + client.wait_at_barrier("axlearn/id-reassign-finalize", timeout_in_ms=timeout_ms) + jax.distributed.shutdown() + + +def get_consistent_proc_info( + *, + local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +) -> _ProcessInfo: + """Gets the invariant process id of the current process and global coordinator's address. + + This function guarantees process id <-> node mapping stays the same for healthy nodes after a + failover. This is required to preserve shard order for in-memory checkpoint recovery. For GPU + training, all healthy nodes will have their process id unchanged. For TPU, all nodes in the + healthy slices will have their process id unchanged. See docstring of + `_init_consistent_proc_ids` for implementation details. + + Args: + local_address: A IP:Port that can be used as the coordinator if this rank is elected. 
+ This Port must be free in the coordinator pod and IP:Port must be reachable from all + other processes. + barrier_timeout_seconds: Timeout in seconds for the barrier and key_value_set operations. + trainer_dir: Path to the trainer dir. + local_ckpt_dir: Path to the local checkpoint dir. + **setup_kwargs: Args to `utils_spmd.setup()`. + + Returns: + A _ProcessInfo whose `inv_proc_id` should be used as the process id and `address` should be + used as the global coordinator address. + """ + platform = os.environ.get("JAX_PLATFORMS", "") + try: + start_t = time.perf_counter() + # Patch platform so the process doesn't waste time initializing accelerators. + os.environ["JAX_PLATFORMS"] = "cpu" + proc = mp.get_context("spawn").Process( + target=_init_consistent_proc_ids, + kwargs=dict( + local_address=local_address, + barrier_timeout_seconds=barrier_timeout_seconds, + trainer_dir=trainer_dir, + local_ckpt_dir=local_ckpt_dir, + **setup_kwargs, + ), + ) + proc.start() + proc.join() + if proc.exitcode != 0: + raise RuntimeError( + "Expects id assignment process to finish normally. " + f"Got exit code {proc.exitcode}. Please check the log above for errors." + ) + + info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + if info.inv_proc_id == -1: + raise RuntimeError("Expects inv process id != -1, but got -1.") + logging.info( + "Successfully finished process ID assignment in %fs", time.perf_counter() - start_t + ) + return info + finally: + # Restore previous platform settings. + if platform != "": + os.environ["JAX_PLATFORMS"] = platform + else: + del os.environ["JAX_PLATFORMS"] + + +class OrbaxEmergencyCheckpointer(BaseCheckpointer): + """Checkpointer implementation that uses Orbax emergency checkpoint. + + ## Summary:
```suggestion """Checkpointer implementation that uses Orbax emergency checkpoint. EXPERIMENTAL. Do not use for actual training runs since the checkpoint layout will likely change in the future. ## Summary: ```
axlearn
github_2023
python
820
apple
apivovarov
@@ -0,0 +1,832 @@ +# Copyright © 2024 Apple Inc. + +"""Implements Orbax emergency checkpointing and provide utilities for correct store. + +See the docstring of `OrbaxEmergencyCheckpointer` for more details. +""" + +import copy +import functools +import hashlib +import multiprocessing as mp +import os +import time +from concurrent.futures import ThreadPoolExecutor +from contextlib import contextmanager +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import jax +import jax.lib +import orbax.checkpoint as ocp +import orbax.checkpoint.experimental.emergency.checkpoint_manager as oecp +import tensorflow as tf +from absl import flags, logging +from jax._src.distributed import global_state +from jax._src.mesh import thread_resources +from jax.experimental.array_serialization import serialization + +from axlearn.common import file_system as fs +from axlearn.common import utils, utils_spmd +from axlearn.common.checkpointer import ( + STEP_NUM_DIGITS, + STEP_PREFIX, + BaseCheckpointer, + Checkpointer, + CheckpointPolicy, + CheckpointValidationType, + InstantiableConfig, + StateStorage, + StateStorageCommitCallback, + async_save_tf_savables, + check_state_structure, + config_for_function, + every_n_steps_policy, + multihost_utils, + parse_step_from_dir, + read_index_file, + restore_tf_savables, + write_index_file, +) +from axlearn.common.config import REQUIRED, Required, config_class +from axlearn.common.module import Module +from axlearn.common.utils import Nested, Tensor, TensorSpec + +FLAGS = flags.FLAGS + + +@contextmanager +def setup(spec: str): + """Setups FLAGS.process_id and FLAGS.distributed_coordinator as required by Orbax. + + See the docstring of `get_consistent_proc_info` for more details. + + Args: + spec: Key=Value pairs separated by comma. Key must be one of ("local_address", + "barrier_timeout_seconds", "local_ckpt_dir"). See the docstring of + `get_consistent_proc_info`. 
+ """ + parsed_args = {} + allowed_fields = ["local_address", "barrier_timeout_seconds", "local_ckpt_dir"] + for field in spec.split(","): + k, v = field.split("=") + if k not in allowed_fields: + raise ValueError(f"Expected key in {allowed_fields}, got key={k}.") + parsed_args[k] = v + if "barrier_timeout_seconds" in parsed_args: + parsed_args["barrier_timeout_seconds"] = int(parsed_args["barrier_timeout_seconds"]) + if "local_ckpt_dir" not in parsed_args: + raise ValueError("local_ckpt_dir must be specified.") + # pylint: disable-next=missing-kwoa + info = get_consistent_proc_info( + **parsed_args, + trainer_dir=FLAGS.trainer_dir, + distributed_coordinator=FLAGS.distributed_coordinator, + num_processes=FLAGS.num_processes, + process_id=FLAGS.process_id, + jax_backend=FLAGS.jax_backend, + initialization_timeout=FLAGS.initialization_timeout, + ) + FLAGS.process_id = info.inv_proc_id + FLAGS.distributed_coordinator = info.address + FLAGS.experimental_orbax_use_distributed_process_id = True + yield + + +class _TFSavablesStateStorage(StateStorage): + """A StateStorage implementation that only saves the index file and tf savables.""" + + @config_class + class Config(StateStorage.Config): + timeout_secs: int = 300 + + def __init__(self, cfg: Config): + super().__init__(cfg) + # One thread is sufficient because `async_save_tf_savables` only creates one future. 
+ self._executor = ThreadPoolExecutor(1) + self._manager = serialization.AsyncManager(timeout_secs=cfg.timeout_secs) + + def _get_spec(self, *, step: int, state: Nested[Any]) -> Nested[Any]: + spec = {"index": [("step", int(step))], "tf_ckpt_map": {}} + for path, value in utils.flatten_items(state): + if isinstance(value, (Tensor, TensorSpec)): + dtype = getattr(value.dtype, "dtype", value.dtype) + spec["index"].append( + (path, {"dtype": str(dtype), "shape": str(tuple(value.shape))}) + ) + elif isinstance(value, tf.data.Iterator): + spec["index"].append((path, str(type(value)))) + spec["tf_ckpt_map"][path] = value + else: + spec["index"].append((path, value)) + logging.log_first_n(logging.INFO, "TF savables spec: %s", 1, str(spec)) + return spec + + def save_to_dir( + self, + *, + step: int, + state: Nested[Tensor], + ckpt_dir: str, + on_commit_callback: StateStorageCommitCallback = write_index_file, + ): + start_time = time.perf_counter() + # We write data files directly to `ckpt_dir`. `index` is written into `ckpt_dir` in + # `on_commit_callback` to finalize the checkpoint. 
+ spec = self._get_spec(step=step, state=state) + self.wait_until_finished() + jax.block_until_ready(state) + + save_tf_future = async_save_tf_savables( + spec["tf_ckpt_map"], + executor=self._executor, + dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}"), + ) + + def commit(): + on_commit_callback(ckpt_dir=ckpt_dir, index=spec["index"]) + logging.info( + "Serialization of TF savables to %s completed in %s seconds.", + ckpt_dir, + time.perf_counter() - start_time, + ) + + # pylint: disable=protected-access + self._manager._add_futures([save_tf_future]) + self._manager._start_async_commit(commit) + + def wait_until_finished(self): + self._manager.wait_until_finished() + + def restore_from_dir( + self, + step: int, + state: Union[Nested[Tensor], Nested[TensorSpec]], + *, + ckpt_dir: str, + validation: CheckpointValidationType = CheckpointValidationType.EXACT, + ) -> Nested[Tensor]: + spec = self._get_spec(step=step, state=state) + logging.info("Restoring TF savables from directory %s", ckpt_dir) + check_state_structure( + read_index_file(ckpt_dir), target_structure=spec["index"], validation=validation + ) + restore_tf_savables( + spec["tf_ckpt_map"], dir=os.path.join(ckpt_dir, f"tf_{jax.process_index()}") + ) + multihost_utils.sync_global_devices(ckpt_dir) + return state + + def stop(self): + self._executor.shutdown(wait=True) + + +_PROCESS_ID_FILE_NAME: str = "process_id.txt" + + +@dataclass +class _ProcessInfo: + """Records the process id and address information for this node. + + Attributes: + address: The global coordinator address. This is set during the first run and stays and + stays the same unless process 0 failed. + inv_proc_id: The invariant process id of this node. This process id is set during the first + run and stays the same for all subsequent runs unless this node failed. + cur_proc_id: Internal field. The new process id assigned externally after failover. Used + during ID negotiation after failover. + key: Internal field. 
Key used during ID negotiation after failover. + num_proc_per_slice: Internal field. Used to calculate slice ID for TPU. + """ + + address: str + inv_proc_id: int + cur_proc_id: int + key: Optional[str] = None + num_proc_per_slice: Optional[int] = None + + def to_string(self): + return "|".join(str(x) for x in [self.address, self.inv_proc_id, self.cur_proc_id]) + + @property + def prev_slice_id(self): + assert self.num_proc_per_slice is not None + return self.inv_proc_id // self.num_proc_per_slice + + @property + def cur_slice_id(self): + assert self.num_proc_per_slice is not None + return self.cur_proc_id // self.num_proc_per_slice + + @classmethod + def from_string( + cls, data: str, *, key: Optional[str] = None, num_proc_per_slice: Optional[int] = None + ): + ls = data.split("|") + assert len(ls) == 3 + return cls(ls[0], int(ls[1]), int(ls[2]), key=key, num_proc_per_slice=num_proc_per_slice) + + +def _get_previous_process_info(local_dir: str, *, trainer_dir: str) -> _ProcessInfo: + """Gets process info from local checkpoint directory.""" + path = os.path.join(local_dir, _get_unique_id(trainer_dir), _PROCESS_ID_FILE_NAME) + if not fs.exists(path): + return _ProcessInfo(address="", inv_proc_id=-1, cur_proc_id=-1) + + with fs.open(path) as f: + return _ProcessInfo.from_string(f.read()) + + +def _dump_process_info(local_dir: str, *, trainer_dir: str, proc_info: _ProcessInfo): + """Dumps process info to local checkpoint directory.""" + local_dir = os.path.join(local_dir, _get_unique_id(trainer_dir)) + fs.makedirs(local_dir) + process_id_file = os.path.join(local_dir, _PROCESS_ID_FILE_NAME) + with fs.open(process_id_file, "w") as f: + f.write(proc_info.to_string()) + + +def _get_unique_id(trainer_dir: str) -> str: + return hashlib.sha256(trainer_dir.encode(), usedforsecurity=False).hexdigest() + + +def _logger_init(): + """Init logger in spawned processes that don't inherit parent's logger.""" + logging.set_verbosity(logging.INFO) + logging.use_absl_handler() + + +def 
_init_consistent_proc_ids( + *, + local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +): + """Exchanges id info through jax coordinator and dumps to local file. + + During failover, healthy nodes will read their locally stored process id file, but failed nodes + will lost their process ids. To assign ids that are free in the global id range (i.e. 0 to + num_processes - 1), we let each node report its process id (-1 if missing) to rank 0, and rank + 0 will figure out suitable IDs to assign to each failed node. We reuse Jax's distributed client + to avoid writing our own coordinator. + """ + _logger_init() + + jax_backend = setup_kwargs["jax_backend"] + timeout_ms = barrier_timeout_seconds * 1000 + utils_spmd.setup(**setup_kwargs) + client: jax.lib.xla_extension.DistributedRuntimeClient = global_state.client + local_proc_info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + key_prefix = "axlearn/id_reassign" + # Local key just needs to be unique for each process. + local_proc_info.key = f"{key_prefix}/{jax.process_index()}" + + if jax_backend == "tpu": + worker_hostnames = os.environ["TPU_WORKER_HOSTNAMES"].split(",") + num_slices = int(os.environ["MEGASCALE_NUM_SLICES"]) + num_proc_per_slice = len(worker_hostnames) + worker_id = int(os.environ["TPU_WORKER_ID"]) + + # Coordinator port for TPU is hardcoded. Reference: + # https://github.com/jax-ml/jax/blob/1aa5de66a8f3c910115cac2fbe118e0facd7a3be/jax/_src/clusters/cloud_tpu_cluster.py#L29 + local_proc_info.address = f"{worker_hostnames[worker_id]}:8476" + # Note: cannot use jax.process_index() here because it may be different from the + # distributed id. This is a jax problem. 
+ local_proc_info.cur_proc_id = ( + int(os.environ["MEGASCALE_SLICE_ID"]) * num_proc_per_slice + worker_id + ) + elif jax_backend == "gpu": + if local_address is None: + raise ValueError( + "local_address must be set for GPU when using in-memory checkpointing." + ) + local_proc_info.address = local_address + local_proc_info.cur_proc_id = setup_kwargs["process_id"] + else: + raise RuntimeError(f"Unsupported backend {jax_backend}.") + + # Every worker reports its proc info to rank 0. + client.key_value_set(local_proc_info.key, local_proc_info.to_string()) + client.wait_at_barrier("axlearn/id-reassign-gather-id", timeout_in_ms=timeout_ms) + + # Then, rank 0 assigns inv_proc_id for worker that's missing their inv_proc_id and find the + # coordinator address. + if local_proc_info.cur_proc_id == 0: + ids = client.key_value_dir_get(key_prefix) + proc_infos: list[_ProcessInfo] = [] + + def first_run_assign_fn(info: _ProcessInfo): + info.inv_proc_id = info.cur_proc_id + + inv_id_assign_fn = first_run_assign_fn + if jax_backend == "tpu": + # For TPUs, we have the additional requirement that process ids in slice id X must be + # in range [X * num_processes_per_slice, (X + 1) * num_processes_per_slice). Therefore, + # we first identify the healthy slices' ids and then figure out the slice ids to assign + # to failed slices. Each process in the failed slice will then get id `new_slice_id * + # num_proc_per_slice + cur_proc_id % num_proc_per_slice`. After id assignment, the + # address of process that's assigned with id=0 will be broadcasted to every worker. + + # Mapping from new slice ids to assigned slice ids forfailed slices. 
+ failed_slices_new_ids = {} + for k, data in ids: + info = _ProcessInfo.from_string(data, key=k, num_proc_per_slice=num_proc_per_slice) + proc_infos.append(info) + if info.inv_proc_id == -1: + failed_slices_new_ids[info.cur_slice_id] = -1 + + already_assigned_slice_ids = set() + for info in proc_infos: + if info.cur_slice_id not in failed_slices_new_ids: + already_assigned_slice_ids.add(info.prev_slice_id) + + # If there're no assigned slice ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. + if already_assigned_slice_ids: + to_be_assigned_slice_ids = set(range(num_slices)) - already_assigned_slice_ids + assert len(to_be_assigned_slice_ids) == len(failed_slices_new_ids) + for k, new_id in zip(failed_slices_new_ids.keys(), to_be_assigned_slice_ids): + failed_slices_new_ids[k] = new_id + + def assign_fn(info: _ProcessInfo): + proc_id = info.inv_proc_id + if (new_slice_id := failed_slices_new_ids.get(info.cur_slice_id)) is not None: + proc_id = ( + new_slice_id * num_proc_per_slice + + info.cur_proc_id % num_proc_per_slice + ) + info.inv_proc_id = proc_id + + inv_id_assign_fn = assign_fn + + elif jax_backend == "gpu": + num_processes = setup_kwargs["num_processes"] + # For GPU backend, failed nodes are assigned with ids that are missing in the global id + # range with arbitrary order. + assigned_ids = set() + for key, data in ids: + info = _ProcessInfo.from_string(data, key=key) + proc_infos.append(info) + assigned_ids.add(info.inv_proc_id) + + # If there're no assigned ids, that means all slices have failed or we're in the + # very first run. In that case, first_run_assign_fn will be used. 
+ if assigned_ids: + to_be_assigned_ids = iter(set(range(num_processes)) - assigned_ids) + + def assign_fn(info: _ProcessInfo): + if info.inv_proc_id == -1: + info.inv_proc_id = next(to_be_assigned_ids) + + inv_id_assign_fn = assign_fn + + coordinator_address = None + for info in proc_infos: + inv_id_assign_fn(info) + if info.inv_proc_id == 0: + coordinator_address = info.address + assert coordinator_address is not None + for info in proc_infos: + info.address = coordinator_address + client.key_value_set(info.key + "/get", info.to_string()) + + new_info = _ProcessInfo.from_string( + client.blocking_key_value_get(local_proc_info.key + "/get", timeout_in_ms=timeout_ms) + ) + logging.info( + "Previous proc id: %d. Assigned proc id: %d. Global coordinator address: %s.", + local_proc_info.inv_proc_id, + new_info.inv_proc_id, + new_info.address, + ) + _dump_process_info(local_ckpt_dir, trainer_dir=trainer_dir, proc_info=new_info) + # Block to avoid coordinator exiting too early. + client.wait_at_barrier("axlearn/id-reassign-finalize", timeout_in_ms=timeout_ms) + jax.distributed.shutdown() + + +def get_consistent_proc_info( + *, + local_address: Optional[str] = None, + barrier_timeout_seconds: int = 300, + trainer_dir: str, + local_ckpt_dir: str, + **setup_kwargs, +) -> _ProcessInfo: + """Gets the invariant process id of the current process and global coordinator's address. + + This function guarantees process id <-> node mapping stays the same for healthy nodes after a + failover. This is required to preserve shard order for in-memory checkpoint recovery. For GPU + training, all healthy nodes will have their process id unchanged. For TPU, all nodes in the + healthy slices will have their process id unchanged. See docstring of + `_init_consistent_proc_ids` for implementation details. + + Args: + local_address: A IP:Port that can be used as the coordinator if this rank is elected. 
+ This Port must be free in the coordinator pod and IP:Port must be reachable from all + other processes. + barrier_timeout_seconds: Timeout in seconds for the barrier and key_value_set operations. + trainer_dir: Path to the trainer dir. + local_ckpt_dir: Path to the local checkpoint dir. + **setup_kwargs: Args to `utils_spmd.setup()`. + + Returns: + A _ProcessInfo whose `inv_proc_id` should be used as the process id and `address` should be + used as the global coordinator address. + """ + platform = os.environ.get("JAX_PLATFORMS", "") + try: + start_t = time.perf_counter() + # Patch platform so the process doesn't waste time initializing accelerators. + os.environ["JAX_PLATFORMS"] = "cpu" + proc = mp.get_context("spawn").Process( + target=_init_consistent_proc_ids, + kwargs=dict( + local_address=local_address, + barrier_timeout_seconds=barrier_timeout_seconds, + trainer_dir=trainer_dir, + local_ckpt_dir=local_ckpt_dir, + **setup_kwargs, + ), + ) + proc.start() + proc.join() + if proc.exitcode != 0: + raise RuntimeError( + "Expects id assignment process to finish normally. " + f"Got exit code {proc.exitcode}. Please check the log above for errors." + ) + + info = _get_previous_process_info(local_ckpt_dir, trainer_dir=trainer_dir) + if info.inv_proc_id == -1: + raise RuntimeError("Expects inv process id != -1, but got -1.") + logging.info( + "Successfully finished process ID assignment in %fs", time.perf_counter() - start_t + ) + return info + finally: + # Restore previous platform settings. + if platform != "": + os.environ["JAX_PLATFORMS"] = platform + else: + del os.environ["JAX_PLATFORMS"] + + +class OrbaxEmergencyCheckpointer(BaseCheckpointer): + """Checkpointer implementation that uses Orbax emergency checkpoint. + + EXPERIMENTAL. Do not use for actual training runs since the checkpoint layout will likely + change in the future. 
+ + ## Summary: + + This checkpointer is designed to improve the goodput of large multi-slice training jobs that + use data-parallelism across slices. At least two data-parallel slices are required. For other + use cases where this is not applicable or ultimate goodput is not required, please use + `OrbaxCheckpointer`. + + Why it can improve goodput: + 1. It can save to a local path (usually backed by a ramdisk) more frequently, so the progress + lost during restart can be reduced. This is in contrast with saving to remote filesystem + such as GCS directly, which has limited bandwidth to support frequent checkpointing. + 2. During restart, checkpoint can be broadcasted through network, which is faster than reading + from a remote filesystem. + + To use the checkpointer, besides configuring it properly, it also requires + `get_consistent_proc_info` to be called and pass `inv_proc_id` and `address` as + `process_id` and `coordinator_address` to `jax.distributed.initialize`. + + ## How it works under the hood + + This checkpointer is intended for multi-slice training that uses data-parallelism across + slices. Orbax emergency checkpoint works by exploiting the following properties: + 1. Tensors are replicated across data-parallel replicas. + 2. When a slice fails in a multi-slice training and failover is started, only nodes + corresponding to the non-healthy slice may be restarted. Healthy nodes from healthy slices + will not restart. + + Hence, all slices can write checkpoints to node's memory or disk, providing us with redundancy + when there's a failure. This checkpoint frequency can be much higher than remote filesystem, + which has limited bandwidth to support high frequency saving. Checkpoints on nodes are referred + as local checkpoints. Checkpoints on remote filesystem are referred as persistent checkpoints. + + When a failure occurs, Orbax checkpointer will find the latest step from all local and + persistent checkpoints. 
If the checkpoint is local, the slice on which that checkpoint is + stored will read the checkpoint and broadcast the read values to other slices. Since local + checkpoints are scattered across different hosts, the process id, which determines the shard id + of locally stored shards, must stay the same for nodes in the healthy replicas to guarantee a + correct restore. We provide an utility function `get_consistent_proc_info` that returns the + process id and global coordinator address. They must be passed to `jax.distributed.initialize`. + + However, the above procedure doesn't apply to some non-tensor states such as data iterators. + Data iterators are unique across jax processes, and thus cannot be stored on nodes. Orbax + emergency checkpointer doesn't support non-tensor states. Therefore, we reuse axlearn + Checkpointer to save, restore and garbage collect those states, which include the index file + and tf iterators. These non-tensor states will be saved whenever local or persistent checkpoint + need to be saved. As the result, the persistent checkpoint structure looks like this: + + ``` + ├── path_prefix + │ ├── non-tensors + │ │ └── step_00000010 + │ │ ├── index + │ │ └── tf_xxx + │ └── tensors + │ └── step_00000010 + │ └── orbax_files_xxx + ``` + + A persistent training checkpoint `step_xxx` is commited when `non-tensors/step_xxx/index` + exists and `tensors/step_xxx` is commited by Orbax. Refer to the docstring of + `OrbaxCheckpointer` for Orbax's commit criteria. + + To abstract the details of the checkpoint layout, the `checkpoint_steps` API returns all steps + for which both Tensor and non-Tensor states have been fully committed. + """ + + _NON_TENSORS_PREFIX: str = "non-tensors" + _TENSORS_PREFIX: str = "tensors" + + @config_class + class Config(BaseCheckpointer.Config): + """Configures OrbaxEmergencyCheckpointer. + + Attributes: + keep_last_n: Keep this many past ckpts. 
+ keep_every_n_steps: If > 0, keeps at least one persistent checkpoint every N steps. + local_keep_last_n: Keep this many past ckpts in local storage (e.g. node memory). + This should almost always set to 1 to avoid OOM. + local_dir: Ckpt base path for local storage. The content in this path must persist + across pod restarts unless the restart is caused by node failure. `local_dir` must + be the same for all processes or processes may hang. + trainer_dir: A string that's unique for the current run. Typically, this is set to + trainer_dir. Local checkpoint will be stored in local_dir/sha256(trainer_dir). + During init, all other folders in local_dir will be removed to prevent unexpected + memory usage. + save_policy: Save policy for persistent checkpoints. + local_save_policy: Save policy for local checkpoints. This should be more frequent than + `save_policy`. Note that data iterator will be saved with either `save_policy` or + `local_save_policy` indicate we should save. + non_tensor_async_timeout_secs: Timeout for async barrier in seconds when saving + non-tensor states. + async_timeout_secs: Timeout for async barrier in seconds when saving tensors. + replica_axis_index: The index of the "data" axis. + """ + + keep_last_n: int = 1 + keep_every_n_steps: Optional[int] = None + local_keep_last_n: int = 1 + local_save_policy: InstantiableConfig[CheckpointPolicy] = config_for_function( + every_n_steps_policy + ).set(n=10) + local_dir: str = "/host-tmp/checkpoints" + trainer_dir: Required[str] = REQUIRED + non_tensor_async_timeout_secs: int = 300 + async_timeout_secs: int = 3600 + replica_axis_index: Required[int] = REQUIRED + + @classmethod + def checkpoint_paths(cls, base_dir: str) -> List[str]: + """See `BaseCheckpointer.checkpointer_paths`. + + Only persistent checkpoint paths are returned. There's no guarantee that the paths returned + have committed TF savables. Use `checkpoint_steps` to get steps with both tensors and + committed TF savables. 
+ """ + logging.log_first_n( + logging.WARNING, + msg="checkpoint_paths is deprecated. Use checkpoint_steps instead.", + n=1, + ) + tensors_dir = os.path.join(base_dir, cls._TENSORS_PREFIX) + return [str(path) for path in ocp.utils.checkpoint_steps_paths(tensors_dir)] + + @classmethod + def checkpoint_steps(cls, base_dir) -> list[int]: + """See `BaseCheckpointer.checkpointer_steps`. + + Only persistent checkpoint steps are returned. + """ + return list( + set( + ocp.utils.checkpoint_steps(os.path.join(base_dir, cls._TENSORS_PREFIX)) + ).intersection( + set(Checkpointer.checkpoint_steps(os.path.join(base_dir, cls._NON_TENSORS_PREFIX))) + ) + ) + + def __init__(self, cfg: Config, *, parent: Optional[Module]): + super().__init__(cfg, parent=parent) + cfg: OrbaxEmergencyCheckpointer.Config = self.config + self._name_format = ocp.step.standard_name_format( + step_prefix=STEP_PREFIX, + step_format_fixed_length=STEP_NUM_DIGITS, + ) + if jax.process_index() == 0: + fs.makedirs(os.path.join(cfg.dir, self._NON_TENSORS_PREFIX)) + fs.makedirs(os.path.join(cfg.dir, self._TENSORS_PREFIX)) + # Cleanup local checkpoints from different runs. + unique_id = _get_unique_id(cfg.trainer_dir) + for fd in fs.listdir(cfg.local_dir): + if not fd.startswith(".") and fd != unique_id: + fs.rmtree(os.path.join(cfg.local_dir, fd)) + self._local_dir = os.path.join(cfg.local_dir, unique_id) + fs.makedirs(self._local_dir) + # Orbax emergency ckpt requires this function to be called prior to checkpointer + # operations. This function also serves as a barrier. + ocp.multihost.initialize_runtime_to_distributed_ids() + ocp.multihost.initialize_distributed_to_device_ids() + ckpt_cfg: Checkpointer.Config = Checkpointer.default_config() + # TODO(hanzhi-zhou): this `keep_last_n` may not be what users expect since non-tensor + # states will save when either local or persistent checkpoint will save. 
+ ckpt_cfg.keep_last_n = cfg.keep_last_n + ckpt_cfg.keep_every_n_steps = cfg.keep_every_n_steps + ckpt_cfg.storage = _TFSavablesStateStorage.default_config() + ckpt_cfg.storage.timeout_secs = cfg.non_tensor_async_timeout_secs + ckpt_cfg.dir = os.path.join(cfg.dir, self._NON_TENSORS_PREFIX) + ckpt_cfg.name = "non-tensors-checkpointer" + + save_policy = cfg.save_policy.instantiate() + local_save_policy = cfg.local_save_policy.instantiate() + + # Non-tensor states must save when either local or persistent ckpt needs to be saved for + # restore from either to succeed. + def _composite_save_policy(*, step: int, evaler_summaries: dict[str, Any]): + return ( + save_policy(step=step, evaler_summaries=evaler_summaries) + or local_save_policy(step=step, evaler_summaries=evaler_summaries) + or self._reached_preemption + ) + + self._composite_save_policy = _composite_save_policy + ckpt_cfg.save_policy = config_for_function(lambda: _composite_save_policy) + self._non_tensor_manager: Checkpointer = ckpt_cfg.instantiate(parent=self) + self._tensor_manager: Optional[oecp.CheckpointManager] = None + # See comments of _eval_summaries in `OrbaxCheckpointer`. + self._eval_summaries = None + self._reached_preemption = False + + # pylint: disable-next=redefined-builtin + def ckpt_dir(self, step: int, dir: Optional[str] = None) -> str: + """Obtains the checkpoint dir for the given step.""" + if dir is None: + dir = self._non_tensor_manager.directory + return str(ocp.step.build_step_path(dir, self._name_format, step)) + + def _get_abstract_state( + self, state_with_tensors: Nested[Tensor] + ) -> Nested[jax.ShapeDtypeStruct]: + """Generate the abstract states required by the Orbax emergency checkpointer.""" + return jax.tree.map( + lambda x: jax.ShapeDtypeStruct(x.shape, x.dtype, sharding=x.sharding), + state_with_tensors, + ) + + def _get_tensor_manager(self, state_with_tensors: Nested[Tensor]) -> oecp.CheckpointManager: + """Creates the emergency checkpoint manager if not exists. 
+ + We defer the creation of this checkpoint manager because it requires the state dict, + which is not present during __init__. + """ + cfg: OrbaxEmergencyCheckpointer.Config = self.config + if self._tensor_manager is not None: + return self._tensor_manager + + save_policy = cfg.save_policy.instantiate() + local_save_policy = cfg.local_save_policy.instantiate() + + def _orbax_save_fn( + step: int, last_saved_step: Optional[int], wrapped_save_policy: CheckpointPolicy + ) -> bool: + del last_saved_step + return wrapped_save_policy(step=step, evaler_summaries=self._eval_summaries) + + # For meaning of these options, refer to + # https://github.com/google/orbax/blob/95be2c021bc8cbf4badd83a053ff57b7a9f9b314/checkpoint/orbax/checkpoint/experimental/emergency/checkpoint_manager.py#L277 + self._tensor_manager = oecp.CheckpointManager( + self._local_dir, + persistent_directory=os.path.join(cfg.dir, self._TENSORS_PREFIX), + global_mesh=thread_resources.env.physical_mesh, + abstract_state=self._get_abstract_state(state_with_tensors), + options=oecp.CheckpointManagerOptions(
`local_state_handler` is missing here. CheckpointManager::__init__ does not have default value for local_state_handler ``` def __init__( self, local_directory: epath.PathLike, persistent_directory: epath.PathLike, global_mesh: jax.sharding.Mesh, abstract_state: PyTree, # a single PyTree describing the state structure # TODO: b/330585086 - Support arbitrary items beyond state. We will have # to evaluate whether arbitrary items can be a good fit for local # checkpointing, given restore+broadcast requirements. local_state_handler: CheckpointHandler, *, options: Optional[CheckpointManagerOptions] = None, metadata: Optional[dict[str, Any]] = None, logger: Optional[abstract_logger.AbstractLogger] = None, ): ``` pre-commit and pytype fail too.
axlearn
github_2023
python
987
apple
apghml
@@ -30,6 +30,8 @@ def _sort_fields(klass: _T) -> _T: klass.__dataclass_fields__ = dict( sorted(klass.__dataclass_fields__.items(), key=lambda x: x[0]) ) + # Return the class after sorting the fields (for functional style) + return klass
It seems like this could be a footgun since people might not realize it also mutates its argument. Whereas trying to use the returned None value will loudly fail.
axlearn
github_2023
python
987
apple
apghml
@@ -803,6 +803,7 @@ def __enter__(self): if self._within_context: raise ValueError("Already in a context.") self._within_context = True + return self
I might be mistaken, but IIRC, it's not required for `__enter__()` to return anything in Python. I'm not necessarily opposed to adding the return here though.
axlearn
github_2023
python
987
apple
apghml
@@ -950,6 +951,7 @@ def __init__(self, cfg: Config, *, parent: Optional[Module]): def __enter__(self): super().__enter__() self._start_gc_thread()
Could you clarify whether the things you are changing in this and your other PR were found with the assistance of AI code checking tools?
axlearn
github_2023
python
939
apple
kelvin-zou
@@ -0,0 +1,142 @@ +# Copyright © 2024 Amazon Inc. +"""Flash attention Kernels using NKI on Neuron. Tested on trn1 & trn2.""" +from functools import partial + +import jax +import jax.numpy as jnp + +# Import needed to enable JAX cache on Neuron +import jax_neuronx # pylint: disable=unused-import
Do we need to install packages? Maybe update pyproject toml as well? Also, can we make it an on demand import, to avoid breaking main execution just in case?
axlearn
github_2023
python
939
apple
kelvin-zou
@@ -0,0 +1,142 @@ +# Copyright © 2024 Amazon Inc. +"""Flash attention Kernels using NKI on Neuron. Tested on trn1 & trn2.""" +from functools import partial + +import jax +import jax.numpy as jnp + +# Import needed to enable JAX cache on Neuron +import jax_neuronx # pylint: disable=unused-import +import neuronxcc.nki.language as nl +from jax import custom_vjp +from neuronxcc.nki.kernels.attention import flash_attn_bwd, flash_fwd + +Tensor = jax.Array + +lnc = 2 if jax.devices()[0].device_kind == "NC_v3d" else 1 + + +@partial(custom_vjp, nondiff_argnums=(4, 5, 6)) +def flash_attention( + query: Tensor, + key: Tensor, + value: Tensor, + bias: Tensor, + causal: bool = False, + softmax_scale: float = 1.0, + dropout_rate: float = 0.0, +): + out, _ = _mha_forward(query, key, value, bias, causal, softmax_scale, dropout_rate) + return out + + +def _mha_forward(query, key, value, bias, causal, softmax_scale, dropout_rate): + # Get the batch size, sequence lengths, number of heads, and hidden dimension + batch_size, _, num_heads, _ = query.shape + + # Transpose the query, key, and value tensors + q = query.transpose(0, 2, 3, 1) # [batch_size, num_heads, d_model, q_seq_len] + k = key.transpose(0, 2, 3, 1) # [batch_size, num_heads, d_model, kv_seq_len] + v = value.transpose(0, 2, 1, 3) # [batch_size, num_heads, kv_seq_len, d_model] + + seed = jnp.array([1]) + + # Call the NKI kernel, duplicate the kernel if we cannot shard on num_heads + if (num_heads % 2) == 0 and (num_heads // 2 > 0):
This part is already fixed by caller, you shouldn't need it. Is it not the case in your testing?
axlearn
github_2023
python
939
apple
ruomingp
@@ -276,13 +279,33 @@ def get_segment_ids(segment_ids: SegmentIdAttentionBias) -> Optional[Tensor]: block_size=block_size, ) + elif backend == "neuron":
Maybe only import `neuron_attention` inside this branch?
axlearn
github_2023
python
939
apple
ruomingp
@@ -0,0 +1,142 @@ +# Copyright © 2024 Amazon Inc. +"""Flash attention Kernels using NKI on Neuron. Tested on trn1 & trn2.""" +from functools import partial + +import jax +import jax.numpy as jnp + +# Import needed to enable JAX cache on Neuron +import jax_neuronx # pylint: disable=unused-import +import neuronxcc.nki.language as nl +from jax import custom_vjp +from neuronxcc.nki.kernels.attention import flash_attn_bwd, flash_fwd + +Tensor = jax.Array + +lnc = 2 if jax.devices()[0].device_kind == "NC_v3d" else 1 + + +@partial(custom_vjp, nondiff_argnums=(4, 5, 6)) +def flash_attention( + query: Tensor, + key: Tensor, + value: Tensor, + bias: Tensor, + causal: bool = False, + softmax_scale: float = 1.0, + dropout_rate: float = 0.0, +): + out, _ = _mha_forward(query, key, value, bias, causal, softmax_scale, dropout_rate) + return out + + +def _mha_forward(query, key, value, bias, causal, softmax_scale, dropout_rate): + # Get the batch size, sequence lengths, number of heads, and hidden dimension
Nit: end comments with . (here and everywhere)
axlearn
github_2023
python
939
apple
ruomingp
@@ -0,0 +1,142 @@ +# Copyright © 2024 Amazon Inc. +"""Flash attention Kernels using NKI on Neuron. Tested on trn1 & trn2.""" +from functools import partial + +import jax +import jax.numpy as jnp + +# Import needed to enable JAX cache on Neuron +import jax_neuronx # pylint: disable=unused-import +import neuronxcc.nki.language as nl +from jax import custom_vjp +from neuronxcc.nki.kernels.attention import flash_attn_bwd, flash_fwd + +Tensor = jax.Array + +lnc = 2 if jax.devices()[0].device_kind == "NC_v3d" else 1 + + +@partial(custom_vjp, nondiff_argnums=(4, 5, 6)) +def flash_attention( + query: Tensor, + key: Tensor, + value: Tensor, + bias: Tensor, + causal: bool = False, + softmax_scale: float = 1.0, + dropout_rate: float = 0.0, +):
Add comments on args and return value shapes and semantics?
axlearn
github_2023
python
939
apple
kelvin-zou
@@ -0,0 +1,168 @@ +# Copyright © 2024 Amazon Inc. +"""Flash attention Kernels using NKI on Neuron. Tested on trn1 & trn2.""" +from functools import partial + +import jax +import jax.numpy as jnp + +# Import needed to enable JAX cache on Neuron +import jax_neuronx # pylint: disable=unused-import +import neuronxcc.nki.language as nl +from jax import custom_vjp +from neuronxcc.nki.kernels.attention import flash_attn_bwd, flash_fwd + +Tensor = jax.Array + +lnc = 2 if jax.devices()[0].device_kind == "NC_v3d" else 1 + + +@partial(custom_vjp, nondiff_argnums=(4, 5, 6)) +def flash_attention( + query: Tensor, + key: Tensor, + value: Tensor, + bias: Tensor, + causal: bool = False,
Can we support segment ID? Or a more general masking fn (with optimized handling) is even better.
axlearn
github_2023
python
939
apple
apivovarov
@@ -0,0 +1,168 @@ +# Copyright © 2024 Amazon Inc. +"""Flash attention Kernels using NKI on Neuron. Tested on trn1 & trn2.""" +from functools import partial + +import jax +import jax.numpy as jnp + +# Import needed to enable JAX cache on Neuron +import jax_neuronx # pylint: disable=unused-import +import neuronxcc.nki.language as nl +from jax import custom_vjp +from neuronxcc.nki.kernels.attention import flash_attn_bwd, flash_fwd + +Tensor = jax.Array + +lnc = 2 if jax.devices()[0].device_kind == "NC_v3d" else 1 + + +@partial(custom_vjp, nondiff_argnums=(4, 5, 6)) +def flash_attention( + query: Tensor, + key: Tensor, + value: Tensor, + bias: Tensor, + causal: bool = False, + softmax_scale: float = 1.0, + dropout_rate: float = 0.0, +): + """Wraps _mha_forward for custom vjp. + + Args: + query: Query of shape [batch_size, target_length, num_heads, per_head_dim]. + key: Key of shape [batch_size, source_length, num_heads, per_head_dim]. + value: Value of shape [batch_size, source_length, num_heads, per_head_dim]. + bias: Optional logit biases of shape [batch_size, num_heads, target_length, source_length]. + softmax_scale: Optional scale to apply to softmax. Defaults to 1. + causal: Whether to apply causal mask. + dropout_rate: Dropout rate. Default to 0.0 (no dropout). + + Returns: + The attention outputs of shape [batch_size, target_length, num_heads, per_head_dim]. + """ + out, _ = _mha_forward(query, key, value, bias, causal, softmax_scale, dropout_rate) + return out + + +def _mha_forward(query, key, value, bias, causal, softmax_scale, dropout_rate): + """Computes attention outputs following FlashAttention. + + See also `_mha_backward` for the backward pass. + + Args: + query: Input query. + key: Input key. + value: Input value. + bias: Input bias. + causal: Input segment_ids. + softmax_scale: Softmax scale to use in the kernel. + dropout_rate: Dropout rate to use in the kernel. + """ + # Get the batch size, sequence lengths, number of heads, and hidden dimension. 
+ batch_size, _, num_heads, _ = query.shape + + # Transpose the query, key, and value tensors. + q = query.transpose(0, 2, 3, 1) # [batch_size, num_heads, d_model, q_seq_len]. + k = key.transpose(0, 2, 3, 1) # [batch_size, num_heads, d_model, kv_seq_len]. + v = value.transpose(0, 2, 1, 3) # [batch_size, num_heads, kv_seq_len, d_model]. + + seed = jnp.array([1]) + + # Call the NKI kernel, duplicate the kernel if we cannot shard on num_heads. + if (num_heads % 2) == 0 and (num_heads // 2 > 0):
``` # even num_heads except 0 if num_heads > 0 and num_heads % 2 == 0: ```
axlearn
github_2023
python
939
apple
apivovarov
@@ -0,0 +1,144 @@ +# Copyright © 2024 Amazon Inc. +"""Tests for Flash attention on Neuron. Tested on trn1 & trn2.""" + +import chex +import jax +import jax.numpy as jnp +import pytest + +from axlearn.common.flash_attention.utils import mha_reference + +if jax.default_backend() != "neuron": + pytestmark = pytest.mark.skip(reason="Incompatible hardware, AWS Neuron only test.") + +# On demand import only if test is needed. +# pylint: disable=wrong-import-position +from axlearn.common.flash_attention.neuron_attention import flash_attention + + +@pytest.mark.parametrize( + "batch_size,seq_len,num_heads,per_head_dim", + [ + (1, 2048, 1, 64), + (2, 2048, 2, 64), + (1, 2048, 1, 128), + (2, 2048, 2, 128), + (1, 2048, 8, 128), + (2, 2048, 8, 128), + ], +) +@pytest.mark.parametrize("use_fwd", [True, False]) +@pytest.mark.parametrize("causal", [True, False]) +@pytest.mark.parametrize("attention_bias_type", [None, "4d"]) +@pytest.mark.parametrize("input_dtype", [jnp.float16, jnp.bfloat16, jnp.float32]) +def test_fwd_against_ref( + batch_size: int, + seq_len: int, + num_heads: int, + per_head_dim: int, + causal: bool, + input_dtype: jnp.dtype, + attention_bias_type: bool, +): + softmax_scale = 1.0 / (per_head_dim**0.5)
Maybe just ``` per_head_dim**-0.5 ```
axlearn
github_2023
python
939
apple
markblee
@@ -0,0 +1,144 @@ +# Copyright © 2024 Amazon Inc. +"""Tests for Flash attention on Neuron. Tested on trn1 & trn2.""" + +import chex +import jax +import jax.numpy as jnp +import pytest + +from axlearn.common.flash_attention.utils import mha_reference + +if jax.default_backend() != "neuron": + pytestmark = pytest.mark.skip(reason="Incompatible hardware, AWS Neuron only test.")
Looks like a number of CI steps are failing -- I think we can either do something like ``` if jax.default_backend() != "neuron": pytest.skip(reason=..., allow_module_level=True) ``` or update `run_tests.sh` to exclude tests marked with `neuron`.
axlearn
github_2023
python
939
apple
apivovarov
@@ -0,0 +1,179 @@ +# Copyright © 2024 Amazon Inc. +"""Flash attention Kernels using NKI on Neuron. Tested on trn1 & trn2.""" +from functools import partial + +import jax +import jax.numpy as jnp + +# TODO(apoorvtintin) remove pytype disable when dependencies are public. +# pytype: disable=import-error +# Import needed to enable JAX cache on Neuron. +import jax_neuronx # pylint: disable=unused-import +import neuronxcc.nki.language as nl +from jax import custom_vjp +from neuronxcc.nki.kernels.attention import flash_attn_bwd, flash_fwd + +# pytype: enable=import-error + +Tensor = jax.Array +lnc = 2 if jax.devices()[0].device_kind == "NC_v3d" else 1 + + +@partial(custom_vjp, nondiff_argnums=(4, 5, 6)) +def flash_attention( + query: Tensor, + key: Tensor, + value: Tensor, + bias: Tensor, + causal: bool = False, + softmax_scale: float = 1.0, + dropout_rate: float = 0.0, +): + """Wraps _mha_forward for custom vjp. + + Args: + query: Query of shape [batch_size, target_length, num_heads, per_head_dim]. + key: Key of shape [batch_size, source_length, num_heads, per_head_dim]. + value: Value of shape [batch_size, source_length, num_heads, per_head_dim]. + bias: Optional logit biases of shape [1, 1, target_length, source_length]. + softmax_scale: Optional scale to apply to softmax. Defaults to 1. + causal: Whether to apply causal mask. + dropout_rate: Dropout rate. Default to 0.0 (no dropout). + + Returns: + The attention outputs of shape [batch_size, target_length, num_heads, per_head_dim]. + """ + out, _ = _mha_forward(query, key, value, bias, causal, softmax_scale, dropout_rate) + return out + + +def _mha_forward(query, key, value, bias, causal, softmax_scale, dropout_rate): + """Computes attention outputs following FlashAttention. + + See also `_mha_backward` for the backward pass. + + Args: + query: Input query. + key: Input key. + value: Input value. + bias: Input bias. + causal: Input segment_ids. + softmax_scale: Softmax scale to use in the kernel. 
+ dropout_rate: Dropout rate to use in the kernel. + """ + # Get the batch size, sequence lengths, number of heads, and hidden dimension. + batch_size, _, num_heads, _ = query.shape + + # Transpose the query, key, and value tensors. + q = query.transpose(0, 2, 3, 1) # [batch_size, num_heads, d_model, q_seq_len]. + k = key.transpose(0, 2, 3, 1) # [batch_size, num_heads, d_model, kv_seq_len]. + v = value.transpose(0, 2, 1, 3) # [batch_size, num_heads, kv_seq_len, d_model]. + + seed = jnp.array([1]) + + # Call the NKI kernel, duplicate the kernel if we cannot shard on num_heads. + if (num_heads % 2) == 0 and (num_heads // 2 > 0): + grid = batch_size, nl.nc(lnc) * (num_heads // lnc) + else: + grid = batch_size, num_heads + + if bias is not None: + assert ( + bias.ndim == 4 + ), f"Neuron flash_attention is only expecting bias.ndim = 4 but got {bias.ndim}" + assert bias.shape[0] == 1 and bias.shape[1] == 1, ( + f"Bias is only supported when batch and num_heads are both 1, " + f"batch is {bias.shape[0]} and num_heads is {bias.shape[1]}" + ) + attn_output, lse = flash_fwd[grid]( + q, + k, + v, + seed, + bias, + use_causal_mask=causal, + softmax_scale=softmax_scale, + mixed_precision=True, + dropout_p=dropout_rate, + ) + else: + attn_output, lse = flash_fwd[grid]( + q, + k, + v, + seed, + use_causal_mask=causal, + softmax_scale=softmax_scale, + mixed_precision=True, + dropout_p=dropout_rate, + ) + # Transpose the output back to the original shape. + attn_output = attn_output.transpose(0, 2, 1, 3) # [batch_size, q_seq_len, num_heads, d_model]. + + return attn_output, (lse, attn_output, q, k, v, bias) + + +def _mha_backward(causal, softmax_scale, dropout_rate, res, d_attn_output): + lse, o, q, k, v, bias = res + batch_size, num_heads, _, _ = q.shape + + # Transpose the input tensors. + o = o.transpose(0, 2, 3, 1) + dy = d_attn_output.transpose(0, 2, 3, 1) + + # Transpose v tensor. 
+ v = jnp.transpose(v, axes=(0, 1, 3, 2)) + seed = jnp.array([1]) + + # Call the NKI kernel, duplicate the kernel if we cannot shard on num_heads. + if (num_heads % 2) == 0 and (num_heads // 2 > 0):
This can be simplified to ``` # even num_heads except 0 if num_heads > 0 and num_heads % 2 == 0: ```
axlearn
github_2023
python
939
apple
apivovarov
@@ -0,0 +1,179 @@ +# Copyright © 2024 Amazon Inc. +"""Flash attention Kernels using NKI on Neuron. Tested on trn1 & trn2.""" +from functools import partial + +import jax +import jax.numpy as jnp + +# TODO(apoorvtintin) remove pytype disable when dependencies are public. +# pytype: disable=import-error +# Import needed to enable JAX cache on Neuron. +import jax_neuronx # pylint: disable=unused-import +import neuronxcc.nki.language as nl +from jax import custom_vjp +from neuronxcc.nki.kernels.attention import flash_attn_bwd, flash_fwd + +# pytype: enable=import-error + +Tensor = jax.Array +lnc = 2 if jax.devices()[0].device_kind == "NC_v3d" else 1 + + +@partial(custom_vjp, nondiff_argnums=(4, 5, 6)) +def flash_attention( + query: Tensor, + key: Tensor, + value: Tensor, + bias: Tensor, + causal: bool = False, + softmax_scale: float = 1.0, + dropout_rate: float = 0.0, +): + """Wraps _mha_forward for custom vjp. + + Args: + query: Query of shape [batch_size, target_length, num_heads, per_head_dim]. + key: Key of shape [batch_size, source_length, num_heads, per_head_dim]. + value: Value of shape [batch_size, source_length, num_heads, per_head_dim]. + bias: Optional logit biases of shape [1, 1, target_length, source_length]. + softmax_scale: Optional scale to apply to softmax. Defaults to 1. + causal: Whether to apply causal mask. + dropout_rate: Dropout rate. Default to 0.0 (no dropout). + + Returns: + The attention outputs of shape [batch_size, target_length, num_heads, per_head_dim]. + """ + out, _ = _mha_forward(query, key, value, bias, causal, softmax_scale, dropout_rate) + return out + + +def _mha_forward(query, key, value, bias, causal, softmax_scale, dropout_rate): + """Computes attention outputs following FlashAttention. + + See also `_mha_backward` for the backward pass. + + Args: + query: Input query. + key: Input key. + value: Input value. + bias: Input bias. + causal: Input segment_ids. + softmax_scale: Softmax scale to use in the kernel. 
+ dropout_rate: Dropout rate to use in the kernel. + """ + # Get the batch size, sequence lengths, number of heads, and hidden dimension. + batch_size, _, num_heads, _ = query.shape + + # Transpose the query, key, and value tensors. + q = query.transpose(0, 2, 3, 1) # [batch_size, num_heads, d_model, q_seq_len]. + k = key.transpose(0, 2, 3, 1) # [batch_size, num_heads, d_model, kv_seq_len]. + v = value.transpose(0, 2, 1, 3) # [batch_size, num_heads, kv_seq_len, d_model]. + + seed = jnp.array([1]) + + # Call the NKI kernel, duplicate the kernel if we cannot shard on num_heads. + if (num_heads % 2) == 0 and (num_heads // 2 > 0):
This can be simplified to ``` # even num_heads except 0 if num_heads > 0 and num_heads % 2 == 0: ```
axlearn
github_2023
python
939
apple
apivovarov
@@ -0,0 +1,143 @@ +# Copyright © 2024 Amazon Inc. +"""Tests for Flash attention on Neuron. Tested on trn1 & trn2.""" + +import chex +import jax +import jax.numpy as jnp +import pytest + +from axlearn.common.flash_attention.utils import mha_reference + +if jax.default_backend() != "neuron": + pytestmark = pytest.skip( + reason="Incompatible hardware, AWS Neuron only test.", allow_module_level=True + ) + + +@pytest.mark.parametrize( + "batch_size,seq_len,num_heads,per_head_dim", + [ + (1, 2048, 1, 64), + (2, 2048, 2, 64), + (1, 2048, 1, 128), + (2, 2048, 2, 128), + (1, 2048, 8, 128), + (2, 2048, 8, 128), + ], +) +@pytest.mark.parametrize("causal", [True, False]) +@pytest.mark.parametrize("attention_bias_type", [None, "2d"]) +@pytest.mark.parametrize("input_dtype", [jnp.float16, jnp.bfloat16, jnp.float32]) +def test_fwd_against_ref( + batch_size: int, + seq_len: int, + num_heads: int, + per_head_dim: int, + causal: bool, + input_dtype: jnp.dtype, + attention_bias_type: bool, +): + # On demand import only if test is needed. + # pylint: disable=import-outside-toplevel + from axlearn.common.flash_attention.neuron_attention import flash_attention + + softmax_scale = 1.0 / (per_head_dim**0.5)
Maybe just per_head_dim**-0.5