repo_name
stringlengths
1
62
dataset
stringclasses
1 value
lang
stringclasses
11 values
pr_id
int64
1
20.1k
owner
stringlengths
2
34
reviewer
stringlengths
2
39
diff_hunk
stringlengths
15
262k
code_review_comment
stringlengths
1
99.6k
axlearn
github_2023
python
818
apple
markblee
@@ -536,71 +539,123 @@ def _mha_backward_kernel( del out_ref, l_ref # Not needed
Update the docstring with any remaining differences from https://github.com/jax-ml/jax/blob/0995bc231c51e2ee66995be8ee2b31adf9236509/jax/experimental/pallas/ops/gpu/attention.py#L343?
axlearn
github_2023
python
818
apple
markblee
@@ -82,24 +82,24 @@ def _perf_report(prefix: str): # 128 is the most common value for per_head_dim. - batch_size, num_heads, seq_len, per_head_dim = 2, 32, 2048, 128 + batch_size, num_heads, seq_len, per_head_dim = 2, 16, 8192, 128 # Vary batch size for fixed heads and seq length. batch_size_b...
Should we retain the original benchmark configurations?
axlearn
github_2023
python
826
apple
ruomingp
@@ -414,6 +414,41 @@ def _loss(params, inputs, paddings, layer=layer): self.assertNestedAllClose(grad_params, jax.tree.map(jnp.zeros_like, layer_params)) assert_allclose(grad_inputs, jnp.zeros_like(inputs), atol=1e-6, rtol=1e-6) + def test_lookup(self): + batch_size, seq_len, input_dim = 2...
Check the output dtypes?
axlearn
github_2023
python
826
apple
markblee
@@ -414,6 +414,42 @@ def _loss(params, inputs, paddings, layer=layer): self.assertNestedAllClose(grad_params, jax.tree.map(jnp.zeros_like, layer_params)) assert_allclose(grad_inputs, jnp.zeros_like(inputs), atol=1e-6, rtol=1e-6) + def test_lookup(self): + batch_size, seq_len, input_dim = 2...
> This is a convenience, legacy function that exists to support older code that uses the singleton RandomState. Can we use `jax.random` instead?
axlearn
github_2023
python
786
apple
Ethanlm
@@ -0,0 +1,118 @@ +# Copyright © 2024 Apple Inc. + +"""Device monitor module, to collect and report system metrics.""" +import contextlib +import threading +from typing import Literal + +from absl import logging + +from axlearn.common.config import Configurable, config_class, maybe_instantiate +from axlearn.common.util...
I wonder if we can switch to multiprocessing instead of multithreading. There were cases where the whole process was locked, and the watchdog thread can't even run and print out warnings
axlearn
github_2023
python
786
apple
Ethanlm
@@ -376,22 +382,46 @@ def _stop_watchdog(self): def _watchdog_loop(self, *, context_stack: list[InvocationContext]): cfg = self.config install_context_stack(context_stack) + time_elapsed_in_sec_since_last_check: float = 0.0 + # Set a scanning time to 10 mins or the watchdog_timeout_...
+1 on crashing the program to avoid the waste. But also making it configurable so that a job can configure how often the health check should run, and when to crash (like if idle for x hours)
axlearn
github_2023
python
788
apple
kelvinzou
@@ -89,31 +88,5 @@ def setup( process_id=process_id, ) - # Ensure that coordinator initialization respects initialization_timeout. - # The current jax version hardcodes the number of attempts to discover coordinator address: - # https://github.com/google/jax/blob/33e...
add a comment in flag, stating it is being deprecated?
axlearn
github_2023
python
778
apple
markblee
@@ -2331,3 +2333,102 @@ def parameters_from_t5x_encoder_decoder( raise ValueError(f"Unsupported layer: {layer}") return as_tensor(dst) + + +def _permute_q_k_for_rope(vector: torch.Tensor) -> torch.Tensor: + """Permutes q and k vector because transformers package has a different implementation of ...
```suggestion NestedTensor containing the same structure as state, but the weights are from llama. ```
axlearn
github_2023
python
778
apple
markblee
@@ -0,0 +1,174 @@ +# Copyright © 2024 Apple Inc. + +"""Tests fuji weight loading from llama.""" + +import os + +import jax +import jax.numpy as jnp +import numpy as np +import pytest +import torch +from absl.testing import absltest, parameterized +from transformers import AutoConfig +from transformers.models.llama.mode...
Does `self.assertNestedAllClose` not work?
axlearn
github_2023
python
778
apple
ruomingp
@@ -0,0 +1,161 @@ +# Copyright © 2024 Apple Inc. + +"""Tests fuji weight loading from llama.""" + +import os + +import jax +import numpy as np +import pytest +import torch +from absl.testing import absltest, parameterized +from transformers import AutoConfig +from transformers.models.llama.modeling_llama import LlamaFo...
Are high_cpu tests still run at every CI?
axlearn
github_2023
python
743
apple
ruomingp
@@ -1781,8 +1832,6 @@ def _forward_for_mode( "key and value must be both None or both set, " f"key:{type(key)}, value:{type(value)}" ) - if self._mask_fn and (key is not None or value is not None): - raise ValueError("key and value are not expected when u...
Can you remind me why this logic is removed?
axlearn
github_2023
python
743
apple
qdavid1
@@ -49,6 +50,19 @@ class TestFlashAttention(TestCase): ), ] + @parameterized.product(seq_len=[8, 16, 32, 128], sliding_window_size=[4, 8, 16]) + def test_sliding_window_mask(self, seq_len, sliding_window_size): + shape = (seq_len, seq_len) + ref_mask = splash_attention_mask.LocalMask...
According to the documentation of splash_attention_mask.LocalMask(), setting window_size=(sliding_window_size, None) would make the mask unbounded on the right, which would not be the same behavior as the function it is testing against: sliding_window_causal_mask().
axlearn
github_2023
python
743
apple
berlino
@@ -190,11 +205,14 @@ def _compute_attention( ) attention_logit_biases = attention_logit_biases.astype(q_proj.dtype) - mask_fn = self._mask_fn + if attention_logit_biases is None or self._mask_fn is causal_mask:
@changlan I guess this condition should be if attention_logit_bias is None or not self._is_mask_fn_used()
axlearn
github_2023
others
764
apple
markblee
@@ -151,6 +151,7 @@ grain = [ ] # Audio dependencies. audio = [ + "einops",
Should we pin these deps?
axlearn
github_2023
others
759
apple
samos123
@@ -123,7 +123,7 @@ dataflow = [ # GPU custom kernel dependency. gpu = [ "triton==2.1.0", - "jax[cuda12_pip]==0.4.30", + "jax[cuda12_pip]==0.4.33",
```suggestion "jax[cuda12]==0.4.33", ``` https://jax.readthedocs.io/en/latest/installation.html#pip-installation-nvidia-gpu-cuda-installed-via-pip-easier
axlearn
github_2023
python
758
apple
ruomingp
@@ -269,24 +274,11 @@ def _tpu_splash_attention( if mask is None: mask = splash_attention_mask.FullMask(mask_shape) else: - - def wrap_mask(mask: MaskFn) -> MaskFn: - """Wrap `mask` so that the return type is a numpy array - if the original input was, even if we are insid...
Comment on the maximum size of this NumpyMask? E.g., it will contain 1B entries for seq_len=32K?
axlearn
github_2023
python
761
apple
ruomingp
@@ -1912,23 +1918,46 @@ def _forward_for_mode( return dict(i_proj=i_proj_state), output def _logit_biases_for_mask( - self, *, mode: ForwardMode, seq_len: int, time_step: Optional[Tensor] = None + self, + *, + mode: ForwardMode, + kv_len: int, + query_len: Optio...
```suggestion If set, this is the query length. Otherwise, it uses kv_len as the query length. Must be None for ForwardMode.EXTEND_STEP. ```
axlearn
github_2023
python
745
apple
ruomingp
@@ -0,0 +1,123 @@ +# Copyright © 2024 Apple Inc. + +"""View logs for a job via Cloud Logging. + +Example: + + # At the moment, name is assumed to be a job submitted via GKE. + axlearn gcp logs --name=... + +""" + +import urllib.parse + +from absl import app, flags, logging + +from axlearn.cloud.gcp.config import ...
Does the worker ID correspond to JAX `process_index`? If not, is there a way to find the logs of `process_index=0`?
axlearn
github_2023
python
748
apple
ruomingp
@@ -535,6 +535,12 @@ def _wrap_method_with_auto_child_context(*, method_fn: Callable, method_name: st `partial(method_fn, instance)`, or supply an instance explicitly as the first arg. """ + if not traceback_util.is_stack_summary_enabled(): + method_fn = functools.wraps(method_fn)( + fu...
Nit: since we always `_call_method_in_context`, maybe we can move it outside the `if`: ``` method_fn_in_context = functools.partial( _call_method_in_context, method_fn=method_fn, method_name=method_name) if not traceback_util.is_stack_summary_enabled(): return functools.wraps(method_fn)(method_fn_in_cont...
axlearn
github_2023
python
744
apple
samos123
@@ -590,6 +590,9 @@ def _mha_backward( # NOTE: temporarily removed the "xla" branch, which seems unused. if backward_pass_impl == "triton": + # We must shrink the block size for float32 inputs to avoid OOM during bwd pass. + if jnp.float32 in (q.dtype, k.dtype, v.dtype): + block_q =...
Should we use min instead, in the case of block_q and block_k being less than than 64? ``` block_q = min(64, block_q) block_k = min(64, block_k) ``` Not sure if there is any scenario where block_k and block_q are less than 64 though.
axlearn
github_2023
python
738
apple
markblee
@@ -267,84 +269,101 @@ def _methods_to_wrap_for_auto_child_context(self) -> dict[str, Callable]: if not hasattr(BaseLayer, method) } - def dtype(self): - if self.config.dtype is not None: - return self.config.dtype - if self.parent is not None: - return sel...
```suggestion for method_name, method_fn in methods.items(): ``` Did we need the `dict`?
axlearn
github_2023
python
738
apple
markblee
@@ -267,84 +269,101 @@ def _methods_to_wrap_for_auto_child_context(self) -> dict[str, Callable]: if not hasattr(BaseLayer, method) } - def dtype(self): - if self.config.dtype is not None: - return self.config.dtype - if self.parent is not None: - return sel...
I may have missed it, but where is the comment about when wrapping happens?
axlearn
github_2023
python
738
apple
markblee
@@ -520,10 +536,29 @@ def _wrap_method_with_auto_child_context(*, method_fn: Callable, method_name: st """ @no_stack_summary - def wrap_method_fn(self, *args, method_fn=method_fn, **kwargs): - return _call_method_in_context( - self, *args, method_fn=method_fn, method_name=method_name, *...
Do you have an example with/without this? Since CI logs are usually easy to search I wonder if we need this?
axlearn
github_2023
python
728
apple
ruomingp
@@ -3589,6 +3590,30 @@ def _forward_for_mode( return all_layer_states, self._aggregate_layer_outputs(all_layer_outputs) + # pylint: disable=unused-argument + def _update_data( + self, + data: Tensor, + *, + all_layer_outputs: list[BaseTransformerLayer.Output], + ): + ...
Fix the comment format?
axlearn
github_2023
python
728
apple
ruomingp
@@ -3563,6 +3563,7 @@ def _forward_for_mode( all_layer_states = [] for i, layer in enumerate(self._layers): # Prepare inputs to the current layer. + data = self._update_data(data, all_layer_outputs=all_layer_outputs)
Add a test for this logic?
axlearn
github_2023
python
728
apple
markblee
@@ -3589,6 +3590,30 @@ def _forward_for_mode( return all_layer_states, self._aggregate_layer_outputs(all_layer_outputs) + # pylint: disable=unused-argument
Could we use `del all_layer_outputs` in function body or `disable-next`?
axlearn
github_2023
python
728
apple
markblee
@@ -3589,6 +3590,30 @@ def _forward_for_mode( return all_layer_states, self._aggregate_layer_outputs(all_layer_outputs) + # pylint: disable=unused-argument + def _update_data( + self, + data: Tensor, + *, + all_layer_outputs: list[BaseTransformerLayer.Output], + ): + ...
```suggestion Args: data: A Tensor denoting the input data to the upcoming layer. all_layer_outputs: A list of BaseTransformerLayer.Output that is appended with the output of each constituent layer in the stack. Returns: A new Tensor. ```
axlearn
github_2023
python
720
apple
ruomingp
@@ -315,11 +315,14 @@ class Config(StateStorage.Config): `None` and `1` means no sharding. `-1` means fully shard along data-parallel replicas. `>1` means custom sharding degree (currently not implemented). max_concurrent_gb: Max concurrent shards (in GB) to write.
Should we rename this `max_current_save_gb` to be symmetric?
axlearn
github_2023
python
720
apple
ruomingp
@@ -315,11 +315,14 @@ class Config(StateStorage.Config): `None` and `1` means no sharding. `-1` means fully shard along data-parallel replicas. `>1` means custom sharding degree (currently not implemented). max_concurrent_gb: Max concurrent shards (in GB) to write. + ...
Why do we use 32GB as the default? Is it for backwards compatibility?
axlearn
github_2023
python
727
apple
ruomingp
@@ -0,0 +1,209 @@ +# Copyright © 2024 Apple Inc. + +"""Tests grain text utilities.""" + +import os +from typing import Sequence + +import numpy as np +import pytest +import seqio +from absl.testing import parameterized + +from axlearn.common.config import config_for_function +from axlearn.common.input_fake import fake_...
```suggestion _DummyVocabulary, encode_mapping=mapping, decode_mapping=reversed([(v, k) for k, v in mapping]) ```
axlearn
github_2023
python
627
apple
cpgaffney1
@@ -27,6 +27,8 @@ from absl import logging from jax.experimental import maps, multihost_utils from jax.experimental.array_serialization import serialization as array_serialization +from orbax.checkpoint.checkpoint_manager import CheckpointManager, CheckpointManagerOptions
You should always reference `ocp` rather than importing individual modules.
axlearn
github_2023
python
627
apple
cpgaffney1
@@ -619,16 +627,137 @@ class Config(Module.Config): # If > 0, keeps at least one checkpoint every N steps. keep_every_n_steps: Optional[int] = None # Interval between garbage collection runs. - gc_loop_interval_seconds: float = 60 + gc_loop_interval_seconds: Optional[float] = 60...
"transform" has a bit of a different meaning - this can probably just be something like `to_shape_dtype_struct`.
axlearn
github_2023
python
627
apple
cpgaffney1
@@ -619,16 +627,137 @@ class Config(Module.Config): # If > 0, keeps at least one checkpoint every N steps. keep_every_n_steps: Optional[int] = None # Interval between garbage collection runs. - gc_loop_interval_seconds: float = 60 + gc_loop_interval_seconds: Optional[float] = 60...
Not sure if you intend to leave this else here.
axlearn
github_2023
python
627
apple
cpgaffney1
@@ -619,16 +627,137 @@ class Config(Module.Config): # If > 0, keeps at least one checkpoint every N steps. keep_every_n_steps: Optional[int] = None # Interval between garbage collection runs. - gc_loop_interval_seconds: float = 60 + gc_loop_interval_seconds: Optional[float] = 60...
Redundant accesses of `latest_step`
axlearn
github_2023
python
627
apple
cpgaffney1
@@ -619,16 +627,137 @@ class Config(Module.Config): # If > 0, keeps at least one checkpoint every N steps. keep_every_n_steps: Optional[int] = None # Interval between garbage collection runs. - gc_loop_interval_seconds: float = 60 + gc_loop_interval_seconds: Optional[float] = 60...
Logs are a bit redundant since orbax is already logging this info.
axlearn
github_2023
python
627
apple
markblee
@@ -619,16 +626,133 @@ class Config(Module.Config): # If > 0, keeps at least one checkpoint every N steps. keep_every_n_steps: Optional[int] = None # Interval between garbage collection runs. - gc_loop_interval_seconds: float = 60 + gc_loop_interval_seconds: Optional[float] = 60...
Hi @jiya-zhang, [this PR](https://github.com/apple/axlearn/pull/635) introduces a `BaseCheckpointer` interface to decouple the default checkpointer implementation from the orbax one. In particular, some of the utils previously assumed something about the checkpoint layout (e.g. the presence of a file named "index" ...
axlearn
github_2023
others
250
apple
markblee
@@ -0,0 +1,71 @@ +# This is a script to set up a brand new GCP project before you use AXLearn tools"""
```suggestion # Sets up a brand new GCP project for AXLearn. See also the "Getting Started" docs linked in the main readme. ``` Should we move it under https://github.com/apple/axlearn/tree/main/axlearn/cloud/gcp/scripts?
axlearn
github_2023
others
250
apple
markblee
@@ -0,0 +1,71 @@ +# This is a script to set up a brand new GCP project before you use AXLearn tools""" +# +# Usage: +# # fill out environment variables below +# chmod +x project_setup.sh +# ./project_setup.sh + + +#!/bin/sh + +set -e +#set -x
Is this commented line intentional?
axlearn
github_2023
others
250
apple
markblee
@@ -653,7 +653,7 @@ AXLearn comes with tooling for provisioning and launching training on public clo ### Pre-requisites We assume you have: -1. A Google Cloud Platform (GCP) project with TPU quota. +1. A Google Cloud Platform (GCP) project with TPU quota. To set up a brand new GCP project, please follow [these inst...
Maybe we can directly link to the script here. The script itself should have instructions on how it should be run at the top of the file, mitigating possible out-of-sync issues. WDYT?
axlearn
github_2023
others
250
apple
ryanoceros-g
@@ -653,7 +653,7 @@ AXLearn comes with tooling for provisioning and launching training on public clo ### Pre-requisites We assume you have: -1. A Google Cloud Platform (GCP) project with TPU quota. +1. A Google Cloud Platform (GCP) project with TPU quota. To set up a brand new GCP project, please run [this script](...
In addition to inline in project_setup.sh, it may be worth noting here that this project setup does not grant TPU quota.
axlearn
github_2023
others
250
apple
ryanoceros-g
@@ -0,0 +1,76 @@ +# Sets up a brand new GCP project for AXLearn. See also the "Getting Started" docs linked in the main readme. +# +# Usage: +# # fill out environment variables below +# chmod +x project_setup.sh +# ./project_setup.sh + +# This will provision the following resources: +# * A new GCP project u...
i'd recommend putting notes on TPU quota/region at or near the top
axlearn
github_2023
others
250
apple
ryanoceros-g
@@ -0,0 +1,76 @@ +# Sets up a brand new GCP project for AXLearn. See also the "Getting Started" docs linked in the main readme. +# +# Usage: +# # fill out environment variables below +# chmod +x project_setup.sh +# ./project_setup.sh + +# This will provision the following resources: +# * A new GCP project u...
Is there some reason we are using alpha here?
axlearn
github_2023
others
250
apple
samos123
@@ -0,0 +1,79 @@ +# Sets up a brand new GCP project for AXLearn. See also the "Getting Started" docs linked in the main readme. +# +# Usage: +# # fill out environment variables below +# chmod +x project_setup.sh +# ./project_setup.sh + +# This will provision the following resources: +# * A new GCP project u...
I think we should assume the project has been created and a billing account has been linked. In most organizations this is a different team.
axlearn
github_2023
others
250
apple
samos123
@@ -0,0 +1,79 @@ +# Sets up a brand new GCP project for AXLearn. See also the "Getting Started" docs linked in the main readme. +# +# Usage: +# # fill out environment variables below +# chmod +x project_setup.sh +# ./project_setup.sh + +# This will provision the following resources: +# * A new GCP project u...
I think we should assume the project has been created and a billing account has been linked. In most organizations this is a different team.
axlearn
github_2023
others
250
apple
samos123
@@ -0,0 +1,79 @@ +# Sets up a brand new GCP project for AXLearn. See also the "Getting Started" docs linked in the main readme. +# +# Usage: +# # fill out environment variables below +# chmod +x project_setup.sh +# ./project_setup.sh + +# This will provision the following resources: +# * A new GCP project u...
bundle this in an if statement that checks if the resource already exists, so it becomes idempotent
axlearn
github_2023
others
250
apple
samos123
@@ -0,0 +1,79 @@ +# Sets up a brand new GCP project for AXLearn. See also the "Getting Started" docs linked in the main readme. +# +# Usage: +# # fill out environment variables below +# chmod +x project_setup.sh +# ./project_setup.sh + +# This will provision the following resources: +# * A new GCP project u...
bundle this in an if statement that checks if the resource already exists, so it becomes idempotent
axlearn
github_2023
others
250
apple
samos123
@@ -0,0 +1,79 @@ +# Sets up a brand new GCP project for AXLearn. See also the "Getting Started" docs linked in the main readme. +# +# Usage: +# # fill out environment variables below +# chmod +x project_setup.sh +# ./project_setup.sh + +# This will provision the following resources: +# * A new GCP project u...
bundle this in an if statement that checks if the resource already exists, so it becomes idempotent
axlearn
github_2023
others
250
apple
samos123
@@ -0,0 +1,79 @@ +# Sets up a brand new GCP project for AXLearn. See also the "Getting Started" docs linked in the main readme. +# +# Usage: +# # fill out environment variables below +# chmod +x project_setup.sh +# ./project_setup.sh + +# This will provision the following resources: +# * A new GCP project un...
This is how you would do this: ``` if ! gcloud storage buckets describe "${PERMANENT_BUCKET_NAME}" -q >/dev/null; then gcloud storage buckets create gs://$PERMANENT_BUCKET_NAME --location=$BUCKET_REGION --uniform-bucket-level-access fi ```
axlearn
github_2023
python
716
apple
samos123
@@ -13,6 +13,11 @@ "--xla_tpu_spmd_rng_bit_generator_unsafe=1", # SPMD partition-aware RngBitGenerator. "--xla_tpu_enable_latency_hiding_scheduler=true", # Try to schedule ops efficiently. "--xla_tpu_perform_spmd_cse_prevention=false", # b/229655601: prevent OOM on gpt2-small-repeat. + # If MegaSca...
does this work on all tpu versions?
axlearn
github_2023
python
716
apple
changlan
@@ -32,6 +37,7 @@ # concurrently with gradient computation for the following layer. "--xla_tpu_enable_data_parallel_all_reduce_opt=true", "--xla_tpu_data_parallel_opt_different_sized_ops=true", +
revert?
axlearn
github_2023
python
716
apple
ruomingp
@@ -13,6 +13,11 @@ "--xla_tpu_spmd_rng_bit_generator_unsafe=1", # SPMD partition-aware RngBitGenerator. "--xla_tpu_enable_latency_hiding_scheduler=true", # Try to schedule ops efficiently. "--xla_tpu_perform_spmd_cse_prevention=false", # b/229655601: prevent OOM on gpt2-small-repeat. + # If MegaSca...
What's the granularity of the termination? Is it restarting the node encountering the error or all nodes in the slice or job?
axlearn
github_2023
python
716
apple
nstogner
@@ -13,6 +13,11 @@ "--xla_tpu_spmd_rng_bit_generator_unsafe=1", # SPMD partition-aware RngBitGenerator. "--xla_tpu_enable_latency_hiding_scheduler=true", # Try to schedule ops efficiently. "--xla_tpu_perform_spmd_cse_prevention=false", # b/229655601: prevent OOM on gpt2-small-repeat. + # If MegaSca...
What are the mechanics of triggering a node restart? By node I am assuming this means Kubernetes Node?
axlearn
github_2023
python
718
apple
markblee
@@ -6,35 +6,17 @@ import os import sys -instance_type = os.environ.get("TPU_TYPE", "none") - -# Set LIBTPU_INIT_ARGS before importing jax! -libtpu_init_args = [ - "--xla_tpu_spmd_rng_bit_generator_unsafe=1", # SPMD partition-aware RngBitGenerator. - "--xla_tpu_enable_latency_hiding_scheduler=true", # Try to...
How come `infer_tpu_type` (called from `default_xla_options`) doesn't raise when `instance_type="gpu"`? LMK what I missed.
axlearn
github_2023
python
721
apple
markblee
@@ -3816,23 +3817,13 @@ def forward( self, data: Tensor, *, - self_attention_logit_biases: Optional[Tensor] = None, - cross_attention_data: Optional[Tensor] = None, - cross_attention_logit_biases: Optional[Tensor] = None, return_aux: Optional[set[str]] = None, + ...
Should we retain the comment?
axlearn
github_2023
python
714
apple
markblee
@@ -1808,11 +1819,15 @@ def _compute_attention( k_proj: [batch_size, source_length, num_heads, per_head_dim]. v_proj: [batch_size, source_length, num_heads, per_head_dim]. attention_logit_biases: See ``On attention logit biases`` in the file comments. + segment_ids: See...
Rather than silently ignoring, should we warn if segment_ids is not None?
axlearn
github_2023
python
714
apple
markblee
@@ -2061,18 +2079,11 @@ def _compute_attention( k_proj: Tensor, v_proj: Tensor, attention_logit_biases: Optional[Tensor] = None, + segment_ids: Optional[Tensor] = None, ) -> tuple[Tensor, Tensor]: - """Computes attention context and probs. - - Args: - q_pr...
Likewise?
axlearn
github_2023
python
714
apple
markblee
@@ -2984,13 +3003,16 @@ def _forward_for_mode( None, self.self_attention( target=data, + segment_ids=segment_ids, source=self_attention_kv_state, attention_logit_biases=self_attention_logit_biases, ...
In theory we could prefill on multiple segments -- maybe `NotImplementedError` is more suitable, but feel free to leave as-is.
axlearn
github_2023
python
714
apple
markblee
@@ -129,6 +131,13 @@ def _compute_attention( k_proj = self._repeat_kv_heads(k_proj) v_proj = self._repeat_kv_heads(v_proj) + if attention_logit_biases is not None and segment_ids is not None:
Where does this limitaiton come from?
axlearn
github_2023
python
714
apple
markblee
@@ -176,6 +185,8 @@ def _compute_attention( cfg.mha_dim_to_partition_spec["bsnh"], # Bias [batch_size, num_heads, seq_len, seq_len]. cfg.mha_dim_to_partition_spec["bnts"], + # Segment IDs [batch_size, seq_len]
```suggestion # Segment IDs [batch_size, seq_len]. ```
axlearn
github_2023
python
714
apple
markblee
@@ -187,7 +190,7 @@ def forward( Raises: ValueError: If key & value are an invalid combination. """ - + del segment_ids
Same here as above.
axlearn
github_2023
python
714
apple
markblee
@@ -2943,6 +2960,7 @@ def _forward_for_mode( self_attention_logit_biases: Optional[Tensor] = None, cross_attention_data: Optional[Tensor] = None, cross_attention_logit_biases: Optional[Tensor] = None, + segment_ids: Optional[Tensor] = None,
Have we considered calling this e.g. `target_segment_ids` to disambiguate with potential cross attention?
axlearn
github_2023
python
714
apple
markblee
@@ -1808,11 +1828,19 @@ def _compute_attention( k_proj: [batch_size, source_length, num_heads, per_head_dim]. v_proj: [batch_size, source_length, num_heads, per_head_dim]. attention_logit_biases: See ``On attention logit biases`` in the file comments. + segment_ids: See...
Would it be more accurate to say: ```suggestion if segment_ids is not None: raise ValueError( "segment_ids is not supported. To use segment_ids, construct attention_logit_biases using an " "AttentionLogitBiasLayer." ) ``` since providing `segment_i...
axlearn
github_2023
python
714
apple
ruomingp
@@ -1748,11 +1757,21 @@ def _forward_for_mode( causal_mask.astype(q_proj.dtype), attention_logit_biases, ) + + # Merge segment ids into attention_logit_biases if attention_logit_biases is already set. + if attention_logit_biases is not None and se...
I wonder if this merging should happen in `_compute_attention`. Please see my other comments.
axlearn
github_2023
python
714
apple
ruomingp
@@ -1808,11 +1828,18 @@ def _compute_attention( k_proj: [batch_size, source_length, num_heads, per_head_dim]. v_proj: [batch_size, source_length, num_heads, per_head_dim]. attention_logit_biases: See ``On attention logit biases`` in the file comments. + segment_ids: See...
Here we can merge or convert `segment_ids` into `attention_logit_biases`. This allows the caller to pass only `segment_ids=..., attention_logit_biases=None` into MultiheadAttention.
axlearn
github_2023
python
714
apple
ruomingp
@@ -2061,18 +2091,15 @@ def _compute_attention( k_proj: Tensor, v_proj: Tensor, attention_logit_biases: Optional[Tensor] = None, + segment_ids: Optional[Tensor] = None, ) -> tuple[Tensor, Tensor]: - """Computes attention context and probs. - - Args: - q_pr...
Likewise here.
axlearn
github_2023
python
714
apple
ruomingp
@@ -129,6 +130,13 @@ def _compute_attention( k_proj = self._repeat_kv_heads(k_proj) v_proj = self._repeat_kv_heads(v_proj) + if attention_logit_biases is not None and segment_ids is not None: + raise ValueError( + "Using both segment_ids and attention_logit_biases is...
Here we can merge segment_ids into attention_logit_biases.
axlearn
github_2023
python
714
apple
markblee
@@ -178,6 +181,8 @@ def forward( key: an optional Tensor of shape [batch, source_length, source_dim]. value: an optional Tensor of shape [batch, source_length, source_dim]. attention_logit_biases: See ``On attention logit biases`` in the file comments. + segment_ids:...
We may want to fix some of these docstrings too, since they are now used.
axlearn
github_2023
python
714
apple
markblee
@@ -263,6 +274,7 @@ def forward( target: a Tensor of shape [batch, target_length, target_dim]. source: None, uses norm(target) as source for self-attention attention_logit_biases: See ``On attention logit biases`` in the file comments. + segment_ids: Not used. See `On s...
Likewise.
axlearn
github_2023
python
699
apple
markblee
@@ -9,9 +9,25 @@ Following https://platform.openai.com/docs/guides/function-calling for target message. + +The file contains the code for several tool use metrics: +* Standard tool use metrics +* Lenient tool use metric +* Bag of word tool use metric. + +The lenient matching is similar to the standard metric. It pe...
Should this read: ```suggestion * Removes punctuations. ``` ?
axlearn
github_2023
python
699
apple
markblee
@@ -9,9 +9,25 @@ Following https://platform.openai.com/docs/guides/function-calling for target message. + +The file contains the code for several tool use metrics: +* Standard tool use metrics +* Lenient tool use metric +* Bag of word tool use metric. + +The lenient matching is similar to the standard metric. It pe...
```suggestion The bag of word tool use metric transforms the argument strings in the same way as the ```
axlearn
github_2023
python
699
apple
markblee
@@ -9,9 +9,25 @@ Following https://platform.openai.com/docs/guides/function-calling for target message. + +The file contains the code for several tool use metrics: +* Standard tool use metrics +* Lenient tool use metric +* Bag of word tool use metric.
```suggestion * Bag of word (BOW) tool use metric. ```
axlearn
github_2023
python
699
apple
markblee
@@ -232,6 +249,128 @@ def _compare_tool_calls( return True +@dataclasses.dataclass +class DetailedMatchResult: + """Represents the tool matches for different metrics.""" + + func_name_match: bool = False + strict_arg_match: bool = False + lenient_arg_match: bool = False + lenient_bow_arg_match: ...
```suggestion Note that this function returns the matching results for every predicted tool call. ``` I don't think we need to document what happens in the caller here.
axlearn
github_2023
python
699
apple
markblee
@@ -232,6 +249,128 @@ def _compare_tool_calls( return True +@dataclasses.dataclass +class DetailedMatchResult: + """Represents the tool matches for different metrics."""
It's probably worth clarifying in this docstring how users should interpret the fields, e.g. `lenient` vs `lenient_bow`. ```suggestion """Represents the tool matches for different metrics. Attributes: func_name_match: The predicted function name matches the target function name. ... ...
axlearn
github_2023
python
699
apple
markblee
@@ -232,6 +249,128 @@ def _compare_tool_calls( return True +@dataclasses.dataclass +class DetailedMatchResult: + """Represents the tool matches for different metrics.""" + + func_name_match: bool = False + strict_arg_match: bool = False + lenient_arg_match: bool = False + lenient_bow_arg_match: ...
When do we expect this case? Add a comment?
axlearn
github_2023
python
699
apple
markblee
@@ -232,6 +249,128 @@ def _compare_tool_calls( return True +@dataclasses.dataclass +class DetailedMatchResult: + """Represents the tool matches for different metrics.""" + + func_name_match: bool = False + strict_arg_match: bool = False + lenient_arg_match: bool = False + lenient_bow_arg_match: ...
```suggestion except (json.JSONDecodeError, KeyError) as e: logging.error("Unable to decode arguments from target call %s: %s", t["function"], e) ``` Also clarify in the docstring that any pred/target tool calls which fail to decode are ignored/skipped.
axlearn
github_2023
python
699
apple
markblee
@@ -232,6 +249,128 @@ def _compare_tool_calls( return True +@dataclasses.dataclass +class DetailedMatchResult: + """Represents the tool matches for different metrics.""" + + func_name_match: bool = False + strict_arg_match: bool = False + lenient_arg_match: bool = False + lenient_bow_arg_match: ...
Clarify the intended format of each tool, including the expected keys.
axlearn
github_2023
python
699
apple
markblee
@@ -232,6 +249,128 @@ def _compare_tool_calls( return True +@dataclasses.dataclass +class DetailedMatchResult: + """Represents the tool matches for different metrics.""" + + func_name_match: bool = False + strict_arg_match: bool = False + lenient_arg_match: bool = False + lenient_bow_arg_match: ...
We don't need the equivalent check for key error as with `target_tool_calls`?
axlearn
github_2023
python
699
apple
markblee
@@ -232,6 +249,128 @@ def _compare_tool_calls( return True +@dataclasses.dataclass +class DetailedMatchResult: + """Represents the tool matches for different metrics.""" + + func_name_match: bool = False + strict_arg_match: bool = False + lenient_arg_match: bool = False + lenient_bow_arg_match: ...
The definition of `DetailedMatchResult` suggests that a function can match on args but not function name, for example: ``` DetailedMatchResult( func_name_match=False, strict_arg_match=True, ... ) ``` If this configuration can never be the case, we should clarify the behavior somewhere.
axlearn
github_2023
python
699
apple
markblee
@@ -232,6 +249,128 @@ def _compare_tool_calls( return True +@dataclasses.dataclass +class DetailedMatchResult: + """Represents the tool matches for different metrics.""" + + func_name_match: bool = False + strict_arg_match: bool = False + lenient_arg_match: bool = False + lenient_bow_arg_match: ...
```suggestion A list of DetailedMatchResults with the same length as `pred_tool_calls`. Each result indicates whether the corresponding predicted tool call matches any target tool call. ```
axlearn
github_2023
python
699
apple
markblee
@@ -232,6 +249,128 @@ def _compare_tool_calls( return True +@dataclasses.dataclass +class DetailedMatchResult: + """Represents the tool matches for different metrics.""" + + func_name_match: bool = False + strict_arg_match: bool = False + lenient_arg_match: bool = False + lenient_bow_arg_match: ...
It seems that it's possible for the matches for the same result to correspond to different target funcs. If this is intended, please clarify in the docstring.
axlearn
github_2023
python
699
apple
markblee
@@ -294,11 +444,21 @@ def get_tool_calls_from_message(message: dict[str, Any]) -> list[dict[str, Any]] # If the content is empty and there are no tool or function calls we usually have # a generation error. In this case, there is no content field generated, but # sometimes an erro...
Remove this?
axlearn
github_2023
python
699
apple
markblee
@@ -294,11 +444,21 @@ def get_tool_calls_from_message(message: dict[str, Any]) -> list[dict[str, Any]] # If the content is empty and there are no tool or function calls we usually have # a generation error. In this case, there is no content field generated, but # sometimes an erro...
```suggestion ```
axlearn
github_2023
python
699
apple
markblee
@@ -312,13 +472,25 @@ def get_tool_calls_from_message(message: dict[str, Any]) -> list[dict[str, Any]] and len(target.tool_calls) == len(pred.tool_calls) ): pred_tool_calls = get_tool_calls_from_message(pred.model_dump()) - target_tool_calls = get_tool_calls...
Remove the commented code and clarify the comment on L483 with full sentences.
axlearn
github_2023
python
699
apple
markblee
@@ -342,4 +514,9 @@ def get_tool_calls_from_message(message: dict[str, Any]) -> list[dict[str, Any]] "number_of_examples": len(responses), "number_of_parsing_errors": number_of_parsing_errors, "number_of_generation_errors": number_of_generation_errors, + "func_name_accuracy": _safe_div...
We can probably just do ```suggestion "func_name_accuracy": total_func_name_matches / max(1, total_tool_calls), ```
axlearn
github_2023
python
699
apple
markblee
@@ -232,6 +249,138 @@ def _compare_tool_calls( return True +@dataclasses.dataclass +class DetailedMatchResult: + """Represents the tool matches for different metrics. + + Attributes: + func_name_match: The predicted function name matches the target function name. + strict_arg_match: The pre...
```suggestion the lenient bag-of-word comparison. ```
axlearn
github_2023
python
699
apple
markblee
@@ -267,7 +416,18 @@ def get_tool_calls_from_message(message: dict[str, Any]) -> list[dict[str, Any]] new_tool_calls.append(tool_call) return new_tool_calls + def _safe_div(dividend: int, divisor: int) -> float: + return dividend / max(1, divisor)
Let's inline this fn?
axlearn
github_2023
python
699
apple
markblee
@@ -232,6 +249,165 @@ def _compare_tool_calls( return True +@dataclasses.dataclass +class DetailedMatchResult:
If this is internal, consider marking it as private: ```suggestion class _DetailedMatchResult: ```
axlearn
github_2023
python
699
apple
markblee
@@ -267,7 +443,18 @@ def get_tool_calls_from_message(message: dict[str, Any]) -> list[dict[str, Any]] new_tool_calls.append(tool_call) return new_tool_calls + def _safe_div(dividend: int, divisor: int) -> float: + return dividend / max(1, divisor) + total_matches = 0 + + # The ...
Comments should end with punctuation.
axlearn
github_2023
python
699
apple
markblee
@@ -296,9 +483,17 @@ def get_tool_calls_from_message(message: dict[str, Any]) -> list[dict[str, Any]] # sometimes an error field. number_of_generation_errors += 1 pred_tool_calls, target_tool_calls = None, None + + target = OpenAIClient.format_message(target_message) + + ...
Same comment as above.
axlearn
github_2023
python
699
apple
markblee
@@ -0,0 +1,307 @@ +# Copyright © 2024 Apple Inc. +"""Utilities for the detailed tool use metrics.""" + +import re +import string +from typing import Dict, List, Union + +from typing_extensions import TypeAlias + +Value = Union[str, int, bool, float] +ValueOrListOf: TypeAlias = Union[Value, List[Value]] + +_STOP_WORDS =...
Slicing `words` creates a copy each iteration; should we just keep track of indices and slice at the end?
axlearn
github_2023
python
699
apple
markblee
@@ -0,0 +1,307 @@ +# Copyright © 2024 Apple Inc. +"""Utilities for the detailed tool use metrics.""" + +import re +import string +from typing import Dict, List, Union + +from typing_extensions import TypeAlias + +Value = Union[str, int, bool, float] +ValueOrListOf: TypeAlias = Union[Value, List[Value]]
```suggestion ValueOrListOf: TypeAlias = Union[Value, list[Value]] ``` nit -- we are trying to move away from the deprecated typing annotations.
axlearn
github_2023
python
699
apple
markblee
@@ -0,0 +1,307 @@ +# Copyright © 2024 Apple Inc. +"""Utilities for the detailed tool use metrics.""" + +import re +import string +from typing import Dict, List, Union + +from typing_extensions import TypeAlias + +Value = Union[str, int, bool, float] +ValueOrListOf: TypeAlias = Union[Value, List[Value]] + +_STOP_WORDS =...
```suggestion assert threshold > 0, "Bag of words string matching threshold must be above 0." ``` It's preferable to raise a ValueError for caller-provided values. Asserts are usually used for catching logical bugs.
axlearn
github_2023
python
699
apple
markblee
@@ -0,0 +1,307 @@ +# Copyright © 2024 Apple Inc. +"""Utilities for the detailed tool use metrics.""" + +import re +import string +from typing import Dict, List, Union + +from typing_extensions import TypeAlias + +Value = Union[str, int, bool, float] +ValueOrListOf: TypeAlias = Union[Value, List[Value]] + +_STOP_WORDS =...
```suggestion threshold: Threshold to be compared with the ratio (# unique common words) / (# unique pred_str words). The predicted string is considered to match the target if the ratio is greater than or equal to this threshold. ```
axlearn
github_2023
python
699
apple
markblee
@@ -0,0 +1,307 @@ +# Copyright © 2024 Apple Inc. +"""Utilities for the detailed tool use metrics.""" + +import re +import string +from typing import Dict, List, Union + +from typing_extensions import TypeAlias + +Value = Union[str, int, bool, float] +ValueOrListOf: TypeAlias = Union[Value, List[Value]] + +_STOP_WORDS =...
What if target word set is also empty?
axlearn
github_2023
python
699
apple
markblee
@@ -0,0 +1,307 @@ +# Copyright © 2024 Apple Inc. +"""Utilities for the detailed tool use metrics.""" + +import re +import string +from typing import Dict, List, Union + +from typing_extensions import TypeAlias + +Value = Union[str, int, bool, float] +ValueOrListOf: TypeAlias = Union[Value, List[Value]] + +_STOP_WORDS =...
```suggestion def _match_strings_bag_of_words(*, pred_str: str, target_str: str, threshold: float = 1.0) -> bool: ```
axlearn
github_2023
python
699
apple
markblee
@@ -0,0 +1,307 @@ +# Copyright © 2024 Apple Inc. +"""Utilities for the detailed tool use metrics.""" + +import re +import string +from typing import Dict, List, Union + +from typing_extensions import TypeAlias + +Value = Union[str, int, bool, float] +ValueOrListOf: TypeAlias = Union[Value, List[Value]] + +_STOP_WORDS =...
```suggestion def _is_arg_value_equal( *, ```
axlearn
github_2023
python
699
apple
markblee
@@ -0,0 +1,307 @@ +# Copyright © 2024 Apple Inc. +"""Utilities for the detailed tool use metrics.""" + +import re +import string +from typing import Dict, List, Union + +from typing_extensions import TypeAlias + +Value = Union[str, int, bool, float] +ValueOrListOf: TypeAlias = Union[Value, List[Value]] + +_STOP_WORDS =...
```suggestion def check_arguments( *, pred_args: dict[str, ValueOrListOf], target_args: dict[str, ValueOrListOf], check_lenient: bool = False, bag_of_words: bool = False, ) -> bool: ``` FWIW, this kind of API with multiple bool flags is generally discouraged: https://docs.google.com/docum...
axlearn
github_2023
python
699
apple
markblee
@@ -0,0 +1,307 @@ +# Copyright © 2024 Apple Inc. +"""Utilities for the detailed tool use metrics.""" + +import re +import string +from typing import Dict, List, Union + +from typing_extensions import TypeAlias + +Value = Union[str, int, bool, float] +ValueOrListOf: TypeAlias = Union[Value, List[Value]] + +_STOP_WORDS =...
nit -- it may be more readable to handle the other case first: ``` if not check_lenient: return pred_arg == target_arg # ... handle the longer case. ```
axlearn
github_2023
python
699
apple
markblee
@@ -0,0 +1,334 @@ +# Copyright © 2024 Apple Inc. +"""Utilities for the detailed tool use metrics.""" + +import re +import string +from enum import Enum +from typing import Union + +from typing_extensions import TypeAlias + +Value = Union[str, int, bool, float] +ValueOrListOf: TypeAlias = Union[Value, list[Value]] + +_S...
Fix the docstring?
axlearn
github_2023
python
699
apple
markblee
@@ -0,0 +1,334 @@ +# Copyright © 2024 Apple Inc. +"""Utilities for the detailed tool use metrics.""" + +import re +import string +from enum import Enum +from typing import Union + +from typing_extensions import TypeAlias + +Value = Union[str, int, bool, float] +ValueOrListOf: TypeAlias = Union[Value, list[Value]] + +_S...
As usual, any public APIs should have a docstring.