repo_name stringlengths 1 62 | dataset stringclasses 1 value | lang stringclasses 11 values | pr_id int64 1 20.1k | owner stringlengths 2 34 | reviewer stringlengths 2 39 | diff_hunk stringlengths 15 262k | code_review_comment stringlengths 1 99.6k |
|---|---|---|---|---|---|---|---|
lm-human-preference-details | github_2023 | python | 19 | vwxyzjn | lewtun | @@ -510,6 +514,37 @@ def train(args: Args):
)
dataloader = DataLoader(dataset, batch_size=args.ppo.local_batch_size)
policy, optimizer, dataloader = accelerator.prepare(policy, optimizer, dataloader)
+ if args.deepspeed:
+ import deepspeed
+
+ deepspeed_states = AcceleratorState().deepspeed_plugin
+ deepspeed_states.deepspeed_config['train_micro_batch_size_per_gpu'] = args.ppo.local_micro_batch_size | If I'm not mistaken, these config values are set automatically by the accelerator and don't need to be overridden |
lm-human-preference-details | github_2023 | python | 19 | vwxyzjn | lewtun | @@ -510,6 +514,37 @@ def train(args: Args):
)
dataloader = DataLoader(dataset, batch_size=args.ppo.local_batch_size)
policy, optimizer, dataloader = accelerator.prepare(policy, optimizer, dataloader)
+ if args.deepspeed:
+ import deepspeed
+
+ deepspeed_states = AcceleratorState().deepspeed_plugin
+ deepspeed_states.deepspeed_config['train_micro_batch_size_per_gpu'] = args.ppo.local_micro_batch_size
+ deepspeed_states.deepspeed_config['checkpoint'] = {'use_node_local_storage': True}
+ off_load_device = "cpu"
+ stage = 3
+ eval_ds_config = {
+ "train_micro_batch_size_per_gpu": deepspeed_states.deepspeed_config['train_micro_batch_size_per_gpu'],
+ "steps_per_print": 10,
+ # "zero_optimization": {
+ # "stage": stage,
+ # "stage3_param_persistence_threshold": 1e4,
+ # "offload_param": {
+ # "device": off_load_device
+ # }
+ # },
+ "bf16": {
+ "enabled": True
+ },
+ "prescale_gradients": False, | I think this flag and the one below are false by default, so probably don't need to be set either |
lm-human-preference-details | github_2023 | python | 19 | vwxyzjn | lewtun | @@ -755,7 +790,8 @@ def train(args: Args):
)
with torch.no_grad():
- writer.add_histogram("ppo/val/ratio_hist", ratio, update)
+ if not args.deepspeed: # for some reason there is a OOM with the `writer.add_histogram`
+ writer.add_histogram("ppo/val/ratio_hist", ratio, update) | FYI I was able to train 7B models in TRL with ZeRO-2 and didn't need to remove the histogram. On the other hand that was for sentiment tuning, which is less memory intensive than your application here |
lm-human-preference-details | github_2023 | python | 18 | vwxyzjn | vwxyzjn | @@ -0,0 +1,975 @@
+import functools
+import os
+import time
+from dataclasses import asdict, dataclass, field
+from types import SimpleNamespace
+from typing import List, Optional
+
+import einops
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+import optax
+import orbax
+import tyro
+from flax import jax_utils
+from flax.training import common_utils, orbax_utils
+from flax.training.train_state import TrainState
+from optax import ScaleByAdamState, update_moment, update_moment_per_elem_norm
+from optax._src import base, combine, numerics, utils
+from optax._src.alias import _scale_by_learning_rate
+from rich.console import Console
+from rich.pretty import pprint
+from torch.utils.data import DataLoader, IterableDataset
+from torch.utils.tensorboard import SummaryWriter
+from transformers import AutoTokenizer, FlaxAutoModelForCausalLM, GenerationConfig
+
+from lm_human_preference_details.data import DATASET
+
+
+@dataclass
+class AdaptiveKLParams:
+ target: float = 6.0
+ horizon: int = 10000 # in episodes
+
+
+@dataclass
+class RewardHParams:
+ kl_coef: float = 0.15
+ adaptive_kl: Optional[AdaptiveKLParams] = field(default_factory=AdaptiveKLParams)
+ trained_model: Optional[str] = "models/"
+ label_dataset: tyro.conf.Suppress[Optional[str]] = None
+
+
+@dataclass
+class PpoHParams:
+ total_episodes: int = 1000000
+ local_batch_size: int = 64
+ local_mini_batch_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ mini_batch_size: tyro.conf.Suppress[int] = None
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ minibatch_size: tyro.conf.Suppress[int] = None
+ num_updates: tyro.conf.Suppress[int] = None
+ nminibatches: int = 1
+ noptepochs: int = 4
+ lr: float = 0.00001
+ eps: float = 1e-5
+ vf_coef: float = 0.1
+ cliprange: float = 0.2
+ cliprange_value: float = 0.2
+ gamma: float = 1
+ lam: float = 0.95
+ whiten_rewards: bool = True
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # Truncate response after the first occurrence of this token at or after index after when sampling.
+ truncate_token: int = 13
+ truncate_after: int = 16
+ penalty_reward_value: int = -1
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "cleanrl"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ print_sample_output_freq: int = 0
+ """How often to print sample output"""
+ save_path: str = "models/policy/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = True
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ rewards: RewardHParams = field(default_factory=RewardHParams)
+ ppo: PpoHParams = field(default_factory=PpoHParams)
+
+ # distributed settings
+ local_rank: int = 0
+ """the rank of this process"""
+ learner_device_ids: List[int] = field(default_factory=lambda: [0])
+ "the device ids that script will use"
+ learner_devices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the devices that script will use"""
+ global_learner_decices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the total devices (across all nodes and machines) that script will use"""
+
+
+def scale_by_adam_tf_style(
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+) -> base.GradientTransformation:
+ """Rescale updates according to the Adam algorithm.
+
+ References:
+ [Kingma et al, 2014](https://arxiv.org/abs/1412.6980)
+
+ Args:
+ b1: Decay rate for the exponentially weighted average of grads.
+ b2: Decay rate for the exponentially weighted average of squared grads.
+ eps: Term added to the denominator to improve numerical stability.
+ eps_root: Term added to the denominator inside the square-root to improve
+ numerical stability when backpropagating gradients through the rescaling.
+ mu_dtype: Optional `dtype` to be used for the first order accumulator; if
+ `None` then the `dtype` is inferred from `params` and `updates`.
+
+ Returns:
+ A `GradientTransformation` object.
+ """
+
+ mu_dtype = utils.canonicalize_dtype(mu_dtype)
+
+ def init_fn(params):
+ mu = jax.tree_util.tree_map(lambda t: jnp.zeros_like(t, dtype=mu_dtype), params) # First moment
+ nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Second moment
+ return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)
+
+ def update_fn(updates, state, params=None):
+ del params
+ mu = update_moment(updates, state.mu, b1, 1)
+ nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
+ count_inc = numerics.safe_int32_increment(state.count)
+
+ ### `optax` default adam implementation
+ # mu_hat = bias_correction(mu, b1, count_inc)
+ # nu_hat = bias_correction(nu, b2, count_inc)
+ # updates = jax.tree_util.tree_map(
+ # lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
+ ### Tensorflow adam implementation
+ updates = jax.tree_util.tree_map(
+ lambda m, v: (jnp.sqrt(1 - b2**count_inc) / (1 - b1**count_inc)) * m / (jnp.sqrt(v + eps_root) + eps),
+ mu,
+ nu,
+ ) #
+ mu = utils.cast_tree(mu, mu_dtype)
+ return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)
+
+ return base.GradientTransformation(init_fn, update_fn)
+
+
+def adam_tf_style(
+ learning_rate,
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+):
+ return combine.chain(
+ scale_by_adam_tf_style(b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype),
+ _scale_by_learning_rate(learning_rate),
+ )
+
+
+class AdaptiveKLController:
+ def __init__(self, init_kl_coef: float, hparams: AdaptiveKLParams):
+ self.value = init_kl_coef
+ self.hparams = hparams
+
+ def update(self, current, n_steps):
+ target = self.hparams.target
+ proportional_error = np.clip(current / target - 1, -0.2, 0.2)
+ mult = 1 + proportional_error * n_steps / self.hparams.horizon
+ self.value *= mult
+
+
+def whiten(values, shift_mean=True):
+ # `unbiased=False` matches TF `tf.nn.moments`'s setting
+ mean, var = jnp.mean(values), jnp.var(values)
+ whitened = (values - mean) * jax.lax.rsqrt(var + 1e-8)
+ if not shift_mean:
+ whitened += mean
+ return whitened
+
+
+class ScalarHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=0),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ return x
+
+
+class RewardHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=1 / np.sqrt(self.head_input_size + 1)),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+ x = x * reward_gain + reward_bias
+ return x
+
+
+@flax.struct.dataclass
+class LMBackboneWithScalarHeadParams:
+ """Parameters for the language model backbone and a scalar head."""
+
+ lm_backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+# a pytorch dataset
+class MyDataset(IterableDataset):
+ def __init__(self, generator, tokenizer, query_length, seed, start_text=None, end_text=None):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ # Convert from right padding to left padding.
+ return np.array([[pad_id] * (row == pad_id).sum() + [x for x in row if x != pad_id] for row in tokens])
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a / b}")
+ return q
+
+
+def prepare_reward_forward(args, tokenizer):
+ """Prepare the forward pass of the reward model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ scalar_head = RewardHead(head_input_size=lm_backbone.config.hidden_size)
+
+ def reward_forward(
+ params: LMBackboneWithScalarHeadParams,
+ query_responses_ids: jnp.ndarray,
+ ):
+ """Get reward for each queiry--response pair."""
+ assert query_responses_ids.ndim == 2
+
+ # mask out padding tokens
+ attention_mask = query_responses_ids != tokenizer.pad_token_id
+ query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ reward_latents = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=query_responses_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ ).hidden_states[-1]
+ # reward_latents: [batch_size, length, hidden_size]
+
+ last_reward_latents = reward_latents[:, -1, :]
+ # last_reward_latents: [batch_size, hidden_size]
+
+ reward = scalar_head.apply(variables=params.head_params, x=last_reward_latents)
+ # reward: [batch_size, 1]
+ return reward
+
+ if args.rewards.trained_model:
+ orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()
+ reward_state_params = orbax_checkpointer.restore(args.rewards.trained_model)["reward_model"]["params"]
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": reward_state_params["lm_backbone_params"]["params"]}),
+ head_params=flax.core.FrozenDict({"params": reward_state_params["head_params"]["params"]}),
+ )
+ pprint(f"Loaded pretrained reward model from {args.rewards.trained_model}")
+ else:
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return functools.partial(reward_forward, params=reward_params)
+
+
+def prepare_policy_forward_and_policy_generate(args, tokenizer):
+ """Prepare the forward pass of the policy model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ # disable `pad_token_id` and `eos_token_id` because we just want to
+ # generate tokens without truncation / padding
+ lm_backbone.generation_config.eos_token_id = None
+ lm_backbone.generation_config.pad_token_id = tokenizer.pad_token_id
+ scalar_head = ScalarHead(head_input_size=lm_backbone.config.hidden_size)
+
+ generation_config = GenerationConfig(
+ max_new_tokens=args.task.response_length,
+ min_new_tokens=args.task.response_length,
+ temperature=args.task.temperature,
+ top_k=0.0,
+ top_p=1.0,
+ do_sample=True,
+ pad_token_id=tokenizer.pad_token_id,
+ )
+
+ def policy_forward(
+ params: LMBackboneWithScalarHeadParams,
+ input_ids: jnp.ndarray,
+ ):
+ """Get reward for input_ids."""
+ assert input_ids.ndim == 2
+ # shape: [batch_size, length]
+
+ # mask out padding tokens
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, input_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ lm_backbone_out = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ )
+
+ value_latents = lm_backbone_out.hidden_states[-1]
+ # shape: [batch_size, length, hidden_size]
+
+ values = scalar_head.apply(variables=params.head_params, x=value_latents)
+ # shape: [batch_size, length, 1]
+ return lm_backbone_out, values
+
+ def policy_generate(
+ params: LMBackboneWithScalarHeadParams,
+ queries: jnp.ndarray,
+ ):
+ input_ids = queries
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = lm_backbone.generate(
+ params=params["params"],
+ input_ids=input_ids,
+ generation_config=generation_config,
+ attention_mask=attention_mask.astype("i4"),
+ return_dict_in_generate=True,
+ )
+ context_length = input_ids.shape[1]
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ policy_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return policy_forward, policy_generate, policy_params
+
+
+@flax.struct.dataclass
+class RolloutStatistics:
+ returns: jnp.array
+ values: jnp.array
+ advantage: jnp.array
+ responses: jnp.array
+ query_responses: jnp.array
+ logprobs: jnp.array
+
+
+@flax.struct.dataclass
+class RLStatistics:
+ approxkl: jnp.array
+ entropy: jnp.array
+ pg_loss: jnp.array
+ pg_clipfrac: jnp.array
+ vf_losses1: jnp.array
+ vf_loss: jnp.array
+ vf_clipfrac: jnp.array
+ ratio: jnp.array
+ loss: jnp.array
+
+
+def train_step(policy_state, mb_stats, args):
+ def loss(params):
+ # mb_stats.query_responses: [local_micro_batch_size, query_length + response_length]
+ output, vpred_temp = policy_state.apply_fn(params, mb_stats.query_responses)
+
+ # vpred_temp: [local_micro_batch_size, query_length + response_length, 1]
+ vpred = jnp.squeeze(vpred_temp[:, args.task.query_length - 1 : -1, :], axis=-1)
+ # vpred: [local_micro_batch_size, response_length]
+ vpredclipped = jnp.clip(
+ vpred,
+ mb_stats.values - args.ppo.cliprange_value,
+ mb_stats.values + args.ppo.cliprange_value,
+ )
+ vf_losses1 = jnp.square(vpred - mb_stats.returns)
+ vf_losses2 = jnp.square(vpredclipped - mb_stats.returns)
+ vf_loss = 0.5 * jnp.maximum(vf_losses1, vf_losses2).mean()
+ vf_clipfrac = (vf_losses2 > vf_losses1).astype(jnp.float32).mean()
+
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ logits /= args.task.temperature
+ new_logprobs = -optax.softmax_cross_entropy_with_integer_labels(logits, mb_stats.responses)
+
+ logprobs_diff = new_logprobs - mb_stats.logprobs
+ ratio = jnp.exp(logprobs_diff)
+ pg_losses = -mb_stats.advantage * ratio
+ pg_losses2 = -mb_stats.advantage * jnp.clip(ratio, 1.0 - args.ppo.cliprange, 1.0 + args.ppo.cliprange)
+
+ pg_loss = jnp.maximum(pg_losses, pg_losses2).mean()
+ pg_clipfrac = (pg_losses2 > pg_losses).astype(jnp.float32).mean()
+
+ pd = jax.nn.softmax(logits, axis=-1)
+ entropy = jax.nn.logsumexp(logits, axis=-1) - jnp.sum(pd * logits, axis=-1)
+
+ approxkl = 0.5 * ((logprobs_diff) ** 2).mean()
+ loss = pg_loss + args.ppo.vf_coef * vf_loss
+
+ rl_stats = RLStatistics(
+ vf_loss=vf_loss,
+ vf_clipfrac=vf_clipfrac,
+ pg_loss=pg_loss,
+ pg_clipfrac=pg_clipfrac,
+ approxkl=approxkl,
+ loss=loss,
+ entropy=entropy.mean(),
+ ratio=ratio.mean(),
+ vf_losses1=vf_losses1.mean(),
+ )
+ rl_stats = jax.lax.pmean(rl_stats, "batch")
+ return loss, rl_stats
+
+ grad_fn = jax.value_and_grad(loss, has_aux=True)
+ (loss, rl_stats), grads = grad_fn(policy_state.params)
+ grads = jax.lax.pmean(grads, "batch")
+ policy_state = policy_state.apply_gradients(grads=grads)
+ return policy_state, rl_stats
+
+
+def linear_schedule(optimizer_step, args):
+ """anneal learning rate linearly to reach 0 after one epoch."""
+ update = 1 + optimizer_step // (args.ppo.noptepochs * args.ppo.nminibatches * args.ppo.gradient_accumulation_steps)
+ frac = 1.0 - (update - 1.0) / args.ppo.num_updates
+ lrnow = frac * args.ppo.lr
+ return lrnow
+
+
+def train(args: Args):
+ local_devices = jax.local_devices()
+ global_devices = jax.devices()
+ args.ppo.world_size = jax.process_count()
+ learner_devices = [local_devices[d_id] for d_id in args.learner_device_ids]
+ global_learner_decices = [
+ global_devices[d_id + process_index * len(local_devices)]
+ for process_index in range(args.ppo.world_size)
+ for d_id in args.learner_device_ids
+ ]
+ pprint({"global_learner_decices": global_learner_decices})
+ args.global_learner_decices = [str(item) for item in global_learner_decices]
+ args.learner_devices = [str(item) for item in learner_devices]
+ args.local_rank = jax.process_index()
+ args.ppo.batch_size = int(args.ppo.local_batch_size * len(args.learner_devices) * args.ppo.world_size)
+ args.ppo.minibatch_size = exact_div(args.ppo.batch_size, args.ppo.nminibatches)
+ args.ppo.local_mini_batch_size = exact_div(args.ppo.local_batch_size, args.ppo.nminibatches)
+ args.ppo.local_micro_batch_size = exact_div(args.ppo.local_mini_batch_size, args.ppo.gradient_accumulation_steps)
+ if args.ppo.whiten_rewards:
+ assert (
+ args.ppo.local_mini_batch_size >= 8
+ ), f"Per-rank minibatch size {args.ppo.local_mini_batch_size} is insufficient for whitening"
+ # `per_rank_rollout_batch_size` is our `args.ppo.local_batch_size`
+ # `per_rank_minibatch_size` is our `args.ppo.local_mini_batch_size`
+ args.ppo.num_updates = args.ppo.total_episodes // args.ppo.batch_size
+
+ console = Console(force_terminal=True)
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+ writer = SimpleNamespace() # dummy writer
+ writer.add_scalar = lambda x, y, z: None
+ writer.add_histogram = lambda x, y, z: None
+
+ if args.local_rank == 0:
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+ writer = SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+ local_seed = args.seed + args.local_rank * 100003 # Prime
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.base_model,
+ padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ reward_forward = prepare_reward_forward(args, tokenizer)
+ policy_forward, policy_generate, policy_params = prepare_policy_forward_and_policy_generate(args, tokenizer)
+ _, _, ref_policy_params = prepare_policy_forward_and_policy_generate(args, tokenizer)
+
+ p_whiten_no_shift_mean = jax.pmap(functools.partial(whiten, shift_mean=False))
+ p_whiten_shift_mean = jax.pmap(functools.partial(whiten, shift_mean=True))
+ p_reward_forward = jax.pmap(reward_forward)
+ p_train_step = jax.pmap(
+ functools.partial(train_step, args=args),
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+
+ @jax.pmap
+ def p_get_response_values_and_logprobs(policy_state_params, queries):
+ query_responses = policy_generate(
+ params=policy_state_params.lm_backbone_params,
+ queries=queries,
+ )
+ # query_responses: [local_batch_size, query_length + response_length]
+ responses = query_responses[:, args.task.query_length :]
+
+ output, full_values = policy_forward(policy_state_params, query_responses)
+ values = full_values[:, args.task.query_length - 1 : -1].squeeze(-1)
+ # values: [local_batch_size, response_length]
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ # logits: [local_batch_size, response_length, vocab_size]
+ logits /= args.task.temperature
+ all_logprobs = jax.nn.log_softmax(logits, axis=-1)
+ # all_logprobs: [local_batch_size, response_length, vocab_size]
+ logprobs = jnp.take_along_axis(all_logprobs, responses[..., None], -1).squeeze(-1)
+ # logprobs: [local_batch_size, response_length]
+
+ ref_output, _ = policy_forward(ref_policy_params, query_responses)
+ ref_logits = ref_output.logits[:, args.task.query_length - 1 : -1, :]
+ # ref_logits: [local_batch_size, response_length, vocab_size]
+ ref_logits /= args.task.temperature
+ ref_all_logprobs = jax.nn.log_softmax(ref_logits, axis=-1)
+ ref_logprobs = jnp.take_along_axis(ref_all_logprobs, responses[..., None], -1).squeeze(-1)
+ # ref_logprobs: [local_batch_size, response_length]
+ return query_responses, values, logprobs, ref_logprobs
+
+ if args.use_tensorflow_adam:
+ adam = adam_tf_style
+ else:
+ adam = optax.adam
+
+ optimizer = adam(
+ learning_rate=functools.partial(linear_schedule, args=args),
+ eps=args.ppo.eps,
+ )
+
+ optimizer = optax.MultiSteps(optimizer, args.ppo.gradient_accumulation_steps)
+
+ policy_state = TrainState.create(apply_fn=policy_forward, params=policy_params, tx=optimizer)
+ policy_state = jax_utils.replicate(policy_state)
+
+ del policy_params
+
+ dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=local_seed,
+ start_text=args.task.start_text,
+ end_text=args.task.end_text,
+ )
+ dataloader = DataLoader(dataset, batch_size=args.ppo.batch_size)
+ iter_dataloader = iter(dataloader)
+ kl_ctl = AdaptiveKLController(args.rewards.kl_coef, hparams=args.rewards.adaptive_kl)
+
+ print("===training policy===")
+ global_step = 0
+ approxkls_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ pg_clipfracs_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ pg_losses_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ vf_losses_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ vf_clipfrac_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ entropies_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ for update in range(1, args.ppo.num_updates + 1):
+ global_step += 1 * args.ppo.batch_size
+ data = next(iter_dataloader)
+ queries = right_padding_to_left_padding(data["input_ids"], tokenizer.pad_token_id)
+ queries = common_utils.shard(queries)
+ # queries: [num_device, local_batch_size, query_length]
+
+ query_responses, values, logprobs, ref_logprobs = p_get_response_values_and_logprobs(policy_state.params, queries)
+ responses = query_responses[..., args.task.query_length :] | This should go into the jitted function. |
lm-human-preference-details | github_2023 | python | 18 | vwxyzjn | vwxyzjn | @@ -0,0 +1,975 @@
+import functools
+import os
+import time
+from dataclasses import asdict, dataclass, field
+from types import SimpleNamespace
+from typing import List, Optional
+
+import einops
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+import optax
+import orbax
+import tyro
+from flax import jax_utils
+from flax.training import common_utils, orbax_utils
+from flax.training.train_state import TrainState
+from optax import ScaleByAdamState, update_moment, update_moment_per_elem_norm
+from optax._src import base, combine, numerics, utils
+from optax._src.alias import _scale_by_learning_rate
+from rich.console import Console
+from rich.pretty import pprint
+from torch.utils.data import DataLoader, IterableDataset
+from torch.utils.tensorboard import SummaryWriter
+from transformers import AutoTokenizer, FlaxAutoModelForCausalLM, GenerationConfig
+
+from lm_human_preference_details.data import DATASET
+
+
+@dataclass
+class AdaptiveKLParams:
+ target: float = 6.0
+ horizon: int = 10000 # in episodes
+
+
+@dataclass
+class RewardHParams:
+ kl_coef: float = 0.15
+ adaptive_kl: Optional[AdaptiveKLParams] = field(default_factory=AdaptiveKLParams)
+ trained_model: Optional[str] = "models/"
+ label_dataset: tyro.conf.Suppress[Optional[str]] = None
+
+
+@dataclass
+class PpoHParams:
+ total_episodes: int = 1000000
+ local_batch_size: int = 64
+ local_mini_batch_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ mini_batch_size: tyro.conf.Suppress[int] = None
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ minibatch_size: tyro.conf.Suppress[int] = None
+ num_updates: tyro.conf.Suppress[int] = None
+ nminibatches: int = 1
+ noptepochs: int = 4
+ lr: float = 0.00001
+ eps: float = 1e-5
+ vf_coef: float = 0.1
+ cliprange: float = 0.2
+ cliprange_value: float = 0.2
+ gamma: float = 1
+ lam: float = 0.95
+ whiten_rewards: bool = True
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # Truncate response after the first occurrence of this token at or after index after when sampling.
+ truncate_token: int = 13
+ truncate_after: int = 16
+ penalty_reward_value: int = -1
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "cleanrl"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ print_sample_output_freq: int = 0
+ """How often to print sample output"""
+ save_path: str = "models/policy/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = True
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ rewards: RewardHParams = field(default_factory=RewardHParams)
+ ppo: PpoHParams = field(default_factory=PpoHParams)
+
+ # distributed settings
+ local_rank: int = 0
+ """the rank of this process"""
+ learner_device_ids: List[int] = field(default_factory=lambda: [0])
+ "the device ids that script will use"
+ learner_devices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the devices that script will use"""
+ global_learner_decices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the total devices (across all nodes and machines) that script will use"""
+
+
+def scale_by_adam_tf_style(
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+) -> base.GradientTransformation:
+ """Rescale updates according to the Adam algorithm.
+
+ References:
+ [Kingma et al, 2014](https://arxiv.org/abs/1412.6980)
+
+ Args:
+ b1: Decay rate for the exponentially weighted average of grads.
+ b2: Decay rate for the exponentially weighted average of squared grads.
+ eps: Term added to the denominator to improve numerical stability.
+ eps_root: Term added to the denominator inside the square-root to improve
+ numerical stability when backpropagating gradients through the rescaling.
+ mu_dtype: Optional `dtype` to be used for the first order accumulator; if
+ `None` then the `dtype` is inferred from `params` and `updates`.
+
+ Returns:
+ A `GradientTransformation` object.
+ """
+
+ mu_dtype = utils.canonicalize_dtype(mu_dtype)
+
+ def init_fn(params):
+ mu = jax.tree_util.tree_map(lambda t: jnp.zeros_like(t, dtype=mu_dtype), params) # First moment
+ nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Second moment
+ return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)
+
+ def update_fn(updates, state, params=None):
+ del params
+ mu = update_moment(updates, state.mu, b1, 1)
+ nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
+ count_inc = numerics.safe_int32_increment(state.count)
+
+ ### `optax` default adam implementation
+ # mu_hat = bias_correction(mu, b1, count_inc)
+ # nu_hat = bias_correction(nu, b2, count_inc)
+ # updates = jax.tree_util.tree_map(
+ # lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
+ ### Tensorflow adam implementation
+ updates = jax.tree_util.tree_map(
+ lambda m, v: (jnp.sqrt(1 - b2**count_inc) / (1 - b1**count_inc)) * m / (jnp.sqrt(v + eps_root) + eps),
+ mu,
+ nu,
+ ) #
+ mu = utils.cast_tree(mu, mu_dtype)
+ return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)
+
+ return base.GradientTransformation(init_fn, update_fn)
+
+
+def adam_tf_style(
+ learning_rate,
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+):
+ return combine.chain(
+ scale_by_adam_tf_style(b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype),
+ _scale_by_learning_rate(learning_rate),
+ )
+
+
+class AdaptiveKLController:
+ def __init__(self, init_kl_coef: float, hparams: AdaptiveKLParams):
+ self.value = init_kl_coef
+ self.hparams = hparams
+
+ def update(self, current, n_steps):
+ target = self.hparams.target
+ proportional_error = np.clip(current / target - 1, -0.2, 0.2)
+ mult = 1 + proportional_error * n_steps / self.hparams.horizon
+ self.value *= mult
+
+
+def whiten(values, shift_mean=True):
+ # `unbiased=False` matches TF `tf.nn.moments`'s setting
+ mean, var = jnp.mean(values), jnp.var(values)
+ whitened = (values - mean) * jax.lax.rsqrt(var + 1e-8)
+ if not shift_mean:
+ whitened += mean
+ return whitened
+
+
+class ScalarHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=0),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ return x
+
+
+class RewardHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=1 / np.sqrt(self.head_input_size + 1)),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+ x = x * reward_gain + reward_bias
+ return x
+
+
+@flax.struct.dataclass
+class LMBackboneWithScalarHeadParams:
+ """Parameters for the language model backbone and a scalar head."""
+
+ lm_backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+# a pytorch dataset
+class MyDataset(IterableDataset):
+ def __init__(self, generator, tokenizer, query_length, seed, start_text=None, end_text=None):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ # Convert from right padding to left padding.
+ return np.array([[pad_id] * (row == pad_id).sum() + [x for x in row if x != pad_id] for row in tokens])
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a / b}")
+ return q
+
+
+def prepare_reward_forward(args, tokenizer):
+ """Prepare the forward pass of the reward model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ scalar_head = RewardHead(head_input_size=lm_backbone.config.hidden_size)
+
+ def reward_forward(
+ params: LMBackboneWithScalarHeadParams,
+ query_responses_ids: jnp.ndarray,
+ ):
+ """Get reward for each queiry--response pair."""
+ assert query_responses_ids.ndim == 2
+
+ # mask out padding tokens
+ attention_mask = query_responses_ids != tokenizer.pad_token_id
+ query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ reward_latents = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=query_responses_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ ).hidden_states[-1]
+ # reward_latents: [batch_size, length, hidden_size]
+
+ last_reward_latents = reward_latents[:, -1, :]
+ # last_reward_latents: [batch_size, hidden_size]
+
+ reward = scalar_head.apply(variables=params.head_params, x=last_reward_latents)
+ # reward: [batch_size, 1]
+ return reward
+
+ if args.rewards.trained_model:
+ orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()
+ reward_state_params = orbax_checkpointer.restore(args.rewards.trained_model)["reward_model"]["params"]
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": reward_state_params["lm_backbone_params"]["params"]}),
+ head_params=flax.core.FrozenDict({"params": reward_state_params["head_params"]["params"]}),
+ )
+ pprint(f"Loaded pretrained reward model from {args.rewards.trained_model}")
+ else:
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return functools.partial(reward_forward, params=reward_params)
+
+
+def prepare_policy_forward_and_policy_generate(args, tokenizer):
+ """Prepare the forward pass of the policy model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ # disable `pad_token_id` and `eos_token_id` because we just want to
+ # generate tokens without truncation / padding
+ lm_backbone.generation_config.eos_token_id = None
+ lm_backbone.generation_config.pad_token_id = tokenizer.pad_token_id
+ scalar_head = ScalarHead(head_input_size=lm_backbone.config.hidden_size)
+
+ generation_config = GenerationConfig(
+ max_new_tokens=args.task.response_length,
+ min_new_tokens=args.task.response_length,
+ temperature=args.task.temperature,
+ top_k=0.0,
+ top_p=1.0,
+ do_sample=True,
+ pad_token_id=tokenizer.pad_token_id,
+ )
+
+ def policy_forward(
+ params: LMBackboneWithScalarHeadParams,
+ input_ids: jnp.ndarray,
+ ):
+ """Get reward for input_ids."""
+ assert input_ids.ndim == 2
+ # shape: [batch_size, length]
+
+ # mask out padding tokens
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, input_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ lm_backbone_out = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ )
+
+ value_latents = lm_backbone_out.hidden_states[-1]
+ # shape: [batch_size, length, hidden_size]
+
+ values = scalar_head.apply(variables=params.head_params, x=value_latents)
+ # shape: [batch_size, length, 1]
+ return lm_backbone_out, values
+
+ def policy_generate(
+ params: LMBackboneWithScalarHeadParams,
+ queries: jnp.ndarray,
+ ):
+ input_ids = queries
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = lm_backbone.generate(
+ params=params["params"],
+ input_ids=input_ids,
+ generation_config=generation_config,
+ attention_mask=attention_mask.astype("i4"),
+ return_dict_in_generate=True,
+ )
+ context_length = input_ids.shape[1]
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ policy_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return policy_forward, policy_generate, policy_params
+
+
+@flax.struct.dataclass
+class RolloutStatistics:
+ returns: jnp.array
+ values: jnp.array
+ advantage: jnp.array
+ responses: jnp.array
+ query_responses: jnp.array
+ logprobs: jnp.array
+
+
+@flax.struct.dataclass
+class RLStatistics:
+ approxkl: jnp.array
+ entropy: jnp.array
+ pg_loss: jnp.array
+ pg_clipfrac: jnp.array
+ vf_losses1: jnp.array
+ vf_loss: jnp.array
+ vf_clipfrac: jnp.array
+ ratio: jnp.array
+ loss: jnp.array
+
+
+def train_step(policy_state, mb_stats, args):
+ def loss(params):
+ # mb_stats.query_responses: [local_micro_batch_size, query_length + response_length]
+ output, vpred_temp = policy_state.apply_fn(params, mb_stats.query_responses)
+
+ # vpred_temp: [local_micro_batch_size, query_length + response_length, 1]
+ vpred = jnp.squeeze(vpred_temp[:, args.task.query_length - 1 : -1, :], axis=-1)
+ # vpred: [local_micro_batch_size, response_length]
+ vpredclipped = jnp.clip(
+ vpred,
+ mb_stats.values - args.ppo.cliprange_value,
+ mb_stats.values + args.ppo.cliprange_value,
+ )
+ vf_losses1 = jnp.square(vpred - mb_stats.returns)
+ vf_losses2 = jnp.square(vpredclipped - mb_stats.returns)
+ vf_loss = 0.5 * jnp.maximum(vf_losses1, vf_losses2).mean()
+ vf_clipfrac = (vf_losses2 > vf_losses1).astype(jnp.float32).mean()
+
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ logits /= args.task.temperature
+ new_logprobs = -optax.softmax_cross_entropy_with_integer_labels(logits, mb_stats.responses)
+
+ logprobs_diff = new_logprobs - mb_stats.logprobs
+ ratio = jnp.exp(logprobs_diff)
+ pg_losses = -mb_stats.advantage * ratio
+ pg_losses2 = -mb_stats.advantage * jnp.clip(ratio, 1.0 - args.ppo.cliprange, 1.0 + args.ppo.cliprange)
+
+ pg_loss = jnp.maximum(pg_losses, pg_losses2).mean()
+ pg_clipfrac = (pg_losses2 > pg_losses).astype(jnp.float32).mean()
+
+ pd = jax.nn.softmax(logits, axis=-1)
+ entropy = jax.nn.logsumexp(logits, axis=-1) - jnp.sum(pd * logits, axis=-1)
+
+ approxkl = 0.5 * ((logprobs_diff) ** 2).mean()
+ loss = pg_loss + args.ppo.vf_coef * vf_loss
+
+ rl_stats = RLStatistics(
+ vf_loss=vf_loss,
+ vf_clipfrac=vf_clipfrac,
+ pg_loss=pg_loss,
+ pg_clipfrac=pg_clipfrac,
+ approxkl=approxkl,
+ loss=loss,
+ entropy=entropy.mean(),
+ ratio=ratio.mean(),
+ vf_losses1=vf_losses1.mean(),
+ )
+ rl_stats = jax.lax.pmean(rl_stats, "batch")
+ return loss, rl_stats
+
+ grad_fn = jax.value_and_grad(loss, has_aux=True)
+ (loss, rl_stats), grads = grad_fn(policy_state.params)
+ grads = jax.lax.pmean(grads, "batch")
+ policy_state = policy_state.apply_gradients(grads=grads)
+ return policy_state, rl_stats
+
+
+def linear_schedule(optimizer_step, args):
+ """anneal learning rate linearly to reach 0 after one epoch."""
+ update = 1 + optimizer_step // (args.ppo.noptepochs * args.ppo.nminibatches * args.ppo.gradient_accumulation_steps)
+ frac = 1.0 - (update - 1.0) / args.ppo.num_updates
+ lrnow = frac * args.ppo.lr
+ return lrnow
+
+
+def train(args: Args):
+ local_devices = jax.local_devices()
+ global_devices = jax.devices()
+ args.ppo.world_size = jax.process_count()
+ learner_devices = [local_devices[d_id] for d_id in args.learner_device_ids]
+ global_learner_decices = [
+ global_devices[d_id + process_index * len(local_devices)]
+ for process_index in range(args.ppo.world_size)
+ for d_id in args.learner_device_ids
+ ]
+ pprint({"global_learner_decices": global_learner_decices})
+ args.global_learner_decices = [str(item) for item in global_learner_decices]
+ args.learner_devices = [str(item) for item in learner_devices]
+ args.local_rank = jax.process_index()
+ args.ppo.batch_size = int(args.ppo.local_batch_size * len(args.learner_devices) * args.ppo.world_size)
+ args.ppo.minibatch_size = exact_div(args.ppo.batch_size, args.ppo.nminibatches)
+ args.ppo.local_mini_batch_size = exact_div(args.ppo.local_batch_size, args.ppo.nminibatches)
+ args.ppo.local_micro_batch_size = exact_div(args.ppo.local_mini_batch_size, args.ppo.gradient_accumulation_steps)
+ if args.ppo.whiten_rewards:
+ assert (
+ args.ppo.local_mini_batch_size >= 8
+ ), f"Per-rank minibatch size {args.ppo.local_mini_batch_size} is insufficient for whitening"
+ # `per_rank_rollout_batch_size` is our `args.ppo.local_batch_size`
+ # `per_rank_minibatch_size` is our `args.ppo.local_mini_batch_size`
+ args.ppo.num_updates = args.ppo.total_episodes // args.ppo.batch_size
+
+ console = Console(force_terminal=True)
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+ writer = SimpleNamespace() # dummy writer
+ writer.add_scalar = lambda x, y, z: None
+ writer.add_histogram = lambda x, y, z: None
+
+ if args.local_rank == 0:
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+ writer = SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+ local_seed = args.seed + args.local_rank * 100003 # Prime
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.base_model,
+ padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ reward_forward = prepare_reward_forward(args, tokenizer)
+ policy_forward, policy_generate, policy_params = prepare_policy_forward_and_policy_generate(args, tokenizer)
+ _, _, ref_policy_params = prepare_policy_forward_and_policy_generate(args, tokenizer)
+
+ p_whiten_no_shift_mean = jax.pmap(functools.partial(whiten, shift_mean=False))
+ p_whiten_shift_mean = jax.pmap(functools.partial(whiten, shift_mean=True))
+ p_reward_forward = jax.pmap(reward_forward)
+ p_train_step = jax.pmap(
+ functools.partial(train_step, args=args),
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+
+ @jax.pmap
+ def p_get_response_values_and_logprobs(policy_state_params, queries):
+ query_responses = policy_generate(
+ params=policy_state_params.lm_backbone_params,
+ queries=queries,
+ )
+ # query_responses: [local_batch_size, query_length + response_length]
+ responses = query_responses[:, args.task.query_length :]
+
+ output, full_values = policy_forward(policy_state_params, query_responses)
+ values = full_values[:, args.task.query_length - 1 : -1].squeeze(-1)
+ # values: [local_batch_size, response_length]
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ # logits: [local_batch_size, response_length, vocab_size]
+ logits /= args.task.temperature
+ all_logprobs = jax.nn.log_softmax(logits, axis=-1)
+ # all_logprobs: [local_batch_size, response_length, vocab_size]
+ logprobs = jnp.take_along_axis(all_logprobs, responses[..., None], -1).squeeze(-1)
+ # logprobs: [local_batch_size, response_length]
+
+ ref_output, _ = policy_forward(ref_policy_params, query_responses)
+ ref_logits = ref_output.logits[:, args.task.query_length - 1 : -1, :]
+ # ref_logits: [local_batch_size, response_length, vocab_size]
+ ref_logits /= args.task.temperature
+ ref_all_logprobs = jax.nn.log_softmax(ref_logits, axis=-1)
+ ref_logprobs = jnp.take_along_axis(ref_all_logprobs, responses[..., None], -1).squeeze(-1)
+ # ref_logprobs: [local_batch_size, response_length]
+ return query_responses, values, logprobs, ref_logprobs
+
+ if args.use_tensorflow_adam:
+ adam = adam_tf_style
+ else:
+ adam = optax.adam
+
+ optimizer = adam(
+ learning_rate=functools.partial(linear_schedule, args=args),
+ eps=args.ppo.eps,
+ )
+
+ optimizer = optax.MultiSteps(optimizer, args.ppo.gradient_accumulation_steps)
+
+ policy_state = TrainState.create(apply_fn=policy_forward, params=policy_params, tx=optimizer)
+ policy_state = jax_utils.replicate(policy_state)
+
+ del policy_params
+
+ dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=local_seed,
+ start_text=args.task.start_text,
+ end_text=args.task.end_text,
+ )
+ dataloader = DataLoader(dataset, batch_size=args.ppo.batch_size)
+ iter_dataloader = iter(dataloader)
+ kl_ctl = AdaptiveKLController(args.rewards.kl_coef, hparams=args.rewards.adaptive_kl)
+
+ print("===training policy===")
+ global_step = 0
+ approxkls_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ pg_clipfracs_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ pg_losses_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ vf_losses_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ vf_clipfrac_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ entropies_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ for update in range(1, args.ppo.num_updates + 1):
+ global_step += 1 * args.ppo.batch_size
+ data = next(iter_dataloader)
+ queries = right_padding_to_left_padding(data["input_ids"], tokenizer.pad_token_id)
+ queries = common_utils.shard(queries)
+ # queries: [num_device, local_batch_size, query_length]
+
+ query_responses, values, logprobs, ref_logprobs = p_get_response_values_and_logprobs(policy_state.params, queries)
+ responses = query_responses[..., args.task.query_length :]
+
+ # **Response Processing**
+ # 1. truncate at the first occurrence of `truncate_token` that appears at or after
+ # position truncate_after in the responses
+ # https://github.com/openai/lm-human-preferences/blob/cbfd210bb8b08f6bc5c26878c10984b90f516c66/lm_human_preferences/train_policy.py#L378
+ truncate_token_mask = responses == args.task.truncate_token
+ # truncate_token_mask: [num_device, local_batch_size, response_length]
+ truncate_after_or_token_mask = jnp.concatenate(
+ [
+ jnp.zeros_like(truncate_token_mask)[:, :, : args.task.truncate_after],
+ truncate_token_mask[:, :, args.task.truncate_after :],
+ ],
+ axis=-1,
+ )
+ truncate_mask = (jnp.cumsum(truncate_after_or_token_mask, axis=-1) - truncate_after_or_token_mask).astype("bool")
+ postprocessed_responses = jnp.where(
+ truncate_mask,
+ jnp.full_like(responses, tokenizer.pad_token_id),
+ responses,
+ )
+ # postprocessed_responses: [num_device, local_batch_size, response_length]
+ del truncate_token_mask, truncate_after_or_token_mask, truncate_mask
+
+ # 2. run reward model on the truncated responses
+ postprocessed_query_responses = np.concatenate((queries, postprocessed_responses), axis=-1)
+ # postprocessed_query_responses: [num_device, local_batch_size, query_length + response_length]
+ postprocessed_query_responses = einops.rearrange(
+ right_padding_to_left_padding(
+ einops.rearrange(postprocessed_query_responses, "d b l -> (d b) l"), tokenizer.pad_token_id
+ ),
+ "(d b) l -> d b l",
+ d=len(args.learner_devices),
+ )
+ scores = p_reward_forward(query_responses_ids=postprocessed_query_responses).squeeze(-1)
+ # scores: [num_device, local_batch_size]
+
+ # 3. filter response. Ensure that the sample contains truncate_token
+ # responses not passing that filter will receive a low (fixed) score
+ # only query humans on responses that pass that filter
+ matches_token = postprocessed_responses[..., args.task.truncate_after :] == args.task.truncate_token
+ # matches_token: [num_device, local_batch_size, response_length - args.task.truncate_after]
+
+ filter_mask = jnp.any(matches_token, axis=-1)
+ # filter_mask: [num_device, local_batch_size]
+
+ scores = jnp.where(
+ filter_mask,
+ scores,
+ jnp.full_like(scores, args.task.penalty_reward_value),
+ )
+ # scores: [num_device, local_batch_size]
+ del matches_token, filter_mask
+
+ # 4. compute rewards
+ kl = logprobs - ref_logprobs
+ # kl: [num_device, local_batch_size, response_length]
+ non_score_reward = -kl_ctl.value * kl
+ rewards = non_score_reward
+ rewards = rewards.at[..., -1].add(scores)
+ # rewards: [num_device, local_batch_size, response_length]
+
+ # 5. whiten rewards
+ if args.ppo.whiten_rewards:
+ rewards = p_whiten_no_shift_mean(rewards) | All of this should go to the jitted function. Also, do not worry about `del` — if I remember correctly, jax compiler will basically do that for you automatically. |
lm-human-preference-details | github_2023 | python | 18 | vwxyzjn | vwxyzjn | @@ -0,0 +1,975 @@
+import functools
+import os
+import time
+from dataclasses import asdict, dataclass, field
+from types import SimpleNamespace
+from typing import List, Optional
+
+import einops
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+import optax
+import orbax
+import tyro
+from flax import jax_utils
+from flax.training import common_utils, orbax_utils
+from flax.training.train_state import TrainState
+from optax import ScaleByAdamState, update_moment, update_moment_per_elem_norm
+from optax._src import base, combine, numerics, utils
+from optax._src.alias import _scale_by_learning_rate
+from rich.console import Console
+from rich.pretty import pprint
+from torch.utils.data import DataLoader, IterableDataset
+from torch.utils.tensorboard import SummaryWriter
+from transformers import AutoTokenizer, FlaxAutoModelForCausalLM, GenerationConfig
+
+from lm_human_preference_details.data import DATASET
+
+
+@dataclass
+class AdaptiveKLParams:
+ target: float = 6.0
+ horizon: int = 10000 # in episodes
+
+
+@dataclass
+class RewardHParams:
+ kl_coef: float = 0.15
+ adaptive_kl: Optional[AdaptiveKLParams] = field(default_factory=AdaptiveKLParams)
+ trained_model: Optional[str] = "models/"
+ label_dataset: tyro.conf.Suppress[Optional[str]] = None
+
+
+@dataclass
+class PpoHParams:
+ total_episodes: int = 1000000
+ local_batch_size: int = 64
+ local_mini_batch_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ mini_batch_size: tyro.conf.Suppress[int] = None
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ minibatch_size: tyro.conf.Suppress[int] = None
+ num_updates: tyro.conf.Suppress[int] = None
+ nminibatches: int = 1
+ noptepochs: int = 4
+ lr: float = 0.00001
+ eps: float = 1e-5
+ vf_coef: float = 0.1
+ cliprange: float = 0.2
+ cliprange_value: float = 0.2
+ gamma: float = 1
+ lam: float = 0.95
+ whiten_rewards: bool = True
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # Truncate response after the first occurrence of this token at or after index after when sampling.
+ truncate_token: int = 13
+ truncate_after: int = 16
+ penalty_reward_value: int = -1
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "cleanrl"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ print_sample_output_freq: int = 0
+ """How often to print sample output"""
+ save_path: str = "models/policy/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = True
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ rewards: RewardHParams = field(default_factory=RewardHParams)
+ ppo: PpoHParams = field(default_factory=PpoHParams)
+
+ # distributed settings
+ local_rank: int = 0
+ """the rank of this process"""
+ learner_device_ids: List[int] = field(default_factory=lambda: [0])
+ "the device ids that script will use"
+ learner_devices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the devices that script will use"""
+ global_learner_decices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the total devices (across all nodes and machines) that script will use"""
+
+
+def scale_by_adam_tf_style(
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+) -> base.GradientTransformation:
+ """Rescale updates according to the Adam algorithm.
+
+ References:
+ [Kingma et al, 2014](https://arxiv.org/abs/1412.6980)
+
+ Args:
+ b1: Decay rate for the exponentially weighted average of grads.
+ b2: Decay rate for the exponentially weighted average of squared grads.
+ eps: Term added to the denominator to improve numerical stability.
+ eps_root: Term added to the denominator inside the square-root to improve
+ numerical stability when backpropagating gradients through the rescaling.
+ mu_dtype: Optional `dtype` to be used for the first order accumulator; if
+ `None` then the `dtype` is inferred from `params` and `updates`.
+
+ Returns:
+ A `GradientTransformation` object.
+ """
+
+ mu_dtype = utils.canonicalize_dtype(mu_dtype)
+
+ def init_fn(params):
+ mu = jax.tree_util.tree_map(lambda t: jnp.zeros_like(t, dtype=mu_dtype), params) # First moment
+ nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Second moment
+ return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)
+
+ def update_fn(updates, state, params=None):
+ del params
+ mu = update_moment(updates, state.mu, b1, 1)
+ nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
+ count_inc = numerics.safe_int32_increment(state.count)
+
+ ### `optax` default adam implementation
+ # mu_hat = bias_correction(mu, b1, count_inc)
+ # nu_hat = bias_correction(nu, b2, count_inc)
+ # updates = jax.tree_util.tree_map(
+ # lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
+ ### Tensorflow adam implementation
+ updates = jax.tree_util.tree_map(
+ lambda m, v: (jnp.sqrt(1 - b2**count_inc) / (1 - b1**count_inc)) * m / (jnp.sqrt(v + eps_root) + eps),
+ mu,
+ nu,
+ ) #
+ mu = utils.cast_tree(mu, mu_dtype)
+ return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)
+
+ return base.GradientTransformation(init_fn, update_fn)
+
+
+def adam_tf_style(
+ learning_rate,
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+):
+ return combine.chain(
+ scale_by_adam_tf_style(b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype),
+ _scale_by_learning_rate(learning_rate),
+ )
+
+
+class AdaptiveKLController:
+ def __init__(self, init_kl_coef: float, hparams: AdaptiveKLParams):
+ self.value = init_kl_coef
+ self.hparams = hparams
+
+ def update(self, current, n_steps):
+ target = self.hparams.target
+ proportional_error = np.clip(current / target - 1, -0.2, 0.2)
+ mult = 1 + proportional_error * n_steps / self.hparams.horizon
+ self.value *= mult
+
+
+def whiten(values, shift_mean=True):
+ # `unbiased=False` matches TF `tf.nn.moments`'s setting
+ mean, var = jnp.mean(values), jnp.var(values)
+ whitened = (values - mean) * jax.lax.rsqrt(var + 1e-8)
+ if not shift_mean:
+ whitened += mean
+ return whitened
+
+
+class ScalarHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=0),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ return x
+
+
+class RewardHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=1 / np.sqrt(self.head_input_size + 1)),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+ x = x * reward_gain + reward_bias
+ return x
+
+
+@flax.struct.dataclass
+class LMBackboneWithScalarHeadParams:
+ """Parameters for the language model backbone and a scalar head."""
+
+ lm_backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+# a pytorch dataset
+class MyDataset(IterableDataset):
+ def __init__(self, generator, tokenizer, query_length, seed, start_text=None, end_text=None):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ # Convert from right padding to left padding.
+ return np.array([[pad_id] * (row == pad_id).sum() + [x for x in row if x != pad_id] for row in tokens])
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a / b}")
+ return q
+
+
def prepare_reward_forward(args, tokenizer):
    """Prepare the forward pass of the reward model and parameters.

    Builds the reward LM backbone plus `RewardHead`, loads trained parameters
    from the orbax checkpoint at `args.rewards.trained_model` when given,
    otherwise initializes fresh parameters. Returns `reward_forward` with the
    parameters already bound via `functools.partial`.
    """

    lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
    scalar_head = RewardHead(head_input_size=lm_backbone.config.hidden_size)

    def reward_forward(
        params: LMBackboneWithScalarHeadParams,
        query_responses_ids: jnp.ndarray,
    ):
        """Get reward for each query--response pair."""
        assert query_responses_ids.ndim == 2

        # mask out padding tokens
        attention_mask = query_responses_ids != tokenizer.pad_token_id
        query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)

        # assign position ids (pads get position 0, real tokens count from 0)
        position_ids = attention_mask.cumsum(1) - attention_mask

        reward_latents = lm_backbone.module.apply(
            variables=params.lm_backbone_params,
            input_ids=query_responses_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_hidden_states=True,
        ).hidden_states[-1]
        # reward_latents: [batch_size, length, hidden_size]

        # Callers left-pad (via right_padding_to_left_padding), so position -1
        # holds each row's final real token.
        last_reward_latents = reward_latents[:, -1, :]
        # last_reward_latents: [batch_size, hidden_size]

        reward = scalar_head.apply(variables=params.head_params, x=last_reward_latents)
        # reward: [batch_size, 1]
        return reward

    if args.rewards.trained_model:
        orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()
        reward_state_params = orbax_checkpointer.restore(args.rewards.trained_model)["reward_model"]["params"]
        reward_params = LMBackboneWithScalarHeadParams(
            lm_backbone_params=flax.core.FrozenDict({"params": reward_state_params["lm_backbone_params"]["params"]}),
            head_params=flax.core.FrozenDict({"params": reward_state_params["head_params"]["params"]}),
        )
        pprint(f"Loaded pretrained reward model from {args.rewards.trained_model}")
    else:
        key = jax.random.PRNGKey(args.seed)
        key, init_key = jax.random.split(key, 2)
        reward_params = LMBackboneWithScalarHeadParams(
            lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
            head_params=flax.core.FrozenDict(
                scalar_head.init(
                    init_key,
                    jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
                )
            ),
        )

    return functools.partial(reward_forward, params=reward_params)
+
+
def prepare_policy_forward_and_policy_generate(args, tokenizer):
    """Prepare the forward pass of the policy model and parameters.

    Returns `(policy_forward, policy_generate, policy_params)`: a forward pass
    producing LM outputs plus per-position value estimates, a sampler producing
    fixed-length responses, and freshly initialized parameters.
    """

    lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
    # Disable `eos_token_id` so sampling never stops early; min == max new
    # tokens below makes every response exactly `response_length` long.
    # `pad_token_id` is set (not disabled) so `generate` accepts padded queries.
    lm_backbone.generation_config.eos_token_id = None
    lm_backbone.generation_config.pad_token_id = tokenizer.pad_token_id
    scalar_head = ScalarHead(head_input_size=lm_backbone.config.hidden_size)

    generation_config = GenerationConfig(
        max_new_tokens=args.task.response_length,
        min_new_tokens=args.task.response_length,
        temperature=args.task.temperature,
        top_k=0.0,
        top_p=1.0,
        do_sample=True,
        pad_token_id=tokenizer.pad_token_id,
    )

    def policy_forward(
        params: LMBackboneWithScalarHeadParams,
        input_ids: jnp.ndarray,
    ):
        """Run the policy backbone; return (LM outputs, per-position values)."""
        assert input_ids.ndim == 2
        # shape: [batch_size, length]

        # mask out padding tokens
        attention_mask = input_ids != tokenizer.pad_token_id
        input_ids = jnp.where(attention_mask, input_ids, 0)

        # assign position ids
        position_ids = attention_mask.cumsum(1) - attention_mask

        lm_backbone_out = lm_backbone.module.apply(
            variables=params.lm_backbone_params,
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_hidden_states=True,
        )

        value_latents = lm_backbone_out.hidden_states[-1]
        # shape: [batch_size, length, hidden_size]

        values = scalar_head.apply(variables=params.head_params, x=value_latents)
        # shape: [batch_size, length, 1]
        return lm_backbone_out, values

    def policy_generate(
        params: flax.core.FrozenDict,  # the backbone params only (callers pass `.lm_backbone_params`)
        queries: jnp.ndarray,
    ):
        """Sample `response_length` tokens per query; return [queries | responses]."""
        input_ids = queries
        attention_mask = input_ids != tokenizer.pad_token_id
        input_ids = jnp.where(attention_mask, queries, 0)
        output = lm_backbone.generate(
            params=params["params"],
            input_ids=input_ids,
            generation_config=generation_config,
            attention_mask=attention_mask.astype("i4"),
            return_dict_in_generate=True,
        )
        context_length = input_ids.shape[1]
        # Re-attach the original queries (with their pad ids intact) in front
        # of the newly generated tokens.
        return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)

    key = jax.random.PRNGKey(args.seed)
    key, init_key = jax.random.split(key, 2)
    policy_params = LMBackboneWithScalarHeadParams(
        lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
        head_params=flax.core.FrozenDict(
            scalar_head.init(
                init_key,
                jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
            )
        ),
    )

    return policy_forward, policy_generate, policy_params
+
+
@flax.struct.dataclass
class RolloutStatistics:
    """Per-rollout tensors consumed by PPO minibatch updates (see train_step)."""

    returns: jnp.ndarray  # GAE returns (advantages + values)
    values: jnp.ndarray  # value predictions recorded at rollout time
    advantage: jnp.ndarray  # whitened GAE advantages
    responses: jnp.ndarray  # sampled response token ids
    query_responses: jnp.ndarray  # concatenated [query | response] token ids
    logprobs: jnp.ndarray  # behavior-policy log-probs of the response tokens
+
+
@flax.struct.dataclass
class RLStatistics:
    """Scalar diagnostics from one PPO train_step (pmean-reduced across devices)."""

    approxkl: jnp.ndarray  # 0.5 * mean((logp_new - logp_old)^2)
    entropy: jnp.ndarray  # mean policy entropy over response positions
    pg_loss: jnp.ndarray  # clipped policy-gradient loss
    pg_clipfrac: jnp.ndarray  # fraction of samples where the PG clip was active
    vf_losses1: jnp.ndarray  # mean unclipped value loss term
    vf_loss: jnp.ndarray  # clipped value loss
    vf_clipfrac: jnp.ndarray  # fraction where the value clip was active
    ratio: jnp.ndarray  # mean importance ratio exp(logp_new - logp_old)
    loss: jnp.ndarray  # total loss = pg_loss + vf_coef * vf_loss
+
+
def train_step(policy_state, mb_stats, args):
    """One PPO gradient step on a microbatch; intended to run under `jax.pmap`
    with axis name "batch".

    `mb_stats` carries the rollout tensors (query_responses, responses, values,
    returns, advantage, logprobs) gathered at sampling time; `args` supplies
    PPO clipping/coefficient hyperparameters. Returns the updated train state
    and pmean-reduced `RLStatistics`.
    """

    def loss(params):
        # mb_stats.query_responses: [local_micro_batch_size, query_length + response_length]
        output, vpred_temp = policy_state.apply_fn(params, mb_stats.query_responses)

        # vpred_temp: [local_micro_batch_size, query_length + response_length, 1]
        vpred = jnp.squeeze(vpred_temp[:, args.task.query_length - 1 : -1, :], axis=-1)
        # vpred: [local_micro_batch_size, response_length]
        # Clipped value objective: keep the new value prediction within
        # +/- cliprange_value of the rollout-time prediction.
        vpredclipped = jnp.clip(
            vpred,
            mb_stats.values - args.ppo.cliprange_value,
            mb_stats.values + args.ppo.cliprange_value,
        )
        vf_losses1 = jnp.square(vpred - mb_stats.returns)
        vf_losses2 = jnp.square(vpredclipped - mb_stats.returns)
        vf_loss = 0.5 * jnp.maximum(vf_losses1, vf_losses2).mean()
        vf_clipfrac = (vf_losses2 > vf_losses1).astype(jnp.float32).mean()

        # Logits are shifted by one (position t predicts token t+1) and divided
        # by the sampling temperature, matching rollout-time scoring.
        logits = output.logits[:, args.task.query_length - 1 : -1, :]
        logits /= args.task.temperature
        new_logprobs = -optax.softmax_cross_entropy_with_integer_labels(logits, mb_stats.responses)

        # Clipped PPO policy objective.
        logprobs_diff = new_logprobs - mb_stats.logprobs
        ratio = jnp.exp(logprobs_diff)
        pg_losses = -mb_stats.advantage * ratio
        pg_losses2 = -mb_stats.advantage * jnp.clip(ratio, 1.0 - args.ppo.cliprange, 1.0 + args.ppo.cliprange)

        pg_loss = jnp.maximum(pg_losses, pg_losses2).mean()
        pg_clipfrac = (pg_losses2 > pg_losses).astype(jnp.float32).mean()

        # Categorical entropy written as logsumexp(z) - E_p[z].
        pd = jax.nn.softmax(logits, axis=-1)
        entropy = jax.nn.logsumexp(logits, axis=-1) - jnp.sum(pd * logits, axis=-1)

        # Simple quadratic KL estimator between old and new policies.
        approxkl = 0.5 * ((logprobs_diff) ** 2).mean()
        loss = pg_loss + args.ppo.vf_coef * vf_loss

        rl_stats = RLStatistics(
            vf_loss=vf_loss,
            vf_clipfrac=vf_clipfrac,
            pg_loss=pg_loss,
            pg_clipfrac=pg_clipfrac,
            approxkl=approxkl,
            loss=loss,
            entropy=entropy.mean(),
            ratio=ratio.mean(),
            vf_losses1=vf_losses1.mean(),
        )
        rl_stats = jax.lax.pmean(rl_stats, "batch")
        return loss, rl_stats

    grad_fn = jax.value_and_grad(loss, has_aux=True)
    (loss, rl_stats), grads = grad_fn(policy_state.params)
    # All-reduce gradients across the pmapped "batch" axis before applying.
    grads = jax.lax.pmean(grads, "batch")
    policy_state = policy_state.apply_gradients(grads=grads)
    return policy_state, rl_stats
+
+
def linear_schedule(optimizer_step, args):
    """Linearly decay the learning rate toward 0 over `args.ppo.num_updates` updates."""
    steps_per_update = args.ppo.noptepochs * args.ppo.nminibatches * args.ppo.gradient_accumulation_steps
    update = 1 + optimizer_step // steps_per_update
    frac = 1.0 - (update - 1.0) / args.ppo.num_updates
    return frac * args.ppo.lr
+
+
+def train(args: Args):
+ local_devices = jax.local_devices()
+ global_devices = jax.devices()
+ args.ppo.world_size = jax.process_count()
+ learner_devices = [local_devices[d_id] for d_id in args.learner_device_ids]
+ global_learner_decices = [
+ global_devices[d_id + process_index * len(local_devices)]
+ for process_index in range(args.ppo.world_size)
+ for d_id in args.learner_device_ids
+ ]
+ pprint({"global_learner_decices": global_learner_decices})
+ args.global_learner_decices = [str(item) for item in global_learner_decices]
+ args.learner_devices = [str(item) for item in learner_devices]
+ args.local_rank = jax.process_index()
+ args.ppo.batch_size = int(args.ppo.local_batch_size * len(args.learner_devices) * args.ppo.world_size)
+ args.ppo.minibatch_size = exact_div(args.ppo.batch_size, args.ppo.nminibatches)
+ args.ppo.local_mini_batch_size = exact_div(args.ppo.local_batch_size, args.ppo.nminibatches)
+ args.ppo.local_micro_batch_size = exact_div(args.ppo.local_mini_batch_size, args.ppo.gradient_accumulation_steps)
+ if args.ppo.whiten_rewards:
+ assert (
+ args.ppo.local_mini_batch_size >= 8
+ ), f"Per-rank minibatch size {args.ppo.local_mini_batch_size} is insufficient for whitening"
+ # `per_rank_rollout_batch_size` is our `args.ppo.local_batch_size`
+ # `per_rank_minibatch_size` is our `args.ppo.local_mini_batch_size`
+ args.ppo.num_updates = args.ppo.total_episodes // args.ppo.batch_size
+
+ console = Console(force_terminal=True)
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+ writer = SimpleNamespace() # dummy writer
+ writer.add_scalar = lambda x, y, z: None
+ writer.add_histogram = lambda x, y, z: None
+
+ if args.local_rank == 0:
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+ writer = SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+ local_seed = args.seed + args.local_rank * 100003 # Prime
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.base_model,
+ padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ reward_forward = prepare_reward_forward(args, tokenizer)
+ policy_forward, policy_generate, policy_params = prepare_policy_forward_and_policy_generate(args, tokenizer)
+ _, _, ref_policy_params = prepare_policy_forward_and_policy_generate(args, tokenizer)
+
+ p_whiten_no_shift_mean = jax.pmap(functools.partial(whiten, shift_mean=False))
+ p_whiten_shift_mean = jax.pmap(functools.partial(whiten, shift_mean=True))
+ p_reward_forward = jax.pmap(reward_forward)
+ p_train_step = jax.pmap(
+ functools.partial(train_step, args=args),
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+
+ @jax.pmap
+ def p_get_response_values_and_logprobs(policy_state_params, queries):
+ query_responses = policy_generate(
+ params=policy_state_params.lm_backbone_params,
+ queries=queries,
+ )
+ # query_responses: [local_batch_size, query_length + response_length]
+ responses = query_responses[:, args.task.query_length :]
+
+ output, full_values = policy_forward(policy_state_params, query_responses)
+ values = full_values[:, args.task.query_length - 1 : -1].squeeze(-1)
+ # values: [local_batch_size, response_length]
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ # logits: [local_batch_size, response_length, vocab_size]
+ logits /= args.task.temperature
+ all_logprobs = jax.nn.log_softmax(logits, axis=-1)
+ # all_logprobs: [local_batch_size, response_length, vocab_size]
+ logprobs = jnp.take_along_axis(all_logprobs, responses[..., None], -1).squeeze(-1)
+ # logprobs: [local_batch_size, response_length]
+
+ ref_output, _ = policy_forward(ref_policy_params, query_responses)
+ ref_logits = ref_output.logits[:, args.task.query_length - 1 : -1, :]
+ # ref_logits: [local_batch_size, response_length, vocab_size]
+ ref_logits /= args.task.temperature
+ ref_all_logprobs = jax.nn.log_softmax(ref_logits, axis=-1)
+ ref_logprobs = jnp.take_along_axis(ref_all_logprobs, responses[..., None], -1).squeeze(-1)
+ # ref_logprobs: [local_batch_size, response_length]
+ return query_responses, values, logprobs, ref_logprobs
+
+ if args.use_tensorflow_adam:
+ adam = adam_tf_style
+ else:
+ adam = optax.adam
+
+ optimizer = adam(
+ learning_rate=functools.partial(linear_schedule, args=args),
+ eps=args.ppo.eps,
+ )
+
+ optimizer = optax.MultiSteps(optimizer, args.ppo.gradient_accumulation_steps)
+
+ policy_state = TrainState.create(apply_fn=policy_forward, params=policy_params, tx=optimizer)
+ policy_state = jax_utils.replicate(policy_state)
+
+ del policy_params
+
+ dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=local_seed,
+ start_text=args.task.start_text,
+ end_text=args.task.end_text,
+ )
+ dataloader = DataLoader(dataset, batch_size=args.ppo.batch_size)
+ iter_dataloader = iter(dataloader)
+ kl_ctl = AdaptiveKLController(args.rewards.kl_coef, hparams=args.rewards.adaptive_kl)
+
+ print("===training policy===")
+ global_step = 0
+ approxkls_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ pg_clipfracs_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ pg_losses_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ vf_losses_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ vf_clipfrac_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ entropies_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ for update in range(1, args.ppo.num_updates + 1):
+ global_step += 1 * args.ppo.batch_size
+ data = next(iter_dataloader)
+ queries = right_padding_to_left_padding(data["input_ids"], tokenizer.pad_token_id)
+ queries = common_utils.shard(queries)
+ # queries: [num_device, local_batch_size, query_length]
+
+ query_responses, values, logprobs, ref_logprobs = p_get_response_values_and_logprobs(policy_state.params, queries)
+ responses = query_responses[..., args.task.query_length :]
+
+ # **Response Processing**
+ # 1. truncate at the first occurrence of `truncate_token` that appears at or after
+ # position truncate_after in the responses
+ # https://github.com/openai/lm-human-preferences/blob/cbfd210bb8b08f6bc5c26878c10984b90f516c66/lm_human_preferences/train_policy.py#L378
+ truncate_token_mask = responses == args.task.truncate_token
+ # truncate_token_mask: [num_device, local_batch_size, response_length]
+ truncate_after_or_token_mask = jnp.concatenate(
+ [
+ jnp.zeros_like(truncate_token_mask)[:, :, : args.task.truncate_after],
+ truncate_token_mask[:, :, args.task.truncate_after :],
+ ],
+ axis=-1,
+ )
+ truncate_mask = (jnp.cumsum(truncate_after_or_token_mask, axis=-1) - truncate_after_or_token_mask).astype("bool")
+ postprocessed_responses = jnp.where(
+ truncate_mask,
+ jnp.full_like(responses, tokenizer.pad_token_id),
+ responses,
+ )
+ # postprocessed_responses: [num_device, local_batch_size, response_length]
+ del truncate_token_mask, truncate_after_or_token_mask, truncate_mask
+
+ # 2. run reward model on the truncated responses
+ postprocessed_query_responses = np.concatenate((queries, postprocessed_responses), axis=-1)
+ # postprocessed_query_responses: [num_device, local_batch_size, query_length + response_length]
+ postprocessed_query_responses = einops.rearrange(
+ right_padding_to_left_padding(
+ einops.rearrange(postprocessed_query_responses, "d b l -> (d b) l"), tokenizer.pad_token_id
+ ),
+ "(d b) l -> d b l",
+ d=len(args.learner_devices),
+ )
+ scores = p_reward_forward(query_responses_ids=postprocessed_query_responses).squeeze(-1)
+ # scores: [num_device, local_batch_size]
+
+ # 3. filter response. Ensure that the sample contains truncate_token
+ # responses not passing that filter will receive a low (fixed) score
+ # only query humans on responses that pass that filter
+ matches_token = postprocessed_responses[..., args.task.truncate_after :] == args.task.truncate_token
+ # matches_token: [num_device, local_batch_size, response_length - args.task.truncate_after]
+
+ filter_mask = jnp.any(matches_token, axis=-1)
+ # filter_mask: [num_device, local_batch_size]
+
+ scores = jnp.where(
+ filter_mask,
+ scores,
+ jnp.full_like(scores, args.task.penalty_reward_value),
+ )
+ # scores: [num_device, local_batch_size]
+ del matches_token, filter_mask
+
+ # 4. compute rewards
+ kl = logprobs - ref_logprobs
+ # kl: [num_device, local_batch_size, response_length]
+ non_score_reward = -kl_ctl.value * kl
+ rewards = non_score_reward
+ rewards = rewards.at[..., -1].add(scores)
+ # rewards: [num_device, local_batch_size, response_length]
+
+ # 5. whiten rewards
+ if args.ppo.whiten_rewards:
+ rewards = p_whiten_no_shift_mean(rewards)
+ try:
+ sample_kl = kl[0][0].sum().item()
+ console.print(
+ f"[green][bold]{'Query'}:[/]\n"
+ + f"[green]{ tokenizer.decode(queries[0][0], skip_special_tokens=True)}[/]\n\n"
+ + f"[blue][bold]{'Raw response'}:[/]\n"
+ + f"[blue]{tokenizer.decode(responses[0][0], skip_special_tokens=True)}[/]\n\n"
+ + f"[yellow][bold]{'Processed response'}:[/]\n"
+ + f"[yellow]{tokenizer.decode(postprocessed_responses[0][0], skip_special_tokens=True)}[/]\n\n"
+ + f"[red]score: {scores[0][0]}, kl: {kl[0][0].sum().item()}, total reward: {scores[0][0] - kl_ctl.value * sample_kl} [/]"
+ )
+ except Exception as e:
+ print(e)
+ del postprocessed_query_responses
+
+ # 6. compute advantages and returns
+ lastgaelam = 0
+ advantages_reversed = []
+ gen_length = args.task.response_length
+
+ for t in reversed(range(gen_length)):
+ nextvalues = values[..., t + 1] if t < gen_length - 1 else 0.0
+ delta = rewards[..., t] + args.ppo.gamma * nextvalues - values[..., t]
+ lastgaelam = delta + args.ppo.gamma * args.ppo.lam * lastgaelam
+ advantages_reversed.append(lastgaelam)
+ # advantages_reversed is a list of 2D arrays
+ # Each array has the shape [num_devices, local_batch_size]
+
+ advantages = jnp.stack(advantages_reversed[::-1], axis=-1)
+ # advantages: [num_device, local_batch_size, response_length]
+ returns = advantages + values
+ # returns: [num_device, local_batch_size, response_length]
+ advantages = p_whiten_shift_mean(advantages) | Advantages calculation should be jitted as well. You should use `jax.scan` to do the `for t in reversed(range(gen_length)):`
References:
* not directly applicable, but here is Cleanba's PPO GAE computed with `jax.lax.scan`: https://github.com/vwxyzjn/cleanba/blob/81dca0054c8c0930046a2d12b07ada2945014d01/cleanba/cleanba_ppo.py#L558-L560
* you should also add a test case like https://github.com/vwxyzjn/cleanrl/blob/master/tests/test_jax_compute_gae.py to assert the advantage calculation is correct |
lm-human-preference-details | github_2023 | python | 18 | vwxyzjn | vwxyzjn | @@ -0,0 +1,975 @@
+import functools
+import os
+import time
+from dataclasses import asdict, dataclass, field
+from types import SimpleNamespace
+from typing import List, Optional
+
+import einops
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+import optax
+import orbax
+import tyro
+from flax import jax_utils
+from flax.training import common_utils, orbax_utils
+from flax.training.train_state import TrainState
+from optax import ScaleByAdamState, update_moment, update_moment_per_elem_norm
+from optax._src import base, combine, numerics, utils
+from optax._src.alias import _scale_by_learning_rate
+from rich.console import Console
+from rich.pretty import pprint
+from torch.utils.data import DataLoader, IterableDataset
+from torch.utils.tensorboard import SummaryWriter
+from transformers import AutoTokenizer, FlaxAutoModelForCausalLM, GenerationConfig
+
+from lm_human_preference_details.data import DATASET
+
+
@dataclass
class AdaptiveKLParams:
    """Settings for AdaptiveKLController's proportional KL-coefficient update."""

    target: float = 6.0  # KL value the controller steers toward
    horizon: int = 10000  # in episodes
+
+
@dataclass
class RewardHParams:
    """Reward-side hyperparameters for PPO training."""

    kl_coef: float = 0.15  # initial KL penalty coefficient (adapted online via AdaptiveKLController)
    adaptive_kl: Optional[AdaptiveKLParams] = field(default_factory=AdaptiveKLParams)
    trained_model: Optional[str] = "models/"  # orbax checkpoint dir of the trained reward model
    label_dataset: tyro.conf.Suppress[Optional[str]] = None
+
+
@dataclass
class PpoHParams:
    """PPO hyperparameters; `tyro.conf.Suppress` fields are derived in train()."""

    total_episodes: int = 1000000
    local_batch_size: int = 64
    local_mini_batch_size: tyro.conf.Suppress[int] = None
    # NOTE(review): `batch_size` was declared twice in this class; the redundant
    # second declaration (which silently re-declared the field) was removed.
    batch_size: tyro.conf.Suppress[int] = None
    # NOTE(review): both `mini_batch_size` and `minibatch_size` exist; train()
    # assigns only `minibatch_size` -- consider consolidating.
    mini_batch_size: tyro.conf.Suppress[int] = None
    gradient_accumulation_steps: int = 1
    """gradient accumulation steps"""
    local_micro_batch_size: tyro.conf.Suppress[int] = None
    """per rank micro batch size"""
    world_size: tyro.conf.Suppress[int] = None
    minibatch_size: tyro.conf.Suppress[int] = None
    num_updates: tyro.conf.Suppress[int] = None
    nminibatches: int = 1
    noptepochs: int = 4
    lr: float = 0.00001
    eps: float = 1e-5
    vf_coef: float = 0.1
    cliprange: float = 0.2
    cliprange_value: float = 0.2
    gamma: float = 1
    lam: float = 0.95
    whiten_rewards: bool = True
+
+
@dataclass
class TaskHParams:
    # Query params
    query_length: int = 64
    query_dataset: str = "books"
    query_prefix: str = ""
    query_suffix: str = ""
    start_text: Optional[str] = None  # single-vocab-token string marking where queries may start
    end_text: Optional[str] = None  # single-vocab-token string marking where queries may end

    # Response params
    response_length: int = 24

    # Truncate the response after the first occurrence of `truncate_token`
    # found at or after index `truncate_after` when sampling.
    truncate_token: int = 13  # token id (presumably "." in the GPT-2 vocab -- confirm)
    truncate_after: int = 16
    penalty_reward_value: int = -1  # score assigned to responses lacking the truncate token

    # LM params
    temperature: float = 0.7  # sampling temperature; also divides logits when scoring
+
+
@dataclass
class Args:
    # common args
    exp_name: str = os.path.basename(__file__)[: -len(".py")]
    """the name of this experiment"""
    seed: int = 1
    """seed of the experiment"""
    track: bool = False
    """if toggled, this experiment will be tracked with Weights and Biases"""
    wandb_project_name: str = "cleanrl"
    """the wandb's project name"""
    wandb_entity: Optional[str] = None
    """the entity (team) of wandb's project"""
    # NOTE(review): appears unused in this JAX script -- confirm before removing.
    cuda: bool = True
    """Whether to use cuda if available."""
    run_name: tyro.conf.Suppress[str] = None
    """TO BE FILLED: a unique name of this run"""

    base_model: str = "gpt2"
    """the name of the pretrained model to use"""
    print_sample_output_freq: int = 0
    """How often to print sample output"""
    save_path: str = "models/policy/"
    """Where to save the model"""
    use_tensorflow_adam: bool = True
    """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
    task: TaskHParams = field(default_factory=TaskHParams)
    rewards: RewardHParams = field(default_factory=RewardHParams)
    ppo: PpoHParams = field(default_factory=PpoHParams)

    # distributed settings
    local_rank: int = 0
    """the rank of this process"""
    learner_device_ids: List[int] = field(default_factory=lambda: [0])
    "the device ids that script will use"
    learner_devices: tyro.conf.Suppress[int] = None  # real type is `List[str]`
    """the devices that script will use"""
    # NOTE(review): "decices" is a typo for "devices"; name kept because train() assigns it.
    global_learner_decices: tyro.conf.Suppress[int] = None  # real type is `List[str]`
    """the total devices (across all nodes and machines) that script will use"""
+
+
def scale_by_adam_tf_style(
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8,
    eps_root: float = 0.0,
    mu_dtype=None,
) -> base.GradientTransformation:
    """Adam rescaling with TensorFlow-style bias correction.

    Unlike stock `optax.scale_by_adam` (which bias-corrects `mu` and `nu`
    separately), the correction is folded into a single scalar factor
    `sqrt(1 - b2**t) / (1 - b1**t)` applied to the raw first moment.

    Args:
      b1: Decay rate for the exponentially weighted average of grads.
      b2: Decay rate for the exponentially weighted average of squared grads.
      eps: Term added to the denominator to improve numerical stability.
      eps_root: Term added inside the square root, for stable backprop
        through the rescaling.
      mu_dtype: Optional dtype for the first-moment accumulator; inferred
        from params/updates when `None`.

    Returns:
      A `GradientTransformation` object.
    """
    mu_dtype = utils.canonicalize_dtype(mu_dtype)

    def init_fn(params):
        zeros_like_mu = lambda t: jnp.zeros_like(t, dtype=mu_dtype)
        return ScaleByAdamState(
            count=jnp.zeros([], jnp.int32),
            mu=jax.tree_util.tree_map(zeros_like_mu, params),  # first moment
            nu=jax.tree_util.tree_map(jnp.zeros_like, params),  # second moment
        )

    def update_fn(updates, state, params=None):
        del params
        first_moment = update_moment(updates, state.mu, b1, 1)
        second_moment = update_moment_per_elem_norm(updates, state.nu, b2, 2)
        step = numerics.safe_int32_increment(state.count)

        def tf_adam_update(m, v):
            # TF-style: one scalar bias-correction factor on the raw moments.
            correction = jnp.sqrt(1 - b2**step) / (1 - b1**step)
            return correction * m / (jnp.sqrt(v + eps_root) + eps)

        scaled = jax.tree_util.tree_map(tf_adam_update, first_moment, second_moment)
        first_moment = utils.cast_tree(first_moment, mu_dtype)
        return scaled, ScaleByAdamState(count=step, mu=first_moment, nu=second_moment)

    return base.GradientTransformation(init_fn, update_fn)
+
+
def adam_tf_style(
    learning_rate,
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8,
    eps_root: float = 0.0,
    mu_dtype=None,
):
    """Adam with TensorFlow-style bias correction, chained with the LR scaling."""
    rescale = scale_by_adam_tf_style(b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype)
    return combine.chain(rescale, _scale_by_learning_rate(learning_rate))
+
+
class AdaptiveKLController:
    """Adapts the KL penalty coefficient toward `hparams.target`."""

    def __init__(self, init_kl_coef: float, hparams: AdaptiveKLParams):
        self.value = init_kl_coef
        self.hparams = hparams

    def update(self, current, n_steps):
        """Nudge `self.value` proportionally to how far `current` KL is from target."""
        # Clip the proportional error so one update never moves the coefficient too far.
        error = np.clip(current / self.hparams.target - 1, -0.2, 0.2)
        self.value *= 1 + error * n_steps / self.hparams.horizon
+
+
def whiten(values, shift_mean=True):
    """Normalize `values` to unit variance; restore the original mean when
    `shift_mean` is False.

    Uses the biased (population) variance, matching TF's `tf.nn.moments`.
    """
    mean = jnp.mean(values)
    var = jnp.var(values)
    normalized = (values - mean) * jax.lax.rsqrt(var + 1e-8)
    return normalized if shift_mean else normalized + mean
+
+
class ScalarHead(nn.Module):
    """Zero-initialized linear head mapping `head_input_size` features to a scalar.

    The zero-stddev kernel and zero bias make the head output 0 for every
    input at initialization (neutral initial value estimates).
    """

    # Expected feature dimension of the incoming latents.
    head_input_size: int

    @nn.compact
    def __call__(self, x):
        # Consistency with RewardHead: fail fast on a feature-size mismatch
        # instead of silently accepting any input width.
        assert x.shape[-1] == self.head_input_size
        x = nn.Dense(
            1,
            kernel_init=nn.initializers.normal(stddev=0),
            bias_init=nn.initializers.zeros_init(),
        )(x)
        return x
+
+
class RewardHead(nn.Module):
    """Linear head producing a scalar reward, with a learnable affine rescale.

    `reward_gain`/`reward_bias` let the reward signal be renormalized without
    touching the dense layer's weights.
    """

    # Expected feature dimension of the incoming latents.
    head_input_size: int

    @nn.compact
    def __call__(self, x):
        assert x.shape[-1] == self.head_input_size
        x = nn.Dense(
            1,
            # kernel scale shrinks with fan-in: stddev = 1/sqrt(d_in + 1)
            kernel_init=nn.initializers.normal(stddev=1 / np.sqrt(self.head_input_size + 1)),
            bias_init=nn.initializers.zeros_init(),
        )(x)
        reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
        reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
        x = x * reward_gain + reward_bias
        return x
+
+
@flax.struct.dataclass
class LMBackboneWithScalarHeadParams:
    """Parameters for the language model backbone and a scalar head."""

    # FrozenDict of the transformer backbone's parameters ({"params": ...}).
    lm_backbone_params: flax.core.FrozenDict
    # FrozenDict of the head's parameters (ScalarHead or RewardHead).
    head_params: flax.core.FrozenDict
+
+
# a pytorch dataset
class MyDataset(IterableDataset):
    """Streams fixed-length tokenized queries from a text generator.

    Each yielded item is `tokenizer.pad(...)` output whose `input_ids` is
    exactly `query_length` tokens. When `start_text`/`end_text` are given
    (each must map to a single vocabulary token), texts are trimmed to start
    just after the first start token and end at the last end token; texts
    lacking a required token are skipped entirely.
    """

    def __init__(self, generator, tokenizer, query_length, seed, start_text=None, end_text=None):
        self.generator = generator
        self.tokenizer = tokenizer
        self.query_length = query_length
        self.start_text = start_text
        self.end_text = end_text
        self.seed = seed
        token_to_index = tokenizer.get_vocab()
        # Falsy start_text/end_text (None or "") disables the corresponding trim.
        self.start_token = token_to_index[start_text] if self.start_text else None
        self.end_token = token_to_index[end_text] if self.end_text else None

    def __iter__(self):
        for text in self.generator("train", self.seed, shuffle=True):
            tokens = self.tokenizer.encode(text)
            if self.start_token is not None:
                try:
                    # Drop everything up to and including the first start token.
                    first_index = tokens.index(self.start_token) + 1
                    if first_index < len(tokens):
                        tokens = tokens[first_index:]
                # `list.index` raises ValueError when the token is absent; the
                # previous bare `except:` also swallowed KeyboardInterrupt etc.
                except ValueError:
                    continue
            tokens = tokens[: self.query_length]
            if self.end_token is not None:
                try:
                    # Keep everything up to and including the last end token.
                    last_index = len(tokens) - tokens[::-1].index(self.end_token)
                    tokens = tokens[:last_index]
                except ValueError:
                    continue
            output = self.tokenizer.pad(
                {"input_ids": tokens},
                padding="max_length",
                max_length=self.query_length,
                return_tensors="pt",
                return_attention_mask=True,
            )
            yield output
+
+
def right_padding_to_left_padding(tokens, pad_id):
    """Move each row's padding tokens to the left side.

    Every occurrence of `pad_id` in a row is gathered in front of the row's
    non-pad tokens; row length is preserved. Returns a numpy array.
    """
    left_padded = []
    for row in tokens:
        non_pad = [tok for tok in row if tok != pad_id]
        left_padded.append([pad_id] * (len(row) - len(non_pad)) + non_pad)
    return np.array(left_padded)
+
+
def ceil_div(a, b):
    """Integer division rounding up: smallest q with q * b >= a (for b > 0)."""
    quotient, _remainder = divmod(a - 1, b)
    return quotient + 1
+
+
def exact_div(a, b):
    """Divide `a` by `b`, raising ValueError unless the division is exact."""
    quotient, remainder = divmod(a, b)
    if remainder != 0:
        raise ValueError(f"Inexact division: {a} / {b} = {a / b}")
    return quotient
+
+
def prepare_reward_forward(args, tokenizer):
    """Prepare the forward pass of the reward model and parameters.

    Builds the reward LM backbone plus `RewardHead`, loads trained parameters
    from the orbax checkpoint at `args.rewards.trained_model` when given,
    otherwise initializes fresh parameters. Returns `reward_forward` with the
    parameters already bound via `functools.partial`.
    """

    lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
    scalar_head = RewardHead(head_input_size=lm_backbone.config.hidden_size)

    def reward_forward(
        params: LMBackboneWithScalarHeadParams,
        query_responses_ids: jnp.ndarray,
    ):
        """Get reward for each query--response pair."""
        assert query_responses_ids.ndim == 2

        # mask out padding tokens
        attention_mask = query_responses_ids != tokenizer.pad_token_id
        query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)

        # assign position ids (pads get position 0, real tokens count from 0)
        position_ids = attention_mask.cumsum(1) - attention_mask

        reward_latents = lm_backbone.module.apply(
            variables=params.lm_backbone_params,
            input_ids=query_responses_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_hidden_states=True,
        ).hidden_states[-1]
        # reward_latents: [batch_size, length, hidden_size]

        # Callers left-pad (via right_padding_to_left_padding), so position -1
        # holds each row's final real token.
        last_reward_latents = reward_latents[:, -1, :]
        # last_reward_latents: [batch_size, hidden_size]

        reward = scalar_head.apply(variables=params.head_params, x=last_reward_latents)
        # reward: [batch_size, 1]
        return reward

    if args.rewards.trained_model:
        orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()
        reward_state_params = orbax_checkpointer.restore(args.rewards.trained_model)["reward_model"]["params"]
        reward_params = LMBackboneWithScalarHeadParams(
            lm_backbone_params=flax.core.FrozenDict({"params": reward_state_params["lm_backbone_params"]["params"]}),
            head_params=flax.core.FrozenDict({"params": reward_state_params["head_params"]["params"]}),
        )
        pprint(f"Loaded pretrained reward model from {args.rewards.trained_model}")
    else:
        key = jax.random.PRNGKey(args.seed)
        key, init_key = jax.random.split(key, 2)
        reward_params = LMBackboneWithScalarHeadParams(
            lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
            head_params=flax.core.FrozenDict(
                scalar_head.init(
                    init_key,
                    jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
                )
            ),
        )

    return functools.partial(reward_forward, params=reward_params)
+
+
def prepare_policy_forward_and_policy_generate(args, tokenizer):
    """Prepare the forward pass of the policy model and parameters.

    Returns `(policy_forward, policy_generate, policy_params)`: a forward pass
    producing LM outputs plus per-position value estimates, a sampler producing
    fixed-length responses, and freshly initialized parameters.
    """

    lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
    # Disable `eos_token_id` so sampling never stops early; min == max new
    # tokens below makes every response exactly `response_length` long.
    # `pad_token_id` is set (not disabled) so `generate` accepts padded queries.
    lm_backbone.generation_config.eos_token_id = None
    lm_backbone.generation_config.pad_token_id = tokenizer.pad_token_id
    scalar_head = ScalarHead(head_input_size=lm_backbone.config.hidden_size)

    generation_config = GenerationConfig(
        max_new_tokens=args.task.response_length,
        min_new_tokens=args.task.response_length,
        temperature=args.task.temperature,
        top_k=0.0,
        top_p=1.0,
        do_sample=True,
        pad_token_id=tokenizer.pad_token_id,
    )

    def policy_forward(
        params: LMBackboneWithScalarHeadParams,
        input_ids: jnp.ndarray,
    ):
        """Run the policy backbone; return (LM outputs, per-position values)."""
        assert input_ids.ndim == 2
        # shape: [batch_size, length]

        # mask out padding tokens
        attention_mask = input_ids != tokenizer.pad_token_id
        input_ids = jnp.where(attention_mask, input_ids, 0)

        # assign position ids
        position_ids = attention_mask.cumsum(1) - attention_mask

        lm_backbone_out = lm_backbone.module.apply(
            variables=params.lm_backbone_params,
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_hidden_states=True,
        )

        value_latents = lm_backbone_out.hidden_states[-1]
        # shape: [batch_size, length, hidden_size]

        values = scalar_head.apply(variables=params.head_params, x=value_latents)
        # shape: [batch_size, length, 1]
        return lm_backbone_out, values

    def policy_generate(
        params: flax.core.FrozenDict,  # the backbone params only (callers pass `.lm_backbone_params`)
        queries: jnp.ndarray,
    ):
        """Sample `response_length` tokens per query; return [queries | responses]."""
        input_ids = queries
        attention_mask = input_ids != tokenizer.pad_token_id
        input_ids = jnp.where(attention_mask, queries, 0)
        output = lm_backbone.generate(
            params=params["params"],
            input_ids=input_ids,
            generation_config=generation_config,
            attention_mask=attention_mask.astype("i4"),
            return_dict_in_generate=True,
        )
        context_length = input_ids.shape[1]
        # Re-attach the original queries (with their pad ids intact) in front
        # of the newly generated tokens.
        return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)

    key = jax.random.PRNGKey(args.seed)
    key, init_key = jax.random.split(key, 2)
    policy_params = LMBackboneWithScalarHeadParams(
        lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
        head_params=flax.core.FrozenDict(
            scalar_head.init(
                init_key,
                jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
            )
        ),
    )

    return policy_forward, policy_generate, policy_params
+
+
+@flax.struct.dataclass
+class RolloutStatistics:
+ returns: jnp.array
+ values: jnp.array
+ advantage: jnp.array
+ responses: jnp.array
+ query_responses: jnp.array
+ logprobs: jnp.array
+
+
+@flax.struct.dataclass
+class RLStatistics:
+ approxkl: jnp.array
+ entropy: jnp.array
+ pg_loss: jnp.array
+ pg_clipfrac: jnp.array
+ vf_losses1: jnp.array
+ vf_loss: jnp.array
+ vf_clipfrac: jnp.array
+ ratio: jnp.array
+ loss: jnp.array
+
+
+def train_step(policy_state, mb_stats, args):
+ def loss(params):
+ # mb_stats.query_responses: [local_micro_batch_size, query_length + response_length]
+ output, vpred_temp = policy_state.apply_fn(params, mb_stats.query_responses)
+
+ # vpred_temp: [local_micro_batch_size, query_length + response_length, 1]
+ vpred = jnp.squeeze(vpred_temp[:, args.task.query_length - 1 : -1, :], axis=-1)
+ # vpred: [local_micro_batch_size, response_length]
+ vpredclipped = jnp.clip(
+ vpred,
+ mb_stats.values - args.ppo.cliprange_value,
+ mb_stats.values + args.ppo.cliprange_value,
+ )
+ vf_losses1 = jnp.square(vpred - mb_stats.returns)
+ vf_losses2 = jnp.square(vpredclipped - mb_stats.returns)
+ vf_loss = 0.5 * jnp.maximum(vf_losses1, vf_losses2).mean()
+ vf_clipfrac = (vf_losses2 > vf_losses1).astype(jnp.float32).mean()
+
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ logits /= args.task.temperature
+ new_logprobs = -optax.softmax_cross_entropy_with_integer_labels(logits, mb_stats.responses)
+
+ logprobs_diff = new_logprobs - mb_stats.logprobs
+ ratio = jnp.exp(logprobs_diff)
+ pg_losses = -mb_stats.advantage * ratio
+ pg_losses2 = -mb_stats.advantage * jnp.clip(ratio, 1.0 - args.ppo.cliprange, 1.0 + args.ppo.cliprange)
+
+ pg_loss = jnp.maximum(pg_losses, pg_losses2).mean()
+ pg_clipfrac = (pg_losses2 > pg_losses).astype(jnp.float32).mean()
+
+ pd = jax.nn.softmax(logits, axis=-1)
+ entropy = jax.nn.logsumexp(logits, axis=-1) - jnp.sum(pd * logits, axis=-1)
+
+ approxkl = 0.5 * ((logprobs_diff) ** 2).mean()
+ loss = pg_loss + args.ppo.vf_coef * vf_loss
+
+ rl_stats = RLStatistics(
+ vf_loss=vf_loss,
+ vf_clipfrac=vf_clipfrac,
+ pg_loss=pg_loss,
+ pg_clipfrac=pg_clipfrac,
+ approxkl=approxkl,
+ loss=loss,
+ entropy=entropy.mean(),
+ ratio=ratio.mean(),
+ vf_losses1=vf_losses1.mean(),
+ )
+ rl_stats = jax.lax.pmean(rl_stats, "batch")
+ return loss, rl_stats
+
+ grad_fn = jax.value_and_grad(loss, has_aux=True)
+ (loss, rl_stats), grads = grad_fn(policy_state.params)
+ grads = jax.lax.pmean(grads, "batch")
+ policy_state = policy_state.apply_gradients(grads=grads)
+ return policy_state, rl_stats
+
+
+def linear_schedule(optimizer_step, args):
+ """anneal learning rate linearly to reach 0 after one epoch."""
+ update = 1 + optimizer_step // (args.ppo.noptepochs * args.ppo.nminibatches * args.ppo.gradient_accumulation_steps)
+ frac = 1.0 - (update - 1.0) / args.ppo.num_updates
+ lrnow = frac * args.ppo.lr
+ return lrnow
+
+
+def train(args: Args):
+ local_devices = jax.local_devices()
+ global_devices = jax.devices()
+ args.ppo.world_size = jax.process_count()
+ learner_devices = [local_devices[d_id] for d_id in args.learner_device_ids]
+ global_learner_decices = [
+ global_devices[d_id + process_index * len(local_devices)]
+ for process_index in range(args.ppo.world_size)
+ for d_id in args.learner_device_ids
+ ]
+ pprint({"global_learner_decices": global_learner_decices})
+ args.global_learner_decices = [str(item) for item in global_learner_decices]
+ args.learner_devices = [str(item) for item in learner_devices]
+ args.local_rank = jax.process_index()
+ args.ppo.batch_size = int(args.ppo.local_batch_size * len(args.learner_devices) * args.ppo.world_size)
+ args.ppo.minibatch_size = exact_div(args.ppo.batch_size, args.ppo.nminibatches)
+ args.ppo.local_mini_batch_size = exact_div(args.ppo.local_batch_size, args.ppo.nminibatches)
+ args.ppo.local_micro_batch_size = exact_div(args.ppo.local_mini_batch_size, args.ppo.gradient_accumulation_steps)
+ if args.ppo.whiten_rewards:
+ assert (
+ args.ppo.local_mini_batch_size >= 8
+ ), f"Per-rank minibatch size {args.ppo.local_mini_batch_size} is insufficient for whitening"
+ # `per_rank_rollout_batch_size` is our `args.ppo.local_batch_size`
+ # `per_rank_minibatch_size` is our `args.ppo.local_mini_batch_size`
+ args.ppo.num_updates = args.ppo.total_episodes // args.ppo.batch_size
+
+ console = Console(force_terminal=True)
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+ writer = SimpleNamespace() # dummy writer
+ writer.add_scalar = lambda x, y, z: None
+ writer.add_histogram = lambda x, y, z: None
+
+ if args.local_rank == 0:
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+ writer = SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+ local_seed = args.seed + args.local_rank * 100003 # Prime
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.base_model,
+ padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ reward_forward = prepare_reward_forward(args, tokenizer)
+ policy_forward, policy_generate, policy_params = prepare_policy_forward_and_policy_generate(args, tokenizer)
+ _, _, ref_policy_params = prepare_policy_forward_and_policy_generate(args, tokenizer)
+
+ p_whiten_no_shift_mean = jax.pmap(functools.partial(whiten, shift_mean=False))
+ p_whiten_shift_mean = jax.pmap(functools.partial(whiten, shift_mean=True))
+ p_reward_forward = jax.pmap(reward_forward)
+ p_train_step = jax.pmap(
+ functools.partial(train_step, args=args),
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+
+ @jax.pmap
+ def p_get_response_values_and_logprobs(policy_state_params, queries):
+ query_responses = policy_generate(
+ params=policy_state_params.lm_backbone_params,
+ queries=queries,
+ )
+ # query_responses: [local_batch_size, query_length + response_length]
+ responses = query_responses[:, args.task.query_length :]
+
+ output, full_values = policy_forward(policy_state_params, query_responses)
+ values = full_values[:, args.task.query_length - 1 : -1].squeeze(-1)
+ # values: [local_batch_size, response_length]
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ # logits: [local_batch_size, response_length, vocab_size]
+ logits /= args.task.temperature
+ all_logprobs = jax.nn.log_softmax(logits, axis=-1)
+ # all_logprobs: [local_batch_size, response_length, vocab_size]
+ logprobs = jnp.take_along_axis(all_logprobs, responses[..., None], -1).squeeze(-1)
+ # logprobs: [local_batch_size, response_length]
+
+ ref_output, _ = policy_forward(ref_policy_params, query_responses)
+ ref_logits = ref_output.logits[:, args.task.query_length - 1 : -1, :]
+ # ref_logits: [local_batch_size, response_length, vocab_size]
+ ref_logits /= args.task.temperature
+ ref_all_logprobs = jax.nn.log_softmax(ref_logits, axis=-1)
+ ref_logprobs = jnp.take_along_axis(ref_all_logprobs, responses[..., None], -1).squeeze(-1)
+ # ref_logprobs: [local_batch_size, response_length]
+ return query_responses, values, logprobs, ref_logprobs
+
+ if args.use_tensorflow_adam:
+ adam = adam_tf_style
+ else:
+ adam = optax.adam
+
+ optimizer = adam(
+ learning_rate=functools.partial(linear_schedule, args=args),
+ eps=args.ppo.eps,
+ )
+
+ optimizer = optax.MultiSteps(optimizer, args.ppo.gradient_accumulation_steps)
+
+ policy_state = TrainState.create(apply_fn=policy_forward, params=policy_params, tx=optimizer)
+ policy_state = jax_utils.replicate(policy_state)
+
+ del policy_params
+
+ dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=local_seed,
+ start_text=args.task.start_text,
+ end_text=args.task.end_text,
+ )
+ dataloader = DataLoader(dataset, batch_size=args.ppo.batch_size)
+ iter_dataloader = iter(dataloader)
+ kl_ctl = AdaptiveKLController(args.rewards.kl_coef, hparams=args.rewards.adaptive_kl)
+
+ print("===training policy===")
+ global_step = 0
+ approxkls_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ pg_clipfracs_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ pg_losses_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ vf_losses_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ vf_clipfrac_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ entropies_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ for update in range(1, args.ppo.num_updates + 1):
+ global_step += 1 * args.ppo.batch_size
+ data = next(iter_dataloader)
+ queries = right_padding_to_left_padding(data["input_ids"], tokenizer.pad_token_id)
+ queries = common_utils.shard(queries)
+ # queries: [num_device, local_batch_size, query_length]
+
+ query_responses, values, logprobs, ref_logprobs = p_get_response_values_and_logprobs(policy_state.params, queries)
+ responses = query_responses[..., args.task.query_length :]
+
+ # **Response Processing**
+ # 1. truncate at the first occurrence of `truncate_token` that appears at or after
+ # position truncate_after in the responses
+ # https://github.com/openai/lm-human-preferences/blob/cbfd210bb8b08f6bc5c26878c10984b90f516c66/lm_human_preferences/train_policy.py#L378
+ truncate_token_mask = responses == args.task.truncate_token
+ # truncate_token_mask: [num_device, local_batch_size, response_length]
+ truncate_after_or_token_mask = jnp.concatenate(
+ [
+ jnp.zeros_like(truncate_token_mask)[:, :, : args.task.truncate_after],
+ truncate_token_mask[:, :, args.task.truncate_after :],
+ ],
+ axis=-1,
+ )
+ truncate_mask = (jnp.cumsum(truncate_after_or_token_mask, axis=-1) - truncate_after_or_token_mask).astype("bool")
+ postprocessed_responses = jnp.where(
+ truncate_mask,
+ jnp.full_like(responses, tokenizer.pad_token_id),
+ responses,
+ )
+ # postprocessed_responses: [num_device, local_batch_size, response_length]
+ del truncate_token_mask, truncate_after_or_token_mask, truncate_mask
+
+ # 2. run reward model on the truncated responses
+ postprocessed_query_responses = np.concatenate((queries, postprocessed_responses), axis=-1)
+ # postprocessed_query_responses: [num_device, local_batch_size, query_length + response_length]
+ postprocessed_query_responses = einops.rearrange(
+ right_padding_to_left_padding(
+ einops.rearrange(postprocessed_query_responses, "d b l -> (d b) l"), tokenizer.pad_token_id
+ ),
+ "(d b) l -> d b l",
+ d=len(args.learner_devices),
+ )
+ scores = p_reward_forward(query_responses_ids=postprocessed_query_responses).squeeze(-1)
+ # scores: [num_device, local_batch_size]
+
+ # 3. filter response. Ensure that the sample contains truncate_token
+ # responses not passing that filter will receive a low (fixed) score
+ # only query humans on responses that pass that filter
+ matches_token = postprocessed_responses[..., args.task.truncate_after :] == args.task.truncate_token
+ # matches_token: [num_device, local_batch_size, response_length - args.task.truncate_after]
+
+ filter_mask = jnp.any(matches_token, axis=-1)
+ # filter_mask: [num_device, local_batch_size]
+
+ scores = jnp.where(
+ filter_mask,
+ scores,
+ jnp.full_like(scores, args.task.penalty_reward_value),
+ )
+ # scores: [num_device, local_batch_size]
+ del matches_token, filter_mask
+
+ # 4. compute rewards
+ kl = logprobs - ref_logprobs
+ # kl: [num_device, local_batch_size, response_length]
+ non_score_reward = -kl_ctl.value * kl
+ rewards = non_score_reward
+ rewards = rewards.at[..., -1].add(scores)
+ # rewards: [num_device, local_batch_size, response_length]
+
+ # 5. whiten rewards
+ if args.ppo.whiten_rewards:
+ rewards = p_whiten_no_shift_mean(rewards)
+ try:
+ sample_kl = kl[0][0].sum().item()
+ console.print(
+ f"[green][bold]{'Query'}:[/]\n"
+ + f"[green]{ tokenizer.decode(queries[0][0], skip_special_tokens=True)}[/]\n\n"
+ + f"[blue][bold]{'Raw response'}:[/]\n"
+ + f"[blue]{tokenizer.decode(responses[0][0], skip_special_tokens=True)}[/]\n\n"
+ + f"[yellow][bold]{'Processed response'}:[/]\n"
+ + f"[yellow]{tokenizer.decode(postprocessed_responses[0][0], skip_special_tokens=True)}[/]\n\n"
+ + f"[red]score: {scores[0][0]}, kl: {kl[0][0].sum().item()}, total reward: {scores[0][0] - kl_ctl.value * sample_kl} [/]"
+ )
+ except Exception as e:
+ print(e)
+ del postprocessed_query_responses
+
+ # 6. compute advantages and returns
+ lastgaelam = 0
+ advantages_reversed = []
+ gen_length = args.task.response_length
+
+ for t in reversed(range(gen_length)):
+ nextvalues = values[..., t + 1] if t < gen_length - 1 else 0.0
+ delta = rewards[..., t] + args.ppo.gamma * nextvalues - values[..., t]
+ lastgaelam = delta + args.ppo.gamma * args.ppo.lam * lastgaelam
+ advantages_reversed.append(lastgaelam)
+ # advantages_reversed is a list of 2D arrays
+ # Each array has the shape [num_devices, local_batch_size]
+
+ advantages = jnp.stack(advantages_reversed[::-1], axis=-1)
+ # advantages: [num_device, local_batch_size, response_length]
+ returns = advantages + values
+ # returns: [num_device, local_batch_size, response_length]
+ advantages = p_whiten_shift_mean(advantages)
+
+ return_mean, return_var = returns.mean(), returns.var()
+ value_mean, value_var = values.mean(), values.var()
+
+ # Do multiple epochs of PPO training, with a fresh random shuffle in each epoch
+ for ppo_epoch_idx in range(args.ppo.noptepochs):
+ b_inds = np.random.permutation(args.ppo.local_batch_size)
+ minibatch_idx = 0
+ for mini_batch_start in range(0, args.ppo.local_batch_size, args.ppo.local_mini_batch_size):
+ mini_batch_end = mini_batch_start + args.ppo.local_mini_batch_size
+ mini_batch_inds = b_inds[mini_batch_start:mini_batch_end]
+ gradient_accumulation_idx = 0
+ for micro_batch_start in range(0, args.ppo.local_mini_batch_size, args.ppo.local_micro_batch_size):
+ micro_batch_end = micro_batch_start + args.ppo.local_micro_batch_size
+ micro_batch_inds = mini_batch_inds[micro_batch_start:micro_batch_end]
+ mb_returns = returns[:, micro_batch_inds, :]
+ mb_advantage = advantages[:, micro_batch_inds, :]
+ mb_values = values[:, micro_batch_inds, :]
+ mb_responses = responses[:, micro_batch_inds, :]
+ mb_query_responses = query_responses[:, micro_batch_inds, :]
+ mb_logprobs = logprobs[:, micro_batch_inds, :]
+ mb_stats = RolloutStatistics(
+ returns=mb_returns,
+ values=mb_values,
+ advantage=mb_advantage,
+ responses=mb_responses,
+ query_responses=mb_query_responses,
+ logprobs=mb_logprobs,
+ )
+
+ # before training step
+ policy_state, rl_stats = p_train_step(policy_state, mb_stats)
+ rl_stats = common_utils.get_metrics([rl_stats])
+
+ approxkls_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = rl_stats.approxkl
+ pg_clipfracs_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = rl_stats.pg_clipfrac
+ pg_losses_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = rl_stats.pg_loss
+ vf_losses_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = rl_stats.vf_loss
+ vf_clipfrac_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = rl_stats.vf_clipfrac
+ entropies_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = rl_stats.entropy
+
+ gradient_accumulation_idx += 1
+ minibatch_idx += 1
+ if args.local_rank == 0:
+ console.print(
+ f"ppo_epoch_idx",
+ ppo_epoch_idx,
+ "approxkl",
+ rl_stats.approxkl.item(),
+ "pg_loss",
+ rl_stats.pg_loss.item(),
+ "pg_clipfrac",
+ rl_stats.pg_clipfrac.item(),
+ "ratio",
+ rl_stats.ratio.item(),
+ ) | Everything here goes into jitted function |
lm-human-preference-details | github_2023 | python | 18 | vwxyzjn | vwxyzjn | @@ -0,0 +1,975 @@
+import functools
+import os
+import time
+from dataclasses import asdict, dataclass, field
+from types import SimpleNamespace
+from typing import List, Optional
+
+import einops
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+import optax
+import orbax
+import tyro
+from flax import jax_utils
+from flax.training import common_utils, orbax_utils
+from flax.training.train_state import TrainState
+from optax import ScaleByAdamState, update_moment, update_moment_per_elem_norm
+from optax._src import base, combine, numerics, utils
+from optax._src.alias import _scale_by_learning_rate
+from rich.console import Console
+from rich.pretty import pprint
+from torch.utils.data import DataLoader, IterableDataset
+from torch.utils.tensorboard import SummaryWriter
+from transformers import AutoTokenizer, FlaxAutoModelForCausalLM, GenerationConfig
+
+from lm_human_preference_details.data import DATASET
+
+
+@dataclass
+class AdaptiveKLParams:
+ target: float = 6.0
+ horizon: int = 10000 # in episodes
+
+
+@dataclass
+class RewardHParams:
+ kl_coef: float = 0.15
+ adaptive_kl: Optional[AdaptiveKLParams] = field(default_factory=AdaptiveKLParams)
+ trained_model: Optional[str] = "models/"
+ label_dataset: tyro.conf.Suppress[Optional[str]] = None
+
+
+@dataclass
+class PpoHParams:
+ total_episodes: int = 1000000
+ local_batch_size: int = 64
+ local_mini_batch_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ mini_batch_size: tyro.conf.Suppress[int] = None
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ minibatch_size: tyro.conf.Suppress[int] = None
+ num_updates: tyro.conf.Suppress[int] = None
+ nminibatches: int = 1
+ noptepochs: int = 4
+ lr: float = 0.00001
+ eps: float = 1e-5
+ vf_coef: float = 0.1
+ cliprange: float = 0.2
+ cliprange_value: float = 0.2
+ gamma: float = 1
+ lam: float = 0.95
+ whiten_rewards: bool = True
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # Truncate response after the first occurrence of this token at or after index after when sampling.
+ truncate_token: int = 13
+ truncate_after: int = 16
+ penalty_reward_value: int = -1
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "cleanrl"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ print_sample_output_freq: int = 0
+ """How often to print sample output"""
+ save_path: str = "models/policy/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = True
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ rewards: RewardHParams = field(default_factory=RewardHParams)
+ ppo: PpoHParams = field(default_factory=PpoHParams)
+
+ # distributed settings
+ local_rank: int = 0
+ """the rank of this process"""
+ learner_device_ids: List[int] = field(default_factory=lambda: [0])
+ "the device ids that script will use"
+ learner_devices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the devices that script will use"""
+ global_learner_decices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the total devices (across all nodes and machines) that script will use"""
+
+
+def scale_by_adam_tf_style(
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+) -> base.GradientTransformation:
+ """Rescale updates according to the Adam algorithm.
+
+ References:
+ [Kingma et al, 2014](https://arxiv.org/abs/1412.6980)
+
+ Args:
+ b1: Decay rate for the exponentially weighted average of grads.
+ b2: Decay rate for the exponentially weighted average of squared grads.
+ eps: Term added to the denominator to improve numerical stability.
+ eps_root: Term added to the denominator inside the square-root to improve
+ numerical stability when backpropagating gradients through the rescaling.
+ mu_dtype: Optional `dtype` to be used for the first order accumulator; if
+ `None` then the `dtype` is inferred from `params` and `updates`.
+
+ Returns:
+ A `GradientTransformation` object.
+ """
+
+ mu_dtype = utils.canonicalize_dtype(mu_dtype)
+
+ def init_fn(params):
+ mu = jax.tree_util.tree_map(lambda t: jnp.zeros_like(t, dtype=mu_dtype), params) # First moment
+ nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Second moment
+ return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)
+
+ def update_fn(updates, state, params=None):
+ del params
+ mu = update_moment(updates, state.mu, b1, 1)
+ nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
+ count_inc = numerics.safe_int32_increment(state.count)
+
+ ### `optax` default adam implementation
+ # mu_hat = bias_correction(mu, b1, count_inc)
+ # nu_hat = bias_correction(nu, b2, count_inc)
+ # updates = jax.tree_util.tree_map(
+ # lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
+ ### Tensorflow adam implementation
+ updates = jax.tree_util.tree_map(
+ lambda m, v: (jnp.sqrt(1 - b2**count_inc) / (1 - b1**count_inc)) * m / (jnp.sqrt(v + eps_root) + eps),
+ mu,
+ nu,
+ ) #
+ mu = utils.cast_tree(mu, mu_dtype)
+ return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)
+
+ return base.GradientTransformation(init_fn, update_fn)
+
+
+def adam_tf_style(
+ learning_rate,
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+):
+ return combine.chain(
+ scale_by_adam_tf_style(b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype),
+ _scale_by_learning_rate(learning_rate),
+ )
+
+
+class AdaptiveKLController:
+ def __init__(self, init_kl_coef: float, hparams: AdaptiveKLParams):
+ self.value = init_kl_coef
+ self.hparams = hparams
+
+ def update(self, current, n_steps):
+ target = self.hparams.target
+ proportional_error = np.clip(current / target - 1, -0.2, 0.2)
+ mult = 1 + proportional_error * n_steps / self.hparams.horizon
+ self.value *= mult
+
+
+def whiten(values, shift_mean=True):
+ # `unbiased=False` matches TF `tf.nn.moments`'s setting
+ mean, var = jnp.mean(values), jnp.var(values)
+ whitened = (values - mean) * jax.lax.rsqrt(var + 1e-8)
+ if not shift_mean:
+ whitened += mean
+ return whitened
+
+
+class ScalarHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=0),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ return x
+
+
+class RewardHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=1 / np.sqrt(self.head_input_size + 1)),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+ x = x * reward_gain + reward_bias
+ return x
+
+
+@flax.struct.dataclass
+class LMBackboneWithScalarHeadParams:
+ """Parameters for the language model backbone and a scalar head."""
+
+ lm_backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+# a pytorch dataset
+class MyDataset(IterableDataset):
+ def __init__(self, generator, tokenizer, query_length, seed, start_text=None, end_text=None):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ # Convert from right padding to left padding.
+ return np.array([[pad_id] * (row == pad_id).sum() + [x for x in row if x != pad_id] for row in tokens])
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a / b}")
+ return q
+
+
+def prepare_reward_forward(args, tokenizer):
+ """Prepare the forward pass of the reward model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ scalar_head = RewardHead(head_input_size=lm_backbone.config.hidden_size)
+
+ def reward_forward(
+ params: LMBackboneWithScalarHeadParams,
+ query_responses_ids: jnp.ndarray,
+ ):
+ """Get reward for each queiry--response pair."""
+ assert query_responses_ids.ndim == 2
+
+ # mask out padding tokens
+ attention_mask = query_responses_ids != tokenizer.pad_token_id
+ query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ reward_latents = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=query_responses_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ ).hidden_states[-1]
+ # reward_latents: [batch_size, length, hidden_size]
+
+ last_reward_latents = reward_latents[:, -1, :]
+ # last_reward_latents: [batch_size, hidden_size]
+
+ reward = scalar_head.apply(variables=params.head_params, x=last_reward_latents)
+ # reward: [batch_size, 1]
+ return reward
+
+ if args.rewards.trained_model:
+ orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()
+ reward_state_params = orbax_checkpointer.restore(args.rewards.trained_model)["reward_model"]["params"]
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": reward_state_params["lm_backbone_params"]["params"]}),
+ head_params=flax.core.FrozenDict({"params": reward_state_params["head_params"]["params"]}),
+ )
+ pprint(f"Loaded pretrained reward model from {args.rewards.trained_model}")
+ else:
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return functools.partial(reward_forward, params=reward_params)
+
+
+def prepare_policy_forward_and_policy_generate(args, tokenizer):
+ """Prepare the forward pass of the policy model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ # disable `pad_token_id` and `eos_token_id` because we just want to
+ # generate tokens without truncation / padding
+ lm_backbone.generation_config.eos_token_id = None
+ lm_backbone.generation_config.pad_token_id = tokenizer.pad_token_id
+ scalar_head = ScalarHead(head_input_size=lm_backbone.config.hidden_size)
+
+ generation_config = GenerationConfig(
+ max_new_tokens=args.task.response_length,
+ min_new_tokens=args.task.response_length,
+ temperature=args.task.temperature,
+ top_k=0.0,
+ top_p=1.0,
+ do_sample=True,
+ pad_token_id=tokenizer.pad_token_id,
+ )
+
+ def policy_forward(
+ params: LMBackboneWithScalarHeadParams,
+ input_ids: jnp.ndarray,
+ ):
+ """Get reward for input_ids."""
+ assert input_ids.ndim == 2
+ # shape: [batch_size, length]
+
+ # mask out padding tokens
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, input_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ lm_backbone_out = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ )
+
+ value_latents = lm_backbone_out.hidden_states[-1]
+ # shape: [batch_size, length, hidden_size]
+
+ values = scalar_head.apply(variables=params.head_params, x=value_latents)
+ # shape: [batch_size, length, 1]
+ return lm_backbone_out, values
+
+ def policy_generate(
+ params: LMBackboneWithScalarHeadParams,
+ queries: jnp.ndarray,
+ ):
+ input_ids = queries
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = lm_backbone.generate(
+ params=params["params"],
+ input_ids=input_ids,
+ generation_config=generation_config,
+ attention_mask=attention_mask.astype("i4"),
+ return_dict_in_generate=True,
+ )
+ context_length = input_ids.shape[1]
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ policy_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return policy_forward, policy_generate, policy_params
+
+
+@flax.struct.dataclass
+class RolloutStatistics:
+ returns: jnp.array
+ values: jnp.array
+ advantage: jnp.array
+ responses: jnp.array
+ query_responses: jnp.array
+ logprobs: jnp.array
+
+
+@flax.struct.dataclass
+class RLStatistics:
+ approxkl: jnp.array
+ entropy: jnp.array
+ pg_loss: jnp.array
+ pg_clipfrac: jnp.array
+ vf_losses1: jnp.array
+ vf_loss: jnp.array
+ vf_clipfrac: jnp.array
+ ratio: jnp.array
+ loss: jnp.array
+
+
+def train_step(policy_state, mb_stats, args):
+ def loss(params):
+ # mb_stats.query_responses: [local_micro_batch_size, query_length + response_length]
+ output, vpred_temp = policy_state.apply_fn(params, mb_stats.query_responses)
+
+ # vpred_temp: [local_micro_batch_size, query_length + response_length, 1]
+ vpred = jnp.squeeze(vpred_temp[:, args.task.query_length - 1 : -1, :], axis=-1)
+ # vpred: [local_micro_batch_size, response_length]
+ vpredclipped = jnp.clip(
+ vpred,
+ mb_stats.values - args.ppo.cliprange_value,
+ mb_stats.values + args.ppo.cliprange_value,
+ )
+ vf_losses1 = jnp.square(vpred - mb_stats.returns)
+ vf_losses2 = jnp.square(vpredclipped - mb_stats.returns)
+ vf_loss = 0.5 * jnp.maximum(vf_losses1, vf_losses2).mean()
+ vf_clipfrac = (vf_losses2 > vf_losses1).astype(jnp.float32).mean()
+
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ logits /= args.task.temperature
+ new_logprobs = -optax.softmax_cross_entropy_with_integer_labels(logits, mb_stats.responses)
+
+ logprobs_diff = new_logprobs - mb_stats.logprobs
+ ratio = jnp.exp(logprobs_diff)
+ pg_losses = -mb_stats.advantage * ratio
+ pg_losses2 = -mb_stats.advantage * jnp.clip(ratio, 1.0 - args.ppo.cliprange, 1.0 + args.ppo.cliprange)
+
+ pg_loss = jnp.maximum(pg_losses, pg_losses2).mean()
+ pg_clipfrac = (pg_losses2 > pg_losses).astype(jnp.float32).mean()
+
+ pd = jax.nn.softmax(logits, axis=-1)
+ entropy = jax.nn.logsumexp(logits, axis=-1) - jnp.sum(pd * logits, axis=-1)
+
+ approxkl = 0.5 * ((logprobs_diff) ** 2).mean()
+ loss = pg_loss + args.ppo.vf_coef * vf_loss
+
+ rl_stats = RLStatistics(
+ vf_loss=vf_loss,
+ vf_clipfrac=vf_clipfrac,
+ pg_loss=pg_loss,
+ pg_clipfrac=pg_clipfrac,
+ approxkl=approxkl,
+ loss=loss,
+ entropy=entropy.mean(),
+ ratio=ratio.mean(),
+ vf_losses1=vf_losses1.mean(),
+ )
+ rl_stats = jax.lax.pmean(rl_stats, "batch")
+ return loss, rl_stats
+
+ grad_fn = jax.value_and_grad(loss, has_aux=True)
+ (loss, rl_stats), grads = grad_fn(policy_state.params)
+ grads = jax.lax.pmean(grads, "batch")
+ policy_state = policy_state.apply_gradients(grads=grads)
+ return policy_state, rl_stats
+
+
+def linear_schedule(optimizer_step, args):
+ """anneal learning rate linearly to reach 0 after one epoch."""
+ update = 1 + optimizer_step // (args.ppo.noptepochs * args.ppo.nminibatches * args.ppo.gradient_accumulation_steps)
+ frac = 1.0 - (update - 1.0) / args.ppo.num_updates
+ lrnow = frac * args.ppo.lr
+ return lrnow
+
+
+def train(args: Args):
+ local_devices = jax.local_devices()
+ global_devices = jax.devices()
+ args.ppo.world_size = jax.process_count()
+ learner_devices = [local_devices[d_id] for d_id in args.learner_device_ids]
+ global_learner_decices = [
+ global_devices[d_id + process_index * len(local_devices)]
+ for process_index in range(args.ppo.world_size)
+ for d_id in args.learner_device_ids
+ ]
+ pprint({"global_learner_decices": global_learner_decices})
+ args.global_learner_decices = [str(item) for item in global_learner_decices]
+ args.learner_devices = [str(item) for item in learner_devices]
+ args.local_rank = jax.process_index()
+ args.ppo.batch_size = int(args.ppo.local_batch_size * len(args.learner_devices) * args.ppo.world_size)
+ args.ppo.minibatch_size = exact_div(args.ppo.batch_size, args.ppo.nminibatches)
+ args.ppo.local_mini_batch_size = exact_div(args.ppo.local_batch_size, args.ppo.nminibatches)
+ args.ppo.local_micro_batch_size = exact_div(args.ppo.local_mini_batch_size, args.ppo.gradient_accumulation_steps)
+ if args.ppo.whiten_rewards:
+ assert (
+ args.ppo.local_mini_batch_size >= 8
+ ), f"Per-rank minibatch size {args.ppo.local_mini_batch_size} is insufficient for whitening"
+ # `per_rank_rollout_batch_size` is our `args.ppo.local_batch_size`
+ # `per_rank_minibatch_size` is our `args.ppo.local_mini_batch_size`
+ args.ppo.num_updates = args.ppo.total_episodes // args.ppo.batch_size
+
+ console = Console(force_terminal=True)
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+ writer = SimpleNamespace() # dummy writer
+ writer.add_scalar = lambda x, y, z: None
+ writer.add_histogram = lambda x, y, z: None
+
+ if args.local_rank == 0:
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+ writer = SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+ local_seed = args.seed + args.local_rank * 100003 # Prime
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.base_model,
+ padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ reward_forward = prepare_reward_forward(args, tokenizer)
+ policy_forward, policy_generate, policy_params = prepare_policy_forward_and_policy_generate(args, tokenizer)
+ _, _, ref_policy_params = prepare_policy_forward_and_policy_generate(args, tokenizer)
+
+ p_whiten_no_shift_mean = jax.pmap(functools.partial(whiten, shift_mean=False))
+ p_whiten_shift_mean = jax.pmap(functools.partial(whiten, shift_mean=True))
+ p_reward_forward = jax.pmap(reward_forward)
+ p_train_step = jax.pmap(
+ functools.partial(train_step, args=args),
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+
+ @jax.pmap
+ def p_get_response_values_and_logprobs(policy_state_params, queries):
+ query_responses = policy_generate(
+ params=policy_state_params.lm_backbone_params,
+ queries=queries,
+ )
+ # query_responses: [local_batch_size, query_length + response_length]
+ responses = query_responses[:, args.task.query_length :]
+
+ output, full_values = policy_forward(policy_state_params, query_responses)
+ values = full_values[:, args.task.query_length - 1 : -1].squeeze(-1)
+ # values: [local_batch_size, response_length]
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ # logits: [local_batch_size, response_length, vocab_size]
+ logits /= args.task.temperature
+ all_logprobs = jax.nn.log_softmax(logits, axis=-1)
+ # all_logprobs: [local_batch_size, response_length, vocab_size]
+ logprobs = jnp.take_along_axis(all_logprobs, responses[..., None], -1).squeeze(-1)
+ # logprobs: [local_batch_size, response_length]
+
+ ref_output, _ = policy_forward(ref_policy_params, query_responses)
+ ref_logits = ref_output.logits[:, args.task.query_length - 1 : -1, :]
+ # ref_logits: [local_batch_size, response_length, vocab_size]
+ ref_logits /= args.task.temperature
+ ref_all_logprobs = jax.nn.log_softmax(ref_logits, axis=-1)
+ ref_logprobs = jnp.take_along_axis(ref_all_logprobs, responses[..., None], -1).squeeze(-1)
+ # ref_logprobs: [local_batch_size, response_length]
+ return query_responses, values, logprobs, ref_logprobs
+
+ if args.use_tensorflow_adam:
+ adam = adam_tf_style
+ else:
+ adam = optax.adam
+
+ optimizer = adam(
+ learning_rate=functools.partial(linear_schedule, args=args),
+ eps=args.ppo.eps,
+ )
+
+ optimizer = optax.MultiSteps(optimizer, args.ppo.gradient_accumulation_steps)
+
+ policy_state = TrainState.create(apply_fn=policy_forward, params=policy_params, tx=optimizer)
+ policy_state = jax_utils.replicate(policy_state)
+
+ del policy_params
+
+ dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=local_seed,
+ start_text=args.task.start_text,
+ end_text=args.task.end_text,
+ )
+ dataloader = DataLoader(dataset, batch_size=args.ppo.batch_size)
+ iter_dataloader = iter(dataloader)
+ kl_ctl = AdaptiveKLController(args.rewards.kl_coef, hparams=args.rewards.adaptive_kl)
+
+ print("===training policy===")
+ global_step = 0
+ approxkls_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ pg_clipfracs_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ pg_losses_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ vf_losses_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ vf_clipfrac_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ entropies_stats = np.zeros(
+ (
+ args.ppo.noptepochs,
+ args.ppo.nminibatches,
+ args.ppo.gradient_accumulation_steps,
+ ),
+ )
+ for update in range(1, args.ppo.num_updates + 1):
+ global_step += 1 * args.ppo.batch_size
+ data = next(iter_dataloader)
+ queries = right_padding_to_left_padding(data["input_ids"], tokenizer.pad_token_id)
+ queries = common_utils.shard(queries)
+ # queries: [num_device, local_batch_size, query_length]
+
+ query_responses, values, logprobs, ref_logprobs = p_get_response_values_and_logprobs(policy_state.params, queries)
+ responses = query_responses[..., args.task.query_length :]
+
+ # **Response Processing**
+ # 1. truncate at the first occurrence of `truncate_token` that appears at or after
+ # position truncate_after in the responses
+ # https://github.com/openai/lm-human-preferences/blob/cbfd210bb8b08f6bc5c26878c10984b90f516c66/lm_human_preferences/train_policy.py#L378
+ truncate_token_mask = responses == args.task.truncate_token
+ # truncate_token_mask: [num_device, local_batch_size, response_length]
+ truncate_after_or_token_mask = jnp.concatenate(
+ [
+ jnp.zeros_like(truncate_token_mask)[:, :, : args.task.truncate_after],
+ truncate_token_mask[:, :, args.task.truncate_after :],
+ ],
+ axis=-1,
+ )
+ truncate_mask = (jnp.cumsum(truncate_after_or_token_mask, axis=-1) - truncate_after_or_token_mask).astype("bool")
+ postprocessed_responses = jnp.where(
+ truncate_mask,
+ jnp.full_like(responses, tokenizer.pad_token_id),
+ responses,
+ )
+ # postprocessed_responses: [num_device, local_batch_size, response_length]
+ del truncate_token_mask, truncate_after_or_token_mask, truncate_mask
+
+ # 2. run reward model on the truncated responses
+ postprocessed_query_responses = np.concatenate((queries, postprocessed_responses), axis=-1)
+ # postprocessed_query_responses: [num_device, local_batch_size, query_length + response_length]
+ postprocessed_query_responses = einops.rearrange(
+ right_padding_to_left_padding(
+ einops.rearrange(postprocessed_query_responses, "d b l -> (d b) l"), tokenizer.pad_token_id
+ ),
+ "(d b) l -> d b l",
+ d=len(args.learner_devices),
+ )
+ scores = p_reward_forward(query_responses_ids=postprocessed_query_responses).squeeze(-1)
+ # scores: [num_device, local_batch_size]
+
+ # 3. filter response. Ensure that the sample contains truncate_token
+ # responses not passing that filter will receive a low (fixed) score
+ # only query humans on responses that pass that filter
+ matches_token = postprocessed_responses[..., args.task.truncate_after :] == args.task.truncate_token
+ # matches_token: [num_device, local_batch_size, response_length - args.task.truncate_after]
+
+ filter_mask = jnp.any(matches_token, axis=-1)
+ # filter_mask: [num_device, local_batch_size]
+
+ scores = jnp.where(
+ filter_mask,
+ scores,
+ jnp.full_like(scores, args.task.penalty_reward_value),
+ )
+ # scores: [num_device, local_batch_size]
+ del matches_token, filter_mask
+
+ # 4. compute rewards
+ kl = logprobs - ref_logprobs
+ # kl: [num_device, local_batch_size, response_length]
+ non_score_reward = -kl_ctl.value * kl
+ rewards = non_score_reward
+ rewards = rewards.at[..., -1].add(scores)
+ # rewards: [num_device, local_batch_size, response_length]
+
+ # 5. whiten rewards
+ if args.ppo.whiten_rewards:
+ rewards = p_whiten_no_shift_mean(rewards)
+ try:
+ sample_kl = kl[0][0].sum().item()
+ console.print(
+ f"[green][bold]{'Query'}:[/]\n"
+ + f"[green]{ tokenizer.decode(queries[0][0], skip_special_tokens=True)}[/]\n\n"
+ + f"[blue][bold]{'Raw response'}:[/]\n"
+ + f"[blue]{tokenizer.decode(responses[0][0], skip_special_tokens=True)}[/]\n\n"
+ + f"[yellow][bold]{'Processed response'}:[/]\n"
+ + f"[yellow]{tokenizer.decode(postprocessed_responses[0][0], skip_special_tokens=True)}[/]\n\n"
+ + f"[red]score: {scores[0][0]}, kl: {kl[0][0].sum().item()}, total reward: {scores[0][0] - kl_ctl.value * sample_kl} [/]"
+ )
+ except Exception as e:
+ print(e)
+ del postprocessed_query_responses
+
+ # 6. compute advantages and returns
+ lastgaelam = 0
+ advantages_reversed = []
+ gen_length = args.task.response_length
+
+ for t in reversed(range(gen_length)):
+ nextvalues = values[..., t + 1] if t < gen_length - 1 else 0.0
+ delta = rewards[..., t] + args.ppo.gamma * nextvalues - values[..., t]
+ lastgaelam = delta + args.ppo.gamma * args.ppo.lam * lastgaelam
+ advantages_reversed.append(lastgaelam)
+ # advantages_reversed is a list of 2D arrays
+ # Each array has the shape [num_devices, local_batch_size]
+
+ advantages = jnp.stack(advantages_reversed[::-1], axis=-1)
+ # advantages: [num_device, local_batch_size, response_length]
+ returns = advantages + values
+ # returns: [num_device, local_batch_size, response_length]
+ advantages = p_whiten_shift_mean(advantages)
+
+ return_mean, return_var = returns.mean(), returns.var()
+ value_mean, value_var = values.mean(), values.var()
+
+ # Do multiple epochs of PPO training, with a fresh random shuffle in each epoch
+ for ppo_epoch_idx in range(args.ppo.noptepochs):
+ b_inds = np.random.permutation(args.ppo.local_batch_size)
+ minibatch_idx = 0
+ for mini_batch_start in range(0, args.ppo.local_batch_size, args.ppo.local_mini_batch_size):
+ mini_batch_end = mini_batch_start + args.ppo.local_mini_batch_size
+ mini_batch_inds = b_inds[mini_batch_start:mini_batch_end]
+ gradient_accumulation_idx = 0
+ for micro_batch_start in range(0, args.ppo.local_mini_batch_size, args.ppo.local_micro_batch_size): | jax.lax.scan.See https://github.com/vwxyzjn/cleanba/blob/81dca0054c8c0930046a2d12b07ada2945014d01/cleanba/cleanba_ppo.py#L638-L654 |
lm-human-preference-details | github_2023 | python | 18 | vwxyzjn | vwxyzjn | @@ -0,0 +1,975 @@
+import functools
+import os
+import time
+from dataclasses import asdict, dataclass, field
+from types import SimpleNamespace
+from typing import List, Optional
+
+import einops
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+import optax
+import orbax
+import tyro
+from flax import jax_utils
+from flax.training import common_utils, orbax_utils
+from flax.training.train_state import TrainState
+from optax import ScaleByAdamState, update_moment, update_moment_per_elem_norm
+from optax._src import base, combine, numerics, utils
+from optax._src.alias import _scale_by_learning_rate
+from rich.console import Console
+from rich.pretty import pprint
+from torch.utils.data import DataLoader, IterableDataset
+from torch.utils.tensorboard import SummaryWriter
+from transformers import AutoTokenizer, FlaxAutoModelForCausalLM, GenerationConfig
+
+from lm_human_preference_details.data import DATASET
+
+
+@dataclass
+class AdaptiveKLParams:
+ target: float = 6.0
+ horizon: int = 10000 # in episodes
+
+
+@dataclass
+class RewardHParams:
+ kl_coef: float = 0.15
+ adaptive_kl: Optional[AdaptiveKLParams] = field(default_factory=AdaptiveKLParams)
+ trained_model: Optional[str] = "models/"
+ label_dataset: tyro.conf.Suppress[Optional[str]] = None
+
+
+@dataclass
+class PpoHParams:
+ total_episodes: int = 1000000
+ local_batch_size: int = 64
+ local_mini_batch_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ mini_batch_size: tyro.conf.Suppress[int] = None
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ minibatch_size: tyro.conf.Suppress[int] = None
+ num_updates: tyro.conf.Suppress[int] = None
+ nminibatches: int = 1
+ noptepochs: int = 4
+ lr: float = 0.00001
+ eps: float = 1e-5
+ vf_coef: float = 0.1
+ cliprange: float = 0.2
+ cliprange_value: float = 0.2
+ gamma: float = 1
+ lam: float = 0.95
+ whiten_rewards: bool = True
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # Truncate response after the first occurrence of this token at or after index after when sampling.
+ truncate_token: int = 13
+ truncate_after: int = 16
+ penalty_reward_value: int = -1
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "cleanrl"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ print_sample_output_freq: int = 0
+ """How often to print sample output"""
+ save_path: str = "models/policy/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = True
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ rewards: RewardHParams = field(default_factory=RewardHParams)
+ ppo: PpoHParams = field(default_factory=PpoHParams)
+
+ # distributed settings
+ local_rank: int = 0
+ """the rank of this process"""
+ learner_device_ids: List[int] = field(default_factory=lambda: [0])
+ "the device ids that script will use"
+ learner_devices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the devices that script will use"""
+ global_learner_decices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the total devices (across all nodes and machines) that script will use"""
+
+
+def scale_by_adam_tf_style(
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+) -> base.GradientTransformation:
+ """Rescale updates according to the Adam algorithm.
+
+ References:
+ [Kingma et al, 2014](https://arxiv.org/abs/1412.6980)
+
+ Args:
+ b1: Decay rate for the exponentially weighted average of grads.
+ b2: Decay rate for the exponentially weighted average of squared grads.
+ eps: Term added to the denominator to improve numerical stability.
+ eps_root: Term added to the denominator inside the square-root to improve
+ numerical stability when backpropagating gradients through the rescaling.
+ mu_dtype: Optional `dtype` to be used for the first order accumulator; if
+ `None` then the `dtype` is inferred from `params` and `updates`.
+
+ Returns:
+ A `GradientTransformation` object.
+ """
+
+ mu_dtype = utils.canonicalize_dtype(mu_dtype)
+
+ def init_fn(params):
+ mu = jax.tree_util.tree_map(lambda t: jnp.zeros_like(t, dtype=mu_dtype), params) # First moment
+ nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Second moment
+ return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)
+
+ def update_fn(updates, state, params=None):
+ del params
+ mu = update_moment(updates, state.mu, b1, 1)
+ nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
+ count_inc = numerics.safe_int32_increment(state.count)
+
+ ### `optax` default adam implementation
+ # mu_hat = bias_correction(mu, b1, count_inc)
+ # nu_hat = bias_correction(nu, b2, count_inc)
+ # updates = jax.tree_util.tree_map(
+ # lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
+ ### Tensorflow adam implementation
+ updates = jax.tree_util.tree_map(
+ lambda m, v: (jnp.sqrt(1 - b2**count_inc) / (1 - b1**count_inc)) * m / (jnp.sqrt(v + eps_root) + eps),
+ mu,
+ nu,
+ ) #
+ mu = utils.cast_tree(mu, mu_dtype)
+ return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)
+
+ return base.GradientTransformation(init_fn, update_fn)
+
+
+def adam_tf_style(
+ learning_rate,
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+):
+ return combine.chain(
+ scale_by_adam_tf_style(b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype),
+ _scale_by_learning_rate(learning_rate),
+ )
+
+
+class AdaptiveKLController:
+ def __init__(self, init_kl_coef: float, hparams: AdaptiveKLParams):
+ self.value = init_kl_coef
+ self.hparams = hparams
+
+ def update(self, current, n_steps):
+ target = self.hparams.target
+ proportional_error = np.clip(current / target - 1, -0.2, 0.2)
+ mult = 1 + proportional_error * n_steps / self.hparams.horizon
+ self.value *= mult
+
+
+def whiten(values, shift_mean=True):
+ # `unbiased=False` matches TF `tf.nn.moments`'s setting
+ mean, var = jnp.mean(values), jnp.var(values)
+ whitened = (values - mean) * jax.lax.rsqrt(var + 1e-8)
+ if not shift_mean:
+ whitened += mean
+ return whitened
+
+
+class ScalarHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=0),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ return x
+
+
+class RewardHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=1 / np.sqrt(self.head_input_size + 1)),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+ x = x * reward_gain + reward_bias
+ return x
+
+
+@flax.struct.dataclass
+class LMBackboneWithScalarHeadParams:
+ """Parameters for the language model backbone and a scalar head."""
+
+ lm_backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+# a pytorch dataset
+class MyDataset(IterableDataset):
+ def __init__(self, generator, tokenizer, query_length, seed, start_text=None, end_text=None):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ # Convert from right padding to left padding.
+ return np.array([[pad_id] * (row == pad_id).sum() + [x for x in row if x != pad_id] for row in tokens])
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a / b}")
+ return q
+
+
+def prepare_reward_forward(args, tokenizer):
+ """Prepare the forward pass of the reward model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ scalar_head = RewardHead(head_input_size=lm_backbone.config.hidden_size)
+
+ def reward_forward(
+ params: LMBackboneWithScalarHeadParams,
+ query_responses_ids: jnp.ndarray,
+ ):
+ """Get reward for each queiry--response pair."""
+ assert query_responses_ids.ndim == 2
+
+ # mask out padding tokens
+ attention_mask = query_responses_ids != tokenizer.pad_token_id
+ query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ reward_latents = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=query_responses_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ ).hidden_states[-1]
+ # reward_latents: [batch_size, length, hidden_size]
+
+ last_reward_latents = reward_latents[:, -1, :]
+ # last_reward_latents: [batch_size, hidden_size]
+
+ reward = scalar_head.apply(variables=params.head_params, x=last_reward_latents)
+ # reward: [batch_size, 1]
+ return reward
+
+ if args.rewards.trained_model:
+ orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()
+ reward_state_params = orbax_checkpointer.restore(args.rewards.trained_model)["reward_model"]["params"]
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": reward_state_params["lm_backbone_params"]["params"]}),
+ head_params=flax.core.FrozenDict({"params": reward_state_params["head_params"]["params"]}),
+ )
+ pprint(f"Loaded pretrained reward model from {args.rewards.trained_model}")
+ else:
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return functools.partial(reward_forward, params=reward_params)
+
+
+def prepare_policy_forward_and_policy_generate(args, tokenizer):
+ """Prepare the forward pass of the policy model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ # disable `pad_token_id` and `eos_token_id` because we just want to
+ # generate tokens without truncation / padding
+ lm_backbone.generation_config.eos_token_id = None
+ lm_backbone.generation_config.pad_token_id = tokenizer.pad_token_id
+ scalar_head = ScalarHead(head_input_size=lm_backbone.config.hidden_size)
+
+ generation_config = GenerationConfig(
+ max_new_tokens=args.task.response_length,
+ min_new_tokens=args.task.response_length,
+ temperature=args.task.temperature,
+ top_k=0.0,
+ top_p=1.0,
+ do_sample=True,
+ pad_token_id=tokenizer.pad_token_id,
+ )
+
+ def policy_forward(
+ params: LMBackboneWithScalarHeadParams,
+ input_ids: jnp.ndarray,
+ ):
+ """Get reward for input_ids."""
+ assert input_ids.ndim == 2
+ # shape: [batch_size, length]
+
+ # mask out padding tokens
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, input_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ lm_backbone_out = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ )
+
+ value_latents = lm_backbone_out.hidden_states[-1]
+ # shape: [batch_size, length, hidden_size]
+
+ values = scalar_head.apply(variables=params.head_params, x=value_latents)
+ # shape: [batch_size, length, 1]
+ return lm_backbone_out, values
+
+ def policy_generate(
+ params: LMBackboneWithScalarHeadParams,
+ queries: jnp.ndarray,
+ ):
+ input_ids = queries
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = lm_backbone.generate(
+ params=params["params"],
+ input_ids=input_ids,
+ generation_config=generation_config,
+ attention_mask=attention_mask.astype("i4"),
+ return_dict_in_generate=True,
+ )
+ context_length = input_ids.shape[1]
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ policy_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return policy_forward, policy_generate, policy_params
+
+
+@flax.struct.dataclass
+class RolloutStatistics:
+ returns: jnp.array
+ values: jnp.array
+ advantage: jnp.array
+ responses: jnp.array
+ query_responses: jnp.array
+ logprobs: jnp.array
+
+
+@flax.struct.dataclass
+class RLStatistics:
+ approxkl: jnp.array
+ entropy: jnp.array
+ pg_loss: jnp.array
+ pg_clipfrac: jnp.array
+ vf_losses1: jnp.array
+ vf_loss: jnp.array
+ vf_clipfrac: jnp.array
+ ratio: jnp.array
+ loss: jnp.array
+
+
+def train_step(policy_state, mb_stats, args):
+ def loss(params):
+ # mb_stats.query_responses: [local_micro_batch_size, query_length + response_length]
+ output, vpred_temp = policy_state.apply_fn(params, mb_stats.query_responses)
+
+ # vpred_temp: [local_micro_batch_size, query_length + response_length, 1]
+ vpred = jnp.squeeze(vpred_temp[:, args.task.query_length - 1 : -1, :], axis=-1)
+ # vpred: [local_micro_batch_size, response_length]
+ vpredclipped = jnp.clip(
+ vpred,
+ mb_stats.values - args.ppo.cliprange_value,
+ mb_stats.values + args.ppo.cliprange_value,
+ )
+ vf_losses1 = jnp.square(vpred - mb_stats.returns)
+ vf_losses2 = jnp.square(vpredclipped - mb_stats.returns)
+ vf_loss = 0.5 * jnp.maximum(vf_losses1, vf_losses2).mean()
+ vf_clipfrac = (vf_losses2 > vf_losses1).astype(jnp.float32).mean()
+
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ logits /= args.task.temperature
+ new_logprobs = -optax.softmax_cross_entropy_with_integer_labels(logits, mb_stats.responses)
+
+ logprobs_diff = new_logprobs - mb_stats.logprobs
+ ratio = jnp.exp(logprobs_diff)
+ pg_losses = -mb_stats.advantage * ratio
+ pg_losses2 = -mb_stats.advantage * jnp.clip(ratio, 1.0 - args.ppo.cliprange, 1.0 + args.ppo.cliprange)
+
+ pg_loss = jnp.maximum(pg_losses, pg_losses2).mean()
+ pg_clipfrac = (pg_losses2 > pg_losses).astype(jnp.float32).mean()
+
+ pd = jax.nn.softmax(logits, axis=-1)
+ entropy = jax.nn.logsumexp(logits, axis=-1) - jnp.sum(pd * logits, axis=-1)
+
+ approxkl = 0.5 * ((logprobs_diff) ** 2).mean()
+ loss = pg_loss + args.ppo.vf_coef * vf_loss
+
+ rl_stats = RLStatistics(
+ vf_loss=vf_loss,
+ vf_clipfrac=vf_clipfrac,
+ pg_loss=pg_loss,
+ pg_clipfrac=pg_clipfrac,
+ approxkl=approxkl,
+ loss=loss,
+ entropy=entropy.mean(),
+ ratio=ratio.mean(),
+ vf_losses1=vf_losses1.mean(),
+ )
+ rl_stats = jax.lax.pmean(rl_stats, "batch")
+ return loss, rl_stats
+
+ grad_fn = jax.value_and_grad(loss, has_aux=True)
+ (loss, rl_stats), grads = grad_fn(policy_state.params)
+ grads = jax.lax.pmean(grads, "batch")
+ policy_state = policy_state.apply_gradients(grads=grads)
+ return policy_state, rl_stats
+
+
+def linear_schedule(optimizer_step, args):
+ """anneal learning rate linearly to reach 0 after one epoch."""
+ update = 1 + optimizer_step // (args.ppo.noptepochs * args.ppo.nminibatches * args.ppo.gradient_accumulation_steps)
+ frac = 1.0 - (update - 1.0) / args.ppo.num_updates
+ lrnow = frac * args.ppo.lr
+ return lrnow
+
+
+def train(args: Args):
+ local_devices = jax.local_devices()
+ global_devices = jax.devices()
+ args.ppo.world_size = jax.process_count()
+ learner_devices = [local_devices[d_id] for d_id in args.learner_device_ids]
+ global_learner_decices = [
+ global_devices[d_id + process_index * len(local_devices)]
+ for process_index in range(args.ppo.world_size)
+ for d_id in args.learner_device_ids
+ ]
+ pprint({"global_learner_decices": global_learner_decices})
+ args.global_learner_decices = [str(item) for item in global_learner_decices]
+ args.learner_devices = [str(item) for item in learner_devices]
+ args.local_rank = jax.process_index()
+ args.ppo.batch_size = int(args.ppo.local_batch_size * len(args.learner_devices) * args.ppo.world_size)
+ args.ppo.minibatch_size = exact_div(args.ppo.batch_size, args.ppo.nminibatches)
+ args.ppo.local_mini_batch_size = exact_div(args.ppo.local_batch_size, args.ppo.nminibatches)
+ args.ppo.local_micro_batch_size = exact_div(args.ppo.local_mini_batch_size, args.ppo.gradient_accumulation_steps)
+ if args.ppo.whiten_rewards:
+ assert (
+ args.ppo.local_mini_batch_size >= 8
+ ), f"Per-rank minibatch size {args.ppo.local_mini_batch_size} is insufficient for whitening"
+ # `per_rank_rollout_batch_size` is our `args.ppo.local_batch_size`
+ # `per_rank_minibatch_size` is our `args.ppo.local_mini_batch_size`
+ args.ppo.num_updates = args.ppo.total_episodes // args.ppo.batch_size
+
+ console = Console(force_terminal=True)
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+ writer = SimpleNamespace() # dummy writer
+ writer.add_scalar = lambda x, y, z: None
+ writer.add_histogram = lambda x, y, z: None
+
+ if args.local_rank == 0:
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+ writer = SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+ local_seed = args.seed + args.local_rank * 100003 # Prime
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.base_model,
+ padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ reward_forward = prepare_reward_forward(args, tokenizer)
+ policy_forward, policy_generate, policy_params = prepare_policy_forward_and_policy_generate(args, tokenizer)
+ _, _, ref_policy_params = prepare_policy_forward_and_policy_generate(args, tokenizer)
+
+ p_whiten_no_shift_mean = jax.pmap(functools.partial(whiten, shift_mean=False))
+ p_whiten_shift_mean = jax.pmap(functools.partial(whiten, shift_mean=True))
+ p_reward_forward = jax.pmap(reward_forward)
+ p_train_step = jax.pmap(
+ functools.partial(train_step, args=args),
+ axis_name="batch",
+ donate_argnums=(0,),
+ ) | You probably just need a single `pmap` on a `train` function that does all the pmaps you have here together. |
lm-human-preference-details | github_2023 | python | 18 | vwxyzjn | vwxyzjn | @@ -0,0 +1,1024 @@
+import functools
+import os
+import time
+from dataclasses import asdict, dataclass, field
+from types import SimpleNamespace
+from typing import List, Optional
+import einops
+from clu import metrics
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+import optax
+import orbax
+import tyro
+from flax import jax_utils
+from flax.training import common_utils, orbax_utils
+from flax.training.train_state import TrainState
+from optax import ScaleByAdamState, update_moment, update_moment_per_elem_norm
+from optax._src import base, combine, numerics, utils
+from optax._src.alias import _scale_by_learning_rate
+from rich.console import Console
+from rich.pretty import pprint
+from torch.utils.data import DataLoader, IterableDataset
+from torch.utils.tensorboard import SummaryWriter
+from transformers import AutoTokenizer, FlaxAutoModelForCausalLM, GenerationConfig
+
+from lm_human_preference_details.data import DATASET
+
+
+@dataclass
+class AdaptiveKLParams:
+ target: float = 6.0
+ horizon: int = 10000 # in episodes
+
+
+@dataclass
+class RewardHParams:
+ kl_coef: float = 0.15
+ adaptive_kl: Optional[AdaptiveKLParams] = field(default_factory=AdaptiveKLParams)
+ trained_model: Optional[str] = "models/"
+ label_dataset: tyro.conf.Suppress[Optional[str]] = None
+
+
+@dataclass
+class PpoHParams:
+ total_episodes: int = 1000000
+ local_batch_size: int = 64
+ local_mini_batch_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ mini_batch_size: tyro.conf.Suppress[int] = None
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ minibatch_size: tyro.conf.Suppress[int] = None
+ num_updates: tyro.conf.Suppress[int] = None
+ nminibatches: int = 1
+ noptepochs: int = 4
+ lr: float = 0.00001
+ eps: float = 1e-5
+ vf_coef: float = 0.1
+ cliprange: float = 0.2
+ cliprange_value: float = 0.2
+ gamma: float = 1
+ lam: float = 0.95
+ whiten_rewards: bool = True
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # Truncate response after the first occurrence of this token at or after index after when sampling.
+ truncate_token: int = 13
+ truncate_after: int = 16
+ penalty_reward_value: int = -1
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "cleanrl"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ print_sample_output_freq: int = 0
+ """How often to print sample output"""
+ save_path: str = "models/policy/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = True
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ rewards: RewardHParams = field(default_factory=RewardHParams)
+ ppo: PpoHParams = field(default_factory=PpoHParams)
+
+ # distributed settings
+ local_rank: int = 0
+ """the rank of this process"""
+ learner_device_ids: List[int] = field(default_factory=lambda: [0])
+ "the device ids that script will use"
+ learner_devices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the devices that script will use"""
+ global_learner_decices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the total devices (across all nodes and machines) that script will use"""
+
+
+def scale_by_adam_tf_style(
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+) -> base.GradientTransformation:
+ """Rescale updates according to the Adam algorithm.
+
+ References:
+ [Kingma et al, 2014](https://arxiv.org/abs/1412.6980)
+
+ Args:
+ b1: Decay rate for the exponentially weighted average of grads.
+ b2: Decay rate for the exponentially weighted average of squared grads.
+ eps: Term added to the denominator to improve numerical stability.
+ eps_root: Term added to the denominator inside the square-root to improve
+ numerical stability when backpropagating gradients through the rescaling.
+ mu_dtype: Optional `dtype` to be used for the first order accumulator; if
+ `None` then the `dtype` is inferred from `params` and `updates`.
+
+ Returns:
+ A `GradientTransformation` object.
+ """
+
+ mu_dtype = utils.canonicalize_dtype(mu_dtype)
+
+ def init_fn(params):
+ mu = jax.tree_util.tree_map(
+ lambda t: jnp.zeros_like(t, dtype=mu_dtype), params
+ ) # First moment
+ nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Second moment
+ return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)
+
+ def update_fn(updates, state, params=None):
+ del params
+ mu = update_moment(updates, state.mu, b1, 1)
+ nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
+ count_inc = numerics.safe_int32_increment(state.count)
+
+ ### `optax` default adam implementation
+ # mu_hat = bias_correction(mu, b1, count_inc)
+ # nu_hat = bias_correction(nu, b2, count_inc)
+ # updates = jax.tree_util.tree_map(
+ # lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
+ ### Tensorflow adam implementation
+ updates = jax.tree_util.tree_map(
+ lambda m, v: (jnp.sqrt(1 - b2**count_inc) / (1 - b1**count_inc))
+ * m
+ / (jnp.sqrt(v + eps_root) + eps),
+ mu,
+ nu,
+ ) #
+ mu = utils.cast_tree(mu, mu_dtype)
+ return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)
+
+ return base.GradientTransformation(init_fn, update_fn)
+
+
+def adam_tf_style(
+ learning_rate,
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+):
+ return combine.chain(
+ scale_by_adam_tf_style(
+ b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype
+ ),
+ _scale_by_learning_rate(learning_rate),
+ )
+
+
+class AdaptiveKLController:
+ def __init__(self, init_kl_coef: float, hparams: AdaptiveKLParams):
+ self.value = init_kl_coef
+ self.hparams = hparams
+
+ def update(self, current, n_steps):
+ target = self.hparams.target
+ proportional_error = np.clip(current / target - 1, -0.2, 0.2)
+ mult = 1 + proportional_error * n_steps / self.hparams.horizon
+ self.value *= mult
+
+
+def whiten(values, shift_mean=True):
+ # `unbiased=False` matches TF `tf.nn.moments`'s setting
+ mean, var = jnp.mean(values), jnp.var(values)
+ whitened = (values - mean) * jax.lax.rsqrt(var + 1e-8)
+ if not shift_mean:
+ whitened += mean
+ return whitened
+
+
+class ScalarHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=0),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ return x
+
+
+class RewardHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(
+ stddev=1 / np.sqrt(self.head_input_size + 1)
+ ),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+ x = x * reward_gain + reward_bias
+ return x
+
+
+@flax.struct.dataclass
+class LMBackboneWithScalarHeadParams:
+ """Parameters for the language model backbone and a scalar head."""
+
+ lm_backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+# a pytorch dataset
+class MyDataset(IterableDataset):
+ def __init__(
+ self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
+ ):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ def pad_row(row):
+ mask = 1 - (row == pad_id) # 1 if not pad_id, 0 if pad_id
+ return row[
+ jnp.argsort(mask)
+ ] # uses the fact that jnp.argsort is stable by default
+
+ return jax.vmap(pad_row)(tokens)
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a / b}")
+ return q
+
+
+def prepare_reward_forward(args, tokenizer):
+ """Prepare the forward pass of the reward model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ scalar_head = RewardHead(head_input_size=lm_backbone.config.hidden_size)
+
+ def reward_forward(
+ params: LMBackboneWithScalarHeadParams,
+ query_responses_ids: jnp.ndarray,
+ ):
+ """Get reward for each queiry--response pair."""
+ assert query_responses_ids.ndim == 2
+
+ # mask out padding tokens
+ attention_mask = query_responses_ids != tokenizer.pad_token_id
+ query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ reward_latents = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=query_responses_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ ).hidden_states[-1]
+ # reward_latents: [batch_size, length, hidden_size]
+
+ last_reward_latents = reward_latents[:, -1, :]
+ # last_reward_latents: [batch_size, hidden_size]
+
+ reward = scalar_head.apply(variables=params.head_params, x=last_reward_latents)
+ # reward: [batch_size, 1]
+ return reward
+
+ if args.rewards.trained_model:
+ orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()
+ reward_state_params = orbax_checkpointer.restore(args.rewards.trained_model)[
+ "reward_model"
+ ]["params"]
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict(
+ {"params": reward_state_params["lm_backbone_params"]["params"]}
+ ),
+ head_params=flax.core.FrozenDict(
+ {"params": reward_state_params["head_params"]["params"]}
+ ),
+ )
+ pprint(f"Loaded pretrained reward model from {args.rewards.trained_model}")
+ else:
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return functools.partial(reward_forward, params=reward_params)
+
+
+def prepare_policy_forward_and_policy_generate(args, tokenizer):
+ """Prepare the forward pass of the policy model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ # disable `pad_token_id` and `eos_token_id` because we just want to
+ # generate tokens without truncation / padding
+ lm_backbone.generation_config.eos_token_id = None
+ lm_backbone.generation_config.pad_token_id = tokenizer.pad_token_id
+ scalar_head = ScalarHead(head_input_size=lm_backbone.config.hidden_size)
+
+ generation_config = GenerationConfig(
+ max_new_tokens=args.task.response_length,
+ min_new_tokens=args.task.response_length,
+ temperature=args.task.temperature,
+ top_k=0.0,
+ top_p=1.0,
+ do_sample=True,
+ pad_token_id=tokenizer.pad_token_id,
+ )
+
+ def policy_forward(
+ params: LMBackboneWithScalarHeadParams,
+ input_ids: jnp.ndarray,
+ ):
+ """Get reward for input_ids."""
+ assert input_ids.ndim == 2
+ # shape: [batch_size, length]
+
+ # mask out padding tokens
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, input_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ lm_backbone_out = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ )
+
+ value_latents = lm_backbone_out.hidden_states[-1]
+ # shape: [batch_size, length, hidden_size]
+
+ values = scalar_head.apply(variables=params.head_params, x=value_latents)
+ # shape: [batch_size, length, 1]
+ return lm_backbone_out, values
+
+ def policy_generate(
+ params: LMBackboneWithScalarHeadParams,
+ queries: jnp.ndarray,
+ ):
+ input_ids = queries
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = lm_backbone.generate(
+ params=params["params"],
+ input_ids=input_ids,
+ generation_config=generation_config,
+ attention_mask=attention_mask.astype("i4"),
+ return_dict_in_generate=True,
+ )
+ context_length = input_ids.shape[1]
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ policy_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return policy_forward, policy_generate, policy_params
+
+
+@flax.struct.dataclass
+class RLStatistics(metrics.Collection):
+ approxkl: metrics.Average.from_output("approxkl")
+ entropy: metrics.Average.from_output("entropy")
+ pg_loss: metrics.Average.from_output("pg_loss")
+ pg_clipfrac: metrics.Average.from_output("pg_clipfrac")
+ vf_loss1: metrics.Average.from_output("vf_loss1")
+ vf_loss: metrics.Average.from_output("vf_loss")
+ vf_clipfrac: metrics.Average.from_output("vf_clipfrac")
+ ratio: metrics.Average.from_output("ratio")
+ loss: metrics.Average.from_output("loss")
+
+
+def train_step(
+ policy_state,
+ rl_stats,
+ mb_advantages,
+ mb_returns,
+ mb_values,
+ mb_query_responses,
+ mb_logprobs,
+ args,
+):
+ def loss(params):
+ output, vpred_temp = policy_state.apply_fn(params, mb_query_responses)
+ # vpred_temp: [local_micro_batch_size, query_length + response_length, 1]
+ vpred = jnp.squeeze(vpred_temp[:, args.task.query_length - 1 : -1, :], axis=-1)
+ # vpred: [local_micro_batch_size, response_length]
+ vpredclipped = jnp.clip(
+ vpred,
+ mb_values - args.ppo.cliprange_value,
+ mb_values + args.ppo.cliprange_value,
+ )
+ vf_losses1 = jnp.square(vpred - mb_returns)
+ vf_losses2 = jnp.square(vpredclipped - mb_returns)
+ vf_loss = 0.5 * jnp.maximum(vf_losses1, vf_losses2).mean()
+ vf_clipfrac = (vf_losses2 > vf_losses1).astype(jnp.float32).mean()
+
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ logits /= args.task.temperature
+ responses = mb_query_responses[:, args.task.query_length :]
+ new_logprobs = -optax.softmax_cross_entropy_with_integer_labels(
+ logits, responses
+ )
+
+ logprobs_diff = new_logprobs - mb_logprobs
+ ratio = jnp.exp(logprobs_diff)
+ pg_losses1 = -mb_advantages * ratio
+ pg_losses2 = -mb_advantages * jnp.clip(
+ ratio, 1.0 - args.ppo.cliprange, 1.0 + args.ppo.cliprange
+ )
+ pg_loss = jnp.maximum(pg_losses1, pg_losses2).mean()
+ pg_clipfrac = (pg_losses2 > pg_losses1).astype(jnp.float32).mean()
+
+ pd = jax.nn.softmax(logits, axis=-1)
+ entropy = jax.nn.logsumexp(logits, axis=-1) - jnp.sum(pd * logits, axis=-1)
+
+ approxkl = 0.5 * ((logprobs_diff) ** 2).mean()
+ loss = pg_loss + args.ppo.vf_coef * vf_loss
+
+ current_rl_stats = dict(
+ approxkl=approxkl,
+ entropy=entropy.mean(),
+ ratio=ratio.mean(),
+ pg_loss=pg_loss,
+ pg_clipfrac=pg_clipfrac,
+ vf_loss1=vf_losses1.mean(),
+ vf_loss=vf_loss,
+ vf_clipfrac=vf_clipfrac,
+ loss=loss,
+ )
+ return loss, current_rl_stats
+
+ grad_fn = jax.value_and_grad(loss, has_aux=True)
+ (loss, current_rl_stats), grads = grad_fn(policy_state.params)
+ grads = jax.lax.pmean(grads, "batch")
+ policy_state = policy_state.apply_gradients(grads=grads)
+
+ rl_stats = rl_stats.merge(RLStatistics.gather_from_model_output(**current_rl_stats))
+
+ return policy_state, rl_stats
+
+
+def linear_schedule(optimizer_step, args):
+ """anneal learning rate linearly to reach 0 after one epoch."""
+ update = 1 + optimizer_step // (
+ args.ppo.noptepochs
+ * args.ppo.nminibatches
+ * args.ppo.gradient_accumulation_steps
+ )
+ frac = 1.0 - (update - 1.0) / args.ppo.num_updates
+ lrnow = frac * args.ppo.lr
+ return lrnow
+
+
+def train(args: Args):
+ local_devices = jax.local_devices()
+ global_devices = jax.devices()
+ args.ppo.world_size = jax.process_count()
+ learner_devices = [local_devices[d_id] for d_id in args.learner_device_ids]
+ global_learner_decices = [
+ global_devices[d_id + process_index * len(local_devices)]
+ for process_index in range(args.ppo.world_size)
+ for d_id in args.learner_device_ids
+ ]
+ pprint({"global_learner_decices": global_learner_decices})
+ args.global_learner_decices = [str(item) for item in global_learner_decices]
+ args.learner_devices = [str(item) for item in learner_devices]
+ args.local_rank = jax.process_index()
+ args.ppo.batch_size = int(
+ args.ppo.local_batch_size * len(args.learner_devices) * args.ppo.world_size
+ )
+ args.ppo.minibatch_size = exact_div(args.ppo.batch_size, args.ppo.nminibatches)
+ args.ppo.local_mini_batch_size = exact_div(
+ args.ppo.local_batch_size, args.ppo.nminibatches
+ )
+ args.ppo.local_micro_batch_size = exact_div(
+ args.ppo.local_mini_batch_size, args.ppo.gradient_accumulation_steps
+ )
+ if args.ppo.whiten_rewards:
+ assert (
+ args.ppo.local_mini_batch_size >= 8
+ ), f"Per-rank minibatch size {args.ppo.local_mini_batch_size} is insufficient for whitening"
+ # `per_rank_rollout_batch_size` is our `args.ppo.local_batch_size`
+ # `per_rank_minibatch_size` is our `args.ppo.local_mini_batch_size`
+ args.ppo.num_updates = args.ppo.total_episodes // args.ppo.batch_size
+
+ console = Console(force_terminal=True)
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+ writer = SimpleNamespace() # dummy writer
+ writer.add_scalar = lambda x, y, z: None
+ writer.add_histogram = lambda x, y, z: None
+
+ if args.local_rank == 0:
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+ writer = SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s"
+ % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+ local_seed = args.seed + args.local_rank * 100003 # Prime
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.base_model,
+ padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ reward_forward = prepare_reward_forward(args, tokenizer)
+ (
+ policy_forward,
+ policy_generate,
+ policy_params,
+ ) = prepare_policy_forward_and_policy_generate(args, tokenizer)
+ _, _, ref_policy_params = prepare_policy_forward_and_policy_generate( | To simplify, maybe just to clone `ref_policy_params = copy.deepcopy(policy_params)`? |
lm-human-preference-details | github_2023 | python | 18 | vwxyzjn | vwxyzjn | @@ -0,0 +1,1024 @@
+import functools
+import os
+import time
+from dataclasses import asdict, dataclass, field
+from types import SimpleNamespace
+from typing import List, Optional
+import einops
+from clu import metrics
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+import optax
+import orbax
+import tyro
+from flax import jax_utils
+from flax.training import common_utils, orbax_utils
+from flax.training.train_state import TrainState
+from optax import ScaleByAdamState, update_moment, update_moment_per_elem_norm
+from optax._src import base, combine, numerics, utils
+from optax._src.alias import _scale_by_learning_rate
+from rich.console import Console
+from rich.pretty import pprint
+from torch.utils.data import DataLoader, IterableDataset
+from torch.utils.tensorboard import SummaryWriter
+from transformers import AutoTokenizer, FlaxAutoModelForCausalLM, GenerationConfig
+
+from lm_human_preference_details.data import DATASET
+
+
+@dataclass
+class AdaptiveKLParams:
+ target: float = 6.0
+ horizon: int = 10000 # in episodes
+
+
+@dataclass
+class RewardHParams:
+ kl_coef: float = 0.15
+ adaptive_kl: Optional[AdaptiveKLParams] = field(default_factory=AdaptiveKLParams)
+ trained_model: Optional[str] = "models/"
+ label_dataset: tyro.conf.Suppress[Optional[str]] = None
+
+
+@dataclass
+class PpoHParams:
+ total_episodes: int = 1000000
+ local_batch_size: int = 64
+ local_mini_batch_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ mini_batch_size: tyro.conf.Suppress[int] = None
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ minibatch_size: tyro.conf.Suppress[int] = None
+ num_updates: tyro.conf.Suppress[int] = None
+ nminibatches: int = 1
+ noptepochs: int = 4
+ lr: float = 0.00001
+ eps: float = 1e-5
+ vf_coef: float = 0.1
+ cliprange: float = 0.2
+ cliprange_value: float = 0.2
+ gamma: float = 1
+ lam: float = 0.95
+ whiten_rewards: bool = True
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # Truncate response after the first occurrence of this token at or after index after when sampling.
+ truncate_token: int = 13
+ truncate_after: int = 16
+ penalty_reward_value: int = -1
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "cleanrl"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ print_sample_output_freq: int = 0
+ """How often to print sample output"""
+ save_path: str = "models/policy/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = True
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ rewards: RewardHParams = field(default_factory=RewardHParams)
+ ppo: PpoHParams = field(default_factory=PpoHParams)
+
+ # distributed settings
+ local_rank: int = 0
+ """the rank of this process"""
+ learner_device_ids: List[int] = field(default_factory=lambda: [0])
+ "the device ids that script will use"
+ learner_devices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the devices that script will use"""
+ global_learner_decices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the total devices (across all nodes and machines) that script will use"""
+
+
+def scale_by_adam_tf_style(
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+) -> base.GradientTransformation:
+ """Rescale updates according to the Adam algorithm.
+
+ References:
+ [Kingma et al, 2014](https://arxiv.org/abs/1412.6980)
+
+ Args:
+ b1: Decay rate for the exponentially weighted average of grads.
+ b2: Decay rate for the exponentially weighted average of squared grads.
+ eps: Term added to the denominator to improve numerical stability.
+ eps_root: Term added to the denominator inside the square-root to improve
+ numerical stability when backpropagating gradients through the rescaling.
+ mu_dtype: Optional `dtype` to be used for the first order accumulator; if
+ `None` then the `dtype` is inferred from `params` and `updates`.
+
+ Returns:
+ A `GradientTransformation` object.
+ """
+
+ mu_dtype = utils.canonicalize_dtype(mu_dtype)
+
+ def init_fn(params):
+ mu = jax.tree_util.tree_map(
+ lambda t: jnp.zeros_like(t, dtype=mu_dtype), params
+ ) # First moment
+ nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Second moment
+ return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)
+
+ def update_fn(updates, state, params=None):
+ del params
+ mu = update_moment(updates, state.mu, b1, 1)
+ nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
+ count_inc = numerics.safe_int32_increment(state.count)
+
+ ### `optax` default adam implementation
+ # mu_hat = bias_correction(mu, b1, count_inc)
+ # nu_hat = bias_correction(nu, b2, count_inc)
+ # updates = jax.tree_util.tree_map(
+ # lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
+ ### Tensorflow adam implementation
+ updates = jax.tree_util.tree_map(
+ lambda m, v: (jnp.sqrt(1 - b2**count_inc) / (1 - b1**count_inc))
+ * m
+ / (jnp.sqrt(v + eps_root) + eps),
+ mu,
+ nu,
+ ) #
+ mu = utils.cast_tree(mu, mu_dtype)
+ return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)
+
+ return base.GradientTransformation(init_fn, update_fn)
+
+
+def adam_tf_style(
+ learning_rate,
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+):
+ return combine.chain(
+ scale_by_adam_tf_style(
+ b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype
+ ),
+ _scale_by_learning_rate(learning_rate),
+ )
+
+
+class AdaptiveKLController:
+ def __init__(self, init_kl_coef: float, hparams: AdaptiveKLParams):
+ self.value = init_kl_coef
+ self.hparams = hparams
+
+ def update(self, current, n_steps):
+ target = self.hparams.target
+ proportional_error = np.clip(current / target - 1, -0.2, 0.2)
+ mult = 1 + proportional_error * n_steps / self.hparams.horizon
+ self.value *= mult
+
+
+def whiten(values, shift_mean=True):
+ # `unbiased=False` matches TF `tf.nn.moments`'s setting
+ mean, var = jnp.mean(values), jnp.var(values)
+ whitened = (values - mean) * jax.lax.rsqrt(var + 1e-8)
+ if not shift_mean:
+ whitened += mean
+ return whitened
+
+
+class ScalarHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=0),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ return x
+
+
+class RewardHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(
+ stddev=1 / np.sqrt(self.head_input_size + 1)
+ ),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+ x = x * reward_gain + reward_bias
+ return x
+
+
+@flax.struct.dataclass
+class LMBackboneWithScalarHeadParams:
+ """Parameters for the language model backbone and a scalar head."""
+
+ lm_backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+# a pytorch dataset
+class MyDataset(IterableDataset):
+ def __init__(
+ self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
+ ):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ def pad_row(row):
+ mask = 1 - (row == pad_id) # 1 if not pad_id, 0 if pad_id
+ return row[
+ jnp.argsort(mask)
+ ] # uses the fact that jnp.argsort is stable by default
+
+ return jax.vmap(pad_row)(tokens)
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a / b}")
+ return q
+
+
+def prepare_reward_forward(args, tokenizer):
+ """Prepare the forward pass of the reward model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ scalar_head = RewardHead(head_input_size=lm_backbone.config.hidden_size)
+
+ def reward_forward(
+ params: LMBackboneWithScalarHeadParams,
+ query_responses_ids: jnp.ndarray,
+ ):
+ """Get reward for each queiry--response pair."""
+ assert query_responses_ids.ndim == 2
+
+ # mask out padding tokens
+ attention_mask = query_responses_ids != tokenizer.pad_token_id
+ query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ reward_latents = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=query_responses_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ ).hidden_states[-1]
+ # reward_latents: [batch_size, length, hidden_size]
+
+ last_reward_latents = reward_latents[:, -1, :]
+ # last_reward_latents: [batch_size, hidden_size]
+
+ reward = scalar_head.apply(variables=params.head_params, x=last_reward_latents)
+ # reward: [batch_size, 1]
+ return reward
+
+ if args.rewards.trained_model:
+ orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()
+ reward_state_params = orbax_checkpointer.restore(args.rewards.trained_model)[
+ "reward_model"
+ ]["params"]
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict(
+ {"params": reward_state_params["lm_backbone_params"]["params"]}
+ ),
+ head_params=flax.core.FrozenDict(
+ {"params": reward_state_params["head_params"]["params"]}
+ ),
+ )
+ pprint(f"Loaded pretrained reward model from {args.rewards.trained_model}")
+ else:
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return functools.partial(reward_forward, params=reward_params)
+
+
+def prepare_policy_forward_and_policy_generate(args, tokenizer):
+ """Prepare the forward pass of the policy model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ # disable `pad_token_id` and `eos_token_id` because we just want to
+ # generate tokens without truncation / padding
+ lm_backbone.generation_config.eos_token_id = None
+ lm_backbone.generation_config.pad_token_id = tokenizer.pad_token_id
+ scalar_head = ScalarHead(head_input_size=lm_backbone.config.hidden_size)
+
+ generation_config = GenerationConfig(
+ max_new_tokens=args.task.response_length,
+ min_new_tokens=args.task.response_length,
+ temperature=args.task.temperature,
+ top_k=0.0,
+ top_p=1.0,
+ do_sample=True,
+ pad_token_id=tokenizer.pad_token_id,
+ )
+
+ def policy_forward(
+ params: LMBackboneWithScalarHeadParams,
+ input_ids: jnp.ndarray,
+ ):
+ """Get reward for input_ids."""
+ assert input_ids.ndim == 2
+ # shape: [batch_size, length]
+
+ # mask out padding tokens
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, input_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ lm_backbone_out = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ )
+
+ value_latents = lm_backbone_out.hidden_states[-1]
+ # shape: [batch_size, length, hidden_size]
+
+ values = scalar_head.apply(variables=params.head_params, x=value_latents)
+ # shape: [batch_size, length, 1]
+ return lm_backbone_out, values
+
+ def policy_generate(
+ params: LMBackboneWithScalarHeadParams,
+ queries: jnp.ndarray,
+ ):
+ input_ids = queries
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = lm_backbone.generate(
+ params=params["params"],
+ input_ids=input_ids,
+ generation_config=generation_config,
+ attention_mask=attention_mask.astype("i4"),
+ return_dict_in_generate=True,
+ )
+ context_length = input_ids.shape[1]
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ policy_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return policy_forward, policy_generate, policy_params
+
+
+@flax.struct.dataclass
+class RLStatistics(metrics.Collection):
+ approxkl: metrics.Average.from_output("approxkl")
+ entropy: metrics.Average.from_output("entropy")
+ pg_loss: metrics.Average.from_output("pg_loss")
+ pg_clipfrac: metrics.Average.from_output("pg_clipfrac")
+ vf_loss1: metrics.Average.from_output("vf_loss1")
+ vf_loss: metrics.Average.from_output("vf_loss")
+ vf_clipfrac: metrics.Average.from_output("vf_clipfrac")
+ ratio: metrics.Average.from_output("ratio")
+ loss: metrics.Average.from_output("loss")
+
+
+def train_step(
+ policy_state,
+ rl_stats,
+ mb_advantages,
+ mb_returns,
+ mb_values,
+ mb_query_responses,
+ mb_logprobs,
+ args,
+):
+ def loss(params):
+ output, vpred_temp = policy_state.apply_fn(params, mb_query_responses)
+ # vpred_temp: [local_micro_batch_size, query_length + response_length, 1]
+ vpred = jnp.squeeze(vpred_temp[:, args.task.query_length - 1 : -1, :], axis=-1)
+ # vpred: [local_micro_batch_size, response_length]
+ vpredclipped = jnp.clip(
+ vpred,
+ mb_values - args.ppo.cliprange_value,
+ mb_values + args.ppo.cliprange_value,
+ )
+ vf_losses1 = jnp.square(vpred - mb_returns)
+ vf_losses2 = jnp.square(vpredclipped - mb_returns)
+ vf_loss = 0.5 * jnp.maximum(vf_losses1, vf_losses2).mean()
+ vf_clipfrac = (vf_losses2 > vf_losses1).astype(jnp.float32).mean()
+
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ logits /= args.task.temperature
+ responses = mb_query_responses[:, args.task.query_length :]
+ new_logprobs = -optax.softmax_cross_entropy_with_integer_labels(
+ logits, responses
+ )
+
+ logprobs_diff = new_logprobs - mb_logprobs
+ ratio = jnp.exp(logprobs_diff)
+ pg_losses1 = -mb_advantages * ratio
+ pg_losses2 = -mb_advantages * jnp.clip(
+ ratio, 1.0 - args.ppo.cliprange, 1.0 + args.ppo.cliprange
+ )
+ pg_loss = jnp.maximum(pg_losses1, pg_losses2).mean()
+ pg_clipfrac = (pg_losses2 > pg_losses1).astype(jnp.float32).mean()
+
+ pd = jax.nn.softmax(logits, axis=-1)
+ entropy = jax.nn.logsumexp(logits, axis=-1) - jnp.sum(pd * logits, axis=-1)
+
+ approxkl = 0.5 * ((logprobs_diff) ** 2).mean()
+ loss = pg_loss + args.ppo.vf_coef * vf_loss
+
+ current_rl_stats = dict(
+ approxkl=approxkl,
+ entropy=entropy.mean(),
+ ratio=ratio.mean(),
+ pg_loss=pg_loss,
+ pg_clipfrac=pg_clipfrac,
+ vf_loss1=vf_losses1.mean(),
+ vf_loss=vf_loss,
+ vf_clipfrac=vf_clipfrac,
+ loss=loss,
+ )
+ return loss, current_rl_stats
+
+ grad_fn = jax.value_and_grad(loss, has_aux=True)
+ (loss, current_rl_stats), grads = grad_fn(policy_state.params)
+ grads = jax.lax.pmean(grads, "batch")
+ policy_state = policy_state.apply_gradients(grads=grads)
+
+ rl_stats = rl_stats.merge(RLStatistics.gather_from_model_output(**current_rl_stats))
+
+ return policy_state, rl_stats
+
+
+def linear_schedule(optimizer_step, args):
+ """anneal learning rate linearly to reach 0 after one epoch."""
+ update = 1 + optimizer_step // (
+ args.ppo.noptepochs
+ * args.ppo.nminibatches
+ * args.ppo.gradient_accumulation_steps
+ )
+ frac = 1.0 - (update - 1.0) / args.ppo.num_updates
+ lrnow = frac * args.ppo.lr
+ return lrnow
+
+
+def train(args: Args):
+ local_devices = jax.local_devices()
+ global_devices = jax.devices()
+ args.ppo.world_size = jax.process_count()
+ learner_devices = [local_devices[d_id] for d_id in args.learner_device_ids]
+ global_learner_decices = [
+ global_devices[d_id + process_index * len(local_devices)]
+ for process_index in range(args.ppo.world_size)
+ for d_id in args.learner_device_ids
+ ]
+ pprint({"global_learner_decices": global_learner_decices})
+ args.global_learner_decices = [str(item) for item in global_learner_decices]
+ args.learner_devices = [str(item) for item in learner_devices]
+ args.local_rank = jax.process_index()
+ args.ppo.batch_size = int(
+ args.ppo.local_batch_size * len(args.learner_devices) * args.ppo.world_size
+ )
+ args.ppo.minibatch_size = exact_div(args.ppo.batch_size, args.ppo.nminibatches)
+ args.ppo.local_mini_batch_size = exact_div(
+ args.ppo.local_batch_size, args.ppo.nminibatches
+ )
+ args.ppo.local_micro_batch_size = exact_div(
+ args.ppo.local_mini_batch_size, args.ppo.gradient_accumulation_steps
+ )
+ if args.ppo.whiten_rewards:
+ assert (
+ args.ppo.local_mini_batch_size >= 8
+ ), f"Per-rank minibatch size {args.ppo.local_mini_batch_size} is insufficient for whitening"
+ # `per_rank_rollout_batch_size` is our `args.ppo.local_batch_size`
+ # `per_rank_minibatch_size` is our `args.ppo.local_mini_batch_size`
+ args.ppo.num_updates = args.ppo.total_episodes // args.ppo.batch_size
+
+ console = Console(force_terminal=True)
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+ writer = SimpleNamespace() # dummy writer
+ writer.add_scalar = lambda x, y, z: None
+ writer.add_histogram = lambda x, y, z: None
+
+ if args.local_rank == 0:
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+ writer = SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s"
+ % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+ local_seed = args.seed + args.local_rank * 100003 # Prime
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.base_model,
+ padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ reward_forward = prepare_reward_forward(args, tokenizer)
+ (
+ policy_forward,
+ policy_generate,
+ policy_params,
+ ) = prepare_policy_forward_and_policy_generate(args, tokenizer)
+ _, _, ref_policy_params = prepare_policy_forward_and_policy_generate(
+ args, tokenizer
+ )
+
+ if args.use_tensorflow_adam:
+ adam = adam_tf_style
+ else:
+ adam = optax.adam
+
+ optimizer = adam(
+ learning_rate=functools.partial(linear_schedule, args=args),
+ eps=args.ppo.eps,
+ )
+
+ optimizer = optax.MultiSteps(optimizer, args.ppo.gradient_accumulation_steps)
+
+ policy_state = TrainState.create(
+ apply_fn=policy_forward, params=policy_params, tx=optimizer
+ )
+ policy_state = jax_utils.replicate(policy_state)
+
+ dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=local_seed,
+ start_text=args.task.start_text,
+ end_text=args.task.end_text,
+ )
+ dataloader = DataLoader(dataset, batch_size=args.ppo.batch_size)
+ iter_dataloader = iter(dataloader)
+ kl_ctl = AdaptiveKLController(
+ args.rewards.kl_coef, hparams=args.rewards.adaptive_kl
+ )
+
+ def train_update(policy_state, input_ids, rl_stats, kl_ctl_value):
+ queries = right_padding_to_left_padding(input_ids, tokenizer.pad_token_id)
+
+ query_responses = policy_generate(
+ params=policy_state.params.lm_backbone_params,
+ queries=queries,
+ )
+ # query_responses: [local_batch_size, query_length + response_length]
+ responses = query_responses[:, args.task.query_length :]
+
+ output, full_values = policy_forward(policy_state.params, query_responses)
+ values = full_values[:, args.task.query_length - 1 : -1].squeeze(-1)
+ # values: [local_batch_size, response_length]
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ # logits: [local_batch_size, response_length, vocab_size]
+ logits /= args.task.temperature
+ all_logprobs = jax.nn.log_softmax(logits, axis=-1)
+ # all_logprobs: [local_batch_size, response_length, vocab_size]
+ logprobs = jnp.take_along_axis(all_logprobs, responses[..., None], -1).squeeze(
+ -1
+ )
+ # logprobs: [local_batch_size, response_length]
+
+ ref_output, _ = policy_forward(ref_policy_params, query_responses)
+ ref_logits = ref_output.logits[:, args.task.query_length - 1 : -1, :]
+ # ref_logits: [local_batch_size, response_length, vocab_size]
+ ref_logits /= args.task.temperature
+ ref_all_logprobs = jax.nn.log_softmax(ref_logits, axis=-1)
+ ref_logprobs = jnp.take_along_axis(
+ ref_all_logprobs, responses[..., None], -1
+ ).squeeze(-1)
+ # ref_logprobs: [local_batch_size, response_length]
+
+ # **Response Processing**
+ # 1. truncate at the first occurrence of `truncate_token` that appears at or after
+ # position truncate_after in the responses
+ # https://github.com/openai/lm-human-preferences/blob/cbfd210bb8b08f6bc5c26878c10984b90f516c66/lm_human_preferences/train_policy.py#L378
+ truncate_token_mask = responses == args.task.truncate_token
+ truncate_after_or_token_mask = jnp.concatenate(
+ [
+ jnp.zeros_like(truncate_token_mask)[:, : args.task.truncate_after],
+ truncate_token_mask[:, args.task.truncate_after :],
+ ],
+ axis=1,
+ )
+ truncate_mask = (
+ jnp.cumsum(truncate_after_or_token_mask, axis=1)
+ - truncate_after_or_token_mask
+ ).astype("bool")
+ postprocessed_responses = jnp.where(
+ truncate_mask,
+ jnp.full_like(responses, tokenizer.pad_token_id),
+ responses,
+ )
+
+ # 2. run reward model on the truncated responses
+ postprocessed_query_responses = jnp.concatenate(
+ (queries, postprocessed_responses), axis=1
+ )
+ postprocessed_query_responses = right_padding_to_left_padding(
+ postprocessed_query_responses, tokenizer.pad_token_id
+ )
+ scores = reward_forward(
+ query_responses_ids=postprocessed_query_responses
+ ).flatten()
+
+ # 3. filter response. Ensure that the sample contains truncate_token
+ # responses not passing that filter will receive a low (fixed) score
+ # only query humans on responses that pass that filter
+ matches_token = (
+ postprocessed_responses[:, args.task.truncate_after :]
+ == args.task.truncate_token
+ )
+ filter_mask = jnp.any(matches_token, axis=-1)
+
+ scores = jnp.where(
+ filter_mask,
+ scores,
+ jnp.full_like(scores, args.task.penalty_reward_value),
+ )
+
+ # 4. compute rewards
+ kl = logprobs - ref_logprobs
+ non_score_reward = -kl_ctl_value * kl
+ rewards = non_score_reward
+ rewards = rewards.at[:, -1].add(scores)
+
+ # 5. whiten rewards
+ if args.ppo.whiten_rewards:
+ rewards = whiten(rewards, shift_mean=False)
+
+ # 6. compute advantages and returns
+ def compute_gae_once(carry, inp):
+ advantages = carry
+ nextdone, nextvalues, curvalues, reward = inp
+ nextnonterminal = 1.0 - nextdone
+
+ delta = reward + args.ppo.gamma * nextvalues * nextnonterminal - curvalues
+ advantages = (
+ delta + args.ppo.gamma * args.ppo.lam * nextnonterminal * advantages
+ )
+ return advantages, advantages
+
+ extended_values = jnp.concatenate(
+ (values, jnp.zeros((args.ppo.local_batch_size, 1))), axis=1
+ )
+ dones = jnp.zeros_like(rewards)
+ dones = dones.at[:, -1].set(1.0)
+
+ advantages = jnp.zeros((args.ppo.local_batch_size,))
+ _, advantages = jax.lax.scan(
+ compute_gae_once,
+ advantages,
+ (dones.T, extended_values[:, 1:].T, extended_values[:, :-1].T, rewards.T),
+ reverse=True,
+ )
+
+ advantages = advantages.T
+ returns = advantages + values
+ advantages = whiten(advantages)
+
+ def ppo_single_microbatch(carry, inp):
+ policy_state, rl_stats = carry
+ mb_advantages, mb_returns, mb_values, mb_query_responses, mb_logprobs = inp
+
+ policy_state, rl_stats = train_step(
+ policy_state=policy_state,
+ rl_stats=rl_stats,
+ mb_advantages=mb_advantages,
+ mb_returns=mb_returns,
+ mb_values=mb_values,
+ mb_query_responses=mb_query_responses,
+ mb_logprobs=mb_logprobs,
+ args=args,
+ )
+ return (policy_state, rl_stats), None
+
+ def ppo_single_epoch(carry, inp):
+ policy_state, rl_stats, key = carry
+ key, subkey = jax.random.split(key, 2)
+ perm = jax.random.permutation(key, args.ppo.local_batch_size)
+ # advantages, returns, values, query_responses, logprobs = inp
+ mbs_advantages = einops.rearrange(
+ advantages[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_returns = einops.rearrange(
+ returns[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_values = einops.rearrange(
+ values[perm], "(c m) l -> c m l", c=args.ppo.gradient_accumulation_steps
+ )
+ mbs_query_responses = einops.rearrange(
+ query_responses[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_logprobs = einops.rearrange(
+ logprobs[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ (policy_state, rl_stats), _ = jax.lax.scan(
+ f=ppo_single_microbatch,
+ init=(policy_state, rl_stats),
+ xs=(
+ mbs_advantages,
+ mbs_returns,
+ mbs_values,
+ mbs_query_responses,
+ mbs_logprobs,
+ ),
+ )
+ return (policy_state, rl_stats, key), None
+
+ key = jax.random.PRNGKey(args.seed)
+ # Do multiple epochs of PPO training, with a fresh random shuffle in each epoch
+ (policy_state, rl_stats, _), _ = jax.lax.scan(
+ f=ppo_single_epoch,
+ init=(policy_state, rl_stats, key),
+ xs=None,
+ length=args.ppo.noptepochs,
+ )
+
+ return (
+ policy_state,
+ returns,
+ advantages,
+ values,
+ query_responses,
+ logprobs,
+ kl,
+ scores,
+ rl_stats,
+ )
+
+ p_train_update = jax.pmap(
+ train_update,
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+
+ print("===training policy===")
+ global_step = 0
+
+ for update in range(1, args.ppo.num_updates + 1):
+ global_step += 1 * args.ppo.batch_size
+ data = next(iter_dataloader)
+ input_ids = common_utils.shard(data["input_ids"].numpy())
+
+ rl_stats = jax_utils.replicate(RLStatistics.empty())
+ (
+ policy_state,
+ returns,
+ advantages,
+ values,
+ query_responses,
+ logprobs,
+ kl,
+ scores,
+ rl_stats,
+ ) = p_train_update(
+ policy_state=policy_state,
+ input_ids=input_ids,
+ rl_stats=rl_stats,
+ kl_ctl_value=jax_utils.replicate(kl_ctl.value), | Avoid using `jax_utils.replicate` in this case I think. Just pass in regular numpy array. I am worried `jax_utils.replicate` creates jax arrays somehow, which if not done under the jitted function can be very slow. |
lm-human-preference-details | github_2023 | python | 18 | vwxyzjn | vwxyzjn | @@ -0,0 +1,1024 @@
+import functools
+import os
+import time
+from dataclasses import asdict, dataclass, field
+from types import SimpleNamespace
+from typing import List, Optional
+import einops
+from clu import metrics
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+import optax
+import orbax
+import tyro
+from flax import jax_utils
+from flax.training import common_utils, orbax_utils
+from flax.training.train_state import TrainState
+from optax import ScaleByAdamState, update_moment, update_moment_per_elem_norm
+from optax._src import base, combine, numerics, utils
+from optax._src.alias import _scale_by_learning_rate
+from rich.console import Console
+from rich.pretty import pprint
+from torch.utils.data import DataLoader, IterableDataset
+from torch.utils.tensorboard import SummaryWriter
+from transformers import AutoTokenizer, FlaxAutoModelForCausalLM, GenerationConfig
+
+from lm_human_preference_details.data import DATASET
+
+
+@dataclass
+class AdaptiveKLParams:
+ target: float = 6.0
+ horizon: int = 10000 # in episodes
+
+
+@dataclass
+class RewardHParams:
+ kl_coef: float = 0.15
+ adaptive_kl: Optional[AdaptiveKLParams] = field(default_factory=AdaptiveKLParams)
+ trained_model: Optional[str] = "models/"
+ label_dataset: tyro.conf.Suppress[Optional[str]] = None
+
+
+@dataclass
+class PpoHParams:
+ total_episodes: int = 1000000
+ local_batch_size: int = 64
+ local_mini_batch_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ mini_batch_size: tyro.conf.Suppress[int] = None
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ minibatch_size: tyro.conf.Suppress[int] = None
+ num_updates: tyro.conf.Suppress[int] = None
+ nminibatches: int = 1
+ noptepochs: int = 4
+ lr: float = 0.00001
+ eps: float = 1e-5
+ vf_coef: float = 0.1
+ cliprange: float = 0.2
+ cliprange_value: float = 0.2
+ gamma: float = 1
+ lam: float = 0.95
+ whiten_rewards: bool = True
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # Truncate response after the first occurrence of this token at or after index after when sampling.
+ truncate_token: int = 13
+ truncate_after: int = 16
+ penalty_reward_value: int = -1
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "cleanrl"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ print_sample_output_freq: int = 0
+ """How often to print sample output"""
+ save_path: str = "models/policy/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = True
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ rewards: RewardHParams = field(default_factory=RewardHParams)
+ ppo: PpoHParams = field(default_factory=PpoHParams)
+
+ # distributed settings
+ local_rank: int = 0
+ """the rank of this process"""
+ learner_device_ids: List[int] = field(default_factory=lambda: [0])
+ "the device ids that script will use"
+ learner_devices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the devices that script will use"""
+ global_learner_decices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the total devices (across all nodes and machines) that script will use"""
+
+
+def scale_by_adam_tf_style(
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+) -> base.GradientTransformation:
+ """Rescale updates according to the Adam algorithm.
+
+ References:
+ [Kingma et al, 2014](https://arxiv.org/abs/1412.6980)
+
+ Args:
+ b1: Decay rate for the exponentially weighted average of grads.
+ b2: Decay rate for the exponentially weighted average of squared grads.
+ eps: Term added to the denominator to improve numerical stability.
+ eps_root: Term added to the denominator inside the square-root to improve
+ numerical stability when backpropagating gradients through the rescaling.
+ mu_dtype: Optional `dtype` to be used for the first order accumulator; if
+ `None` then the `dtype` is inferred from `params` and `updates`.
+
+ Returns:
+ A `GradientTransformation` object.
+ """
+
+ mu_dtype = utils.canonicalize_dtype(mu_dtype)
+
+ def init_fn(params):
+ mu = jax.tree_util.tree_map(
+ lambda t: jnp.zeros_like(t, dtype=mu_dtype), params
+ ) # First moment
+ nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Second moment
+ return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)
+
+ def update_fn(updates, state, params=None):
+ del params
+ mu = update_moment(updates, state.mu, b1, 1)
+ nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
+ count_inc = numerics.safe_int32_increment(state.count)
+
+ ### `optax` default adam implementation
+ # mu_hat = bias_correction(mu, b1, count_inc)
+ # nu_hat = bias_correction(nu, b2, count_inc)
+ # updates = jax.tree_util.tree_map(
+ # lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
+ ### Tensorflow adam implementation
+ updates = jax.tree_util.tree_map(
+ lambda m, v: (jnp.sqrt(1 - b2**count_inc) / (1 - b1**count_inc))
+ * m
+ / (jnp.sqrt(v + eps_root) + eps),
+ mu,
+ nu,
+ ) #
+ mu = utils.cast_tree(mu, mu_dtype)
+ return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)
+
+ return base.GradientTransformation(init_fn, update_fn)
+
+
+def adam_tf_style(
+ learning_rate,
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+):
+ return combine.chain(
+ scale_by_adam_tf_style(
+ b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype
+ ),
+ _scale_by_learning_rate(learning_rate),
+ )
+
+
+class AdaptiveKLController:
+ def __init__(self, init_kl_coef: float, hparams: AdaptiveKLParams):
+ self.value = init_kl_coef
+ self.hparams = hparams
+
+ def update(self, current, n_steps):
+ target = self.hparams.target
+ proportional_error = np.clip(current / target - 1, -0.2, 0.2)
+ mult = 1 + proportional_error * n_steps / self.hparams.horizon
+ self.value *= mult
+
+
+def whiten(values, shift_mean=True):
+ # `unbiased=False` matches TF `tf.nn.moments`'s setting
+ mean, var = jnp.mean(values), jnp.var(values)
+ whitened = (values - mean) * jax.lax.rsqrt(var + 1e-8)
+ if not shift_mean:
+ whitened += mean
+ return whitened
+
+
+class ScalarHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=0),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ return x
+
+
+class RewardHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(
+ stddev=1 / np.sqrt(self.head_input_size + 1)
+ ),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+ x = x * reward_gain + reward_bias
+ return x
+
+
+@flax.struct.dataclass
+class LMBackboneWithScalarHeadParams:
+ """Parameters for the language model backbone and a scalar head."""
+
+ lm_backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+# a pytorch dataset
+class MyDataset(IterableDataset):
+ def __init__(
+ self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
+ ):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ def pad_row(row):
+ mask = 1 - (row == pad_id) # 1 if not pad_id, 0 if pad_id
+ return row[
+ jnp.argsort(mask)
+ ] # uses the fact that jnp.argsort is stable by default
+
+ return jax.vmap(pad_row)(tokens)
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a / b}")
+ return q
+
+
+def prepare_reward_forward(args, tokenizer):
+ """Prepare the forward pass of the reward model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ scalar_head = RewardHead(head_input_size=lm_backbone.config.hidden_size)
+
+ def reward_forward(
+ params: LMBackboneWithScalarHeadParams,
+ query_responses_ids: jnp.ndarray,
+ ):
+ """Get reward for each queiry--response pair."""
+ assert query_responses_ids.ndim == 2
+
+ # mask out padding tokens
+ attention_mask = query_responses_ids != tokenizer.pad_token_id
+ query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ reward_latents = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=query_responses_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ ).hidden_states[-1]
+ # reward_latents: [batch_size, length, hidden_size]
+
+ last_reward_latents = reward_latents[:, -1, :]
+ # last_reward_latents: [batch_size, hidden_size]
+
+ reward = scalar_head.apply(variables=params.head_params, x=last_reward_latents)
+ # reward: [batch_size, 1]
+ return reward
+
+ if args.rewards.trained_model:
+ orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()
+ reward_state_params = orbax_checkpointer.restore(args.rewards.trained_model)[
+ "reward_model"
+ ]["params"]
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict(
+ {"params": reward_state_params["lm_backbone_params"]["params"]}
+ ),
+ head_params=flax.core.FrozenDict(
+ {"params": reward_state_params["head_params"]["params"]}
+ ),
+ )
+ pprint(f"Loaded pretrained reward model from {args.rewards.trained_model}")
+ else:
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return functools.partial(reward_forward, params=reward_params)
+
+
+def prepare_policy_forward_and_policy_generate(args, tokenizer):
+ """Prepare the forward pass of the policy model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ # disable `pad_token_id` and `eos_token_id` because we just want to
+ # generate tokens without truncation / padding
+ lm_backbone.generation_config.eos_token_id = None
+ lm_backbone.generation_config.pad_token_id = tokenizer.pad_token_id
+ scalar_head = ScalarHead(head_input_size=lm_backbone.config.hidden_size)
+
+ generation_config = GenerationConfig(
+ max_new_tokens=args.task.response_length,
+ min_new_tokens=args.task.response_length,
+ temperature=args.task.temperature,
+ top_k=0.0,
+ top_p=1.0,
+ do_sample=True,
+ pad_token_id=tokenizer.pad_token_id,
+ )
+
+ def policy_forward(
+ params: LMBackboneWithScalarHeadParams,
+ input_ids: jnp.ndarray,
+ ):
+ """Get reward for input_ids."""
+ assert input_ids.ndim == 2
+ # shape: [batch_size, length]
+
+ # mask out padding tokens
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, input_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ lm_backbone_out = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ )
+
+ value_latents = lm_backbone_out.hidden_states[-1]
+ # shape: [batch_size, length, hidden_size]
+
+ values = scalar_head.apply(variables=params.head_params, x=value_latents)
+ # shape: [batch_size, length, 1]
+ return lm_backbone_out, values
+
+ def policy_generate(
+ params: LMBackboneWithScalarHeadParams,
+ queries: jnp.ndarray,
+ ):
+ input_ids = queries
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = lm_backbone.generate(
+ params=params["params"],
+ input_ids=input_ids,
+ generation_config=generation_config,
+ attention_mask=attention_mask.astype("i4"),
+ return_dict_in_generate=True,
+ )
+ context_length = input_ids.shape[1]
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ policy_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return policy_forward, policy_generate, policy_params
+
+
+@flax.struct.dataclass
+class RLStatistics(metrics.Collection):
+ approxkl: metrics.Average.from_output("approxkl")
+ entropy: metrics.Average.from_output("entropy")
+ pg_loss: metrics.Average.from_output("pg_loss")
+ pg_clipfrac: metrics.Average.from_output("pg_clipfrac")
+ vf_loss1: metrics.Average.from_output("vf_loss1")
+ vf_loss: metrics.Average.from_output("vf_loss")
+ vf_clipfrac: metrics.Average.from_output("vf_clipfrac")
+ ratio: metrics.Average.from_output("ratio")
+ loss: metrics.Average.from_output("loss")
+
+
+def train_step(
+ policy_state,
+ rl_stats,
+ mb_advantages,
+ mb_returns,
+ mb_values,
+ mb_query_responses,
+ mb_logprobs,
+ args,
+):
+ def loss(params):
+ output, vpred_temp = policy_state.apply_fn(params, mb_query_responses)
+ # vpred_temp: [local_micro_batch_size, query_length + response_length, 1]
+ vpred = jnp.squeeze(vpred_temp[:, args.task.query_length - 1 : -1, :], axis=-1)
+ # vpred: [local_micro_batch_size, response_length]
+ vpredclipped = jnp.clip(
+ vpred,
+ mb_values - args.ppo.cliprange_value,
+ mb_values + args.ppo.cliprange_value,
+ )
+ vf_losses1 = jnp.square(vpred - mb_returns)
+ vf_losses2 = jnp.square(vpredclipped - mb_returns)
+ vf_loss = 0.5 * jnp.maximum(vf_losses1, vf_losses2).mean()
+ vf_clipfrac = (vf_losses2 > vf_losses1).astype(jnp.float32).mean()
+
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ logits /= args.task.temperature
+ responses = mb_query_responses[:, args.task.query_length :]
+ new_logprobs = -optax.softmax_cross_entropy_with_integer_labels(
+ logits, responses
+ )
+
+ logprobs_diff = new_logprobs - mb_logprobs
+ ratio = jnp.exp(logprobs_diff)
+ pg_losses1 = -mb_advantages * ratio
+ pg_losses2 = -mb_advantages * jnp.clip(
+ ratio, 1.0 - args.ppo.cliprange, 1.0 + args.ppo.cliprange
+ )
+ pg_loss = jnp.maximum(pg_losses1, pg_losses2).mean()
+ pg_clipfrac = (pg_losses2 > pg_losses1).astype(jnp.float32).mean()
+
+ pd = jax.nn.softmax(logits, axis=-1)
+ entropy = jax.nn.logsumexp(logits, axis=-1) - jnp.sum(pd * logits, axis=-1)
+
+ approxkl = 0.5 * ((logprobs_diff) ** 2).mean()
+ loss = pg_loss + args.ppo.vf_coef * vf_loss
+
+ current_rl_stats = dict(
+ approxkl=approxkl,
+ entropy=entropy.mean(),
+ ratio=ratio.mean(),
+ pg_loss=pg_loss,
+ pg_clipfrac=pg_clipfrac,
+ vf_loss1=vf_losses1.mean(),
+ vf_loss=vf_loss,
+ vf_clipfrac=vf_clipfrac,
+ loss=loss,
+ )
+ return loss, current_rl_stats
+
+ grad_fn = jax.value_and_grad(loss, has_aux=True)
+ (loss, current_rl_stats), grads = grad_fn(policy_state.params)
+ grads = jax.lax.pmean(grads, "batch")
+ policy_state = policy_state.apply_gradients(grads=grads)
+
+ rl_stats = rl_stats.merge(RLStatistics.gather_from_model_output(**current_rl_stats))
+
+ return policy_state, rl_stats
+
+
+def linear_schedule(optimizer_step, args):
+ """anneal learning rate linearly to reach 0 after one epoch."""
+ update = 1 + optimizer_step // (
+ args.ppo.noptepochs
+ * args.ppo.nminibatches
+ * args.ppo.gradient_accumulation_steps
+ )
+ frac = 1.0 - (update - 1.0) / args.ppo.num_updates
+ lrnow = frac * args.ppo.lr
+ return lrnow
+
+
+def train(args: Args):
+ local_devices = jax.local_devices()
+ global_devices = jax.devices()
+ args.ppo.world_size = jax.process_count()
+ learner_devices = [local_devices[d_id] for d_id in args.learner_device_ids]
+ global_learner_decices = [
+ global_devices[d_id + process_index * len(local_devices)]
+ for process_index in range(args.ppo.world_size)
+ for d_id in args.learner_device_ids
+ ]
+ pprint({"global_learner_decices": global_learner_decices})
+ args.global_learner_decices = [str(item) for item in global_learner_decices]
+ args.learner_devices = [str(item) for item in learner_devices]
+ args.local_rank = jax.process_index()
+ args.ppo.batch_size = int(
+ args.ppo.local_batch_size * len(args.learner_devices) * args.ppo.world_size
+ )
+ args.ppo.minibatch_size = exact_div(args.ppo.batch_size, args.ppo.nminibatches)
+ args.ppo.local_mini_batch_size = exact_div(
+ args.ppo.local_batch_size, args.ppo.nminibatches
+ )
+ args.ppo.local_micro_batch_size = exact_div(
+ args.ppo.local_mini_batch_size, args.ppo.gradient_accumulation_steps
+ )
+ if args.ppo.whiten_rewards:
+ assert (
+ args.ppo.local_mini_batch_size >= 8
+ ), f"Per-rank minibatch size {args.ppo.local_mini_batch_size} is insufficient for whitening"
+ # `per_rank_rollout_batch_size` is our `args.ppo.local_batch_size`
+ # `per_rank_minibatch_size` is our `args.ppo.local_mini_batch_size`
+ args.ppo.num_updates = args.ppo.total_episodes // args.ppo.batch_size
+
+ console = Console(force_terminal=True)
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+ writer = SimpleNamespace() # dummy writer
+ writer.add_scalar = lambda x, y, z: None
+ writer.add_histogram = lambda x, y, z: None
+
+ if args.local_rank == 0:
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+ writer = SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s"
+ % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+ local_seed = args.seed + args.local_rank * 100003 # Prime
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.base_model,
+ padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ reward_forward = prepare_reward_forward(args, tokenizer)
+ (
+ policy_forward,
+ policy_generate,
+ policy_params,
+ ) = prepare_policy_forward_and_policy_generate(args, tokenizer)
+ _, _, ref_policy_params = prepare_policy_forward_and_policy_generate(
+ args, tokenizer
+ )
+
+ if args.use_tensorflow_adam:
+ adam = adam_tf_style
+ else:
+ adam = optax.adam
+
+ optimizer = adam(
+ learning_rate=functools.partial(linear_schedule, args=args),
+ eps=args.ppo.eps,
+ )
+
+ optimizer = optax.MultiSteps(optimizer, args.ppo.gradient_accumulation_steps)
+
+ policy_state = TrainState.create(
+ apply_fn=policy_forward, params=policy_params, tx=optimizer
+ )
+ policy_state = jax_utils.replicate(policy_state)
+
+ dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=local_seed,
+ start_text=args.task.start_text,
+ end_text=args.task.end_text,
+ )
+ dataloader = DataLoader(dataset, batch_size=args.ppo.batch_size)
+ iter_dataloader = iter(dataloader)
+ kl_ctl = AdaptiveKLController(
+ args.rewards.kl_coef, hparams=args.rewards.adaptive_kl
+ )
+
+ def train_update(policy_state, input_ids, rl_stats, kl_ctl_value):
+ queries = right_padding_to_left_padding(input_ids, tokenizer.pad_token_id)
+
+ query_responses = policy_generate(
+ params=policy_state.params.lm_backbone_params,
+ queries=queries,
+ )
+ # query_responses: [local_batch_size, query_length + response_length]
+ responses = query_responses[:, args.task.query_length :]
+
+ output, full_values = policy_forward(policy_state.params, query_responses)
+ values = full_values[:, args.task.query_length - 1 : -1].squeeze(-1)
+ # values: [local_batch_size, response_length]
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ # logits: [local_batch_size, response_length, vocab_size]
+ logits /= args.task.temperature
+ all_logprobs = jax.nn.log_softmax(logits, axis=-1)
+ # all_logprobs: [local_batch_size, response_length, vocab_size]
+ logprobs = jnp.take_along_axis(all_logprobs, responses[..., None], -1).squeeze(
+ -1
+ )
+ # logprobs: [local_batch_size, response_length]
+
+ ref_output, _ = policy_forward(ref_policy_params, query_responses)
+ ref_logits = ref_output.logits[:, args.task.query_length - 1 : -1, :]
+ # ref_logits: [local_batch_size, response_length, vocab_size]
+ ref_logits /= args.task.temperature
+ ref_all_logprobs = jax.nn.log_softmax(ref_logits, axis=-1)
+ ref_logprobs = jnp.take_along_axis(
+ ref_all_logprobs, responses[..., None], -1
+ ).squeeze(-1)
+ # ref_logprobs: [local_batch_size, response_length]
+
+ # **Response Processing**
+ # 1. truncate at the first occurrence of `truncate_token` that appears at or after
+ # position truncate_after in the responses
+ # https://github.com/openai/lm-human-preferences/blob/cbfd210bb8b08f6bc5c26878c10984b90f516c66/lm_human_preferences/train_policy.py#L378
+ truncate_token_mask = responses == args.task.truncate_token
+ truncate_after_or_token_mask = jnp.concatenate(
+ [
+ jnp.zeros_like(truncate_token_mask)[:, : args.task.truncate_after],
+ truncate_token_mask[:, args.task.truncate_after :],
+ ],
+ axis=1,
+ )
+ truncate_mask = (
+ jnp.cumsum(truncate_after_or_token_mask, axis=1)
+ - truncate_after_or_token_mask
+ ).astype("bool")
+ postprocessed_responses = jnp.where(
+ truncate_mask,
+ jnp.full_like(responses, tokenizer.pad_token_id),
+ responses,
+ )
+
+ # 2. run reward model on the truncated responses
+ postprocessed_query_responses = jnp.concatenate(
+ (queries, postprocessed_responses), axis=1
+ )
+ postprocessed_query_responses = right_padding_to_left_padding(
+ postprocessed_query_responses, tokenizer.pad_token_id
+ )
+ scores = reward_forward(
+ query_responses_ids=postprocessed_query_responses
+ ).flatten()
+
+ # 3. filter response. Ensure that the sample contains truncate_token
+ # responses not passing that filter will receive a low (fixed) score
+ # only query humans on responses that pass that filter
+ matches_token = (
+ postprocessed_responses[:, args.task.truncate_after :]
+ == args.task.truncate_token
+ )
+ filter_mask = jnp.any(matches_token, axis=-1)
+
+ scores = jnp.where(
+ filter_mask,
+ scores,
+ jnp.full_like(scores, args.task.penalty_reward_value),
+ )
+
+ # 4. compute rewards
+ kl = logprobs - ref_logprobs
+ non_score_reward = -kl_ctl_value * kl
+ rewards = non_score_reward
+ rewards = rewards.at[:, -1].add(scores)
+
+ # 5. whiten rewards
+ if args.ppo.whiten_rewards:
+ rewards = whiten(rewards, shift_mean=False)
+
+ # 6. compute advantages and returns
+ def compute_gae_once(carry, inp):
+ advantages = carry
+ nextdone, nextvalues, curvalues, reward = inp
+ nextnonterminal = 1.0 - nextdone
+
+ delta = reward + args.ppo.gamma * nextvalues * nextnonterminal - curvalues
+ advantages = (
+ delta + args.ppo.gamma * args.ppo.lam * nextnonterminal * advantages
+ )
+ return advantages, advantages
+
+ extended_values = jnp.concatenate(
+ (values, jnp.zeros((args.ppo.local_batch_size, 1))), axis=1
+ )
+ dones = jnp.zeros_like(rewards)
+ dones = dones.at[:, -1].set(1.0)
+
+ advantages = jnp.zeros((args.ppo.local_batch_size,))
+ _, advantages = jax.lax.scan(
+ compute_gae_once,
+ advantages,
+ (dones.T, extended_values[:, 1:].T, extended_values[:, :-1].T, rewards.T),
+ reverse=True,
+ )
+
+ advantages = advantages.T
+ returns = advantages + values
+ advantages = whiten(advantages)
+
+ def ppo_single_microbatch(carry, inp):
+ policy_state, rl_stats = carry
+ mb_advantages, mb_returns, mb_values, mb_query_responses, mb_logprobs = inp
+
+ policy_state, rl_stats = train_step(
+ policy_state=policy_state,
+ rl_stats=rl_stats,
+ mb_advantages=mb_advantages,
+ mb_returns=mb_returns,
+ mb_values=mb_values,
+ mb_query_responses=mb_query_responses,
+ mb_logprobs=mb_logprobs,
+ args=args,
+ )
+ return (policy_state, rl_stats), None
+
+ def ppo_single_epoch(carry, inp):
+ policy_state, rl_stats, key = carry
+ key, subkey = jax.random.split(key, 2)
+ perm = jax.random.permutation(key, args.ppo.local_batch_size)
+ # advantages, returns, values, query_responses, logprobs = inp
+ mbs_advantages = einops.rearrange(
+ advantages[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_returns = einops.rearrange(
+ returns[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_values = einops.rearrange(
+ values[perm], "(c m) l -> c m l", c=args.ppo.gradient_accumulation_steps
+ )
+ mbs_query_responses = einops.rearrange(
+ query_responses[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_logprobs = einops.rearrange(
+ logprobs[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ (policy_state, rl_stats), _ = jax.lax.scan(
+ f=ppo_single_microbatch,
+ init=(policy_state, rl_stats),
+ xs=(
+ mbs_advantages,
+ mbs_returns,
+ mbs_values,
+ mbs_query_responses,
+ mbs_logprobs,
+ ),
+ )
+ return (policy_state, rl_stats, key), None
+
+ key = jax.random.PRNGKey(args.seed)
+ # Do multiple epochs of PPO training, with a fresh random shuffle in each epoch
+ (policy_state, rl_stats, _), _ = jax.lax.scan(
+ f=ppo_single_epoch,
+ init=(policy_state, rl_stats, key),
+ xs=None,
+ length=args.ppo.noptepochs,
+ )
+
+ return (
+ policy_state,
+ returns,
+ advantages,
+ values,
+ query_responses,
+ logprobs,
+ kl,
+ scores,
+ rl_stats,
+ )
+
+ p_train_update = jax.pmap(
+ train_update,
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+
+ print("===training policy===")
+ global_step = 0
+
+ for update in range(1, args.ppo.num_updates + 1):
+ global_step += 1 * args.ppo.batch_size
+ data = next(iter_dataloader)
+ input_ids = common_utils.shard(data["input_ids"].numpy())
+
+ rl_stats = jax_utils.replicate(RLStatistics.empty())
+ (
+ policy_state,
+ returns,
+ advantages,
+ values,
+ query_responses,
+ logprobs,
+ kl,
+ scores,
+ rl_stats,
+ ) = p_train_update(
+ policy_state=policy_state,
+ input_ids=input_ids,
+ rl_stats=rl_stats,
+ kl_ctl_value=jax_utils.replicate(kl_ctl.value),
+ )
+
+ try:
+ sample_kl = kl[0][0].sum().item()
+ sample_score = scores[0][0].item()
+ sample_query_response = query_responses[0][0]
+ console.print(
+ f"[green][bold]{'Query'}:[/]\n"
+ + f"[green]{ tokenizer.decode(sample_query_response[:args.task.query_length], skip_special_tokens=True)}[/]\n\n"
+ + f"[yellow][bold]{'Response'}:[/]\n"
+ + f"[yellow]{tokenizer.decode(sample_query_response[args.task.query_length:], skip_special_tokens=True)}[/]\n\n"
+ + f"[red]score: {sample_score}, kl: {sample_kl}, total reward: {sample_score - kl_ctl.value * sample_kl} [/]"
+ )
+ except Exception as e:
+ print(e)
+
+ # Rollout metrics
+ rl_stats = rl_stats.unreplicate().compute() | Anytime jax related computation happens outside jitted function it can be slow. Maybe do something similar to https://github.com/vwxyzjn/cleanba/blob/81dca0054c8c0930046a2d12b07ada2945014d01/cleanba/cleanba_ppo.py#L655-L659? |
lm-human-preference-details | github_2023 | python | 18 | vwxyzjn | vwxyzjn | @@ -0,0 +1,1024 @@
+import functools
+import os
+import time
+from dataclasses import asdict, dataclass, field
+from types import SimpleNamespace
+from typing import List, Optional
+import einops
+from clu import metrics
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+import optax
+import orbax
+import tyro
+from flax import jax_utils
+from flax.training import common_utils, orbax_utils
+from flax.training.train_state import TrainState
+from optax import ScaleByAdamState, update_moment, update_moment_per_elem_norm
+from optax._src import base, combine, numerics, utils
+from optax._src.alias import _scale_by_learning_rate
+from rich.console import Console
+from rich.pretty import pprint
+from torch.utils.data import DataLoader, IterableDataset
+from torch.utils.tensorboard import SummaryWriter
+from transformers import AutoTokenizer, FlaxAutoModelForCausalLM, GenerationConfig
+
+from lm_human_preference_details.data import DATASET
+
+
+@dataclass
+class AdaptiveKLParams:
+ target: float = 6.0
+ horizon: int = 10000 # in episodes
+
+
+@dataclass
+class RewardHParams:
+ kl_coef: float = 0.15
+ adaptive_kl: Optional[AdaptiveKLParams] = field(default_factory=AdaptiveKLParams)
+ trained_model: Optional[str] = "models/"
+ label_dataset: tyro.conf.Suppress[Optional[str]] = None
+
+
+@dataclass
+class PpoHParams:
+ total_episodes: int = 1000000
+ local_batch_size: int = 64
+ local_mini_batch_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ mini_batch_size: tyro.conf.Suppress[int] = None
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ minibatch_size: tyro.conf.Suppress[int] = None
+ num_updates: tyro.conf.Suppress[int] = None
+ nminibatches: int = 1
+ noptepochs: int = 4
+ lr: float = 0.00001
+ eps: float = 1e-5
+ vf_coef: float = 0.1
+ cliprange: float = 0.2
+ cliprange_value: float = 0.2
+ gamma: float = 1
+ lam: float = 0.95
+ whiten_rewards: bool = True
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # Truncate response after the first occurrence of this token at or after index after when sampling.
+ truncate_token: int = 13
+ truncate_after: int = 16
+ penalty_reward_value: int = -1
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "cleanrl"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ print_sample_output_freq: int = 0
+ """How often to print sample output"""
+ save_path: str = "models/policy/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = True
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ rewards: RewardHParams = field(default_factory=RewardHParams)
+ ppo: PpoHParams = field(default_factory=PpoHParams)
+
+ # distributed settings
+ local_rank: int = 0
+ """the rank of this process"""
+ learner_device_ids: List[int] = field(default_factory=lambda: [0])
+ "the device ids that script will use"
+ learner_devices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the devices that script will use"""
+ global_learner_decices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the total devices (across all nodes and machines) that script will use"""
+
+
+def scale_by_adam_tf_style(
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+) -> base.GradientTransformation:
+ """Rescale updates according to the Adam algorithm.
+
+ References:
+ [Kingma et al, 2014](https://arxiv.org/abs/1412.6980)
+
+ Args:
+ b1: Decay rate for the exponentially weighted average of grads.
+ b2: Decay rate for the exponentially weighted average of squared grads.
+ eps: Term added to the denominator to improve numerical stability.
+ eps_root: Term added to the denominator inside the square-root to improve
+ numerical stability when backpropagating gradients through the rescaling.
+ mu_dtype: Optional `dtype` to be used for the first order accumulator; if
+ `None` then the `dtype` is inferred from `params` and `updates`.
+
+ Returns:
+ A `GradientTransformation` object.
+ """
+
+ mu_dtype = utils.canonicalize_dtype(mu_dtype)
+
+ def init_fn(params):
+ mu = jax.tree_util.tree_map(
+ lambda t: jnp.zeros_like(t, dtype=mu_dtype), params
+ ) # First moment
+ nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Second moment
+ return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)
+
+ def update_fn(updates, state, params=None):
+ del params
+ mu = update_moment(updates, state.mu, b1, 1)
+ nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
+ count_inc = numerics.safe_int32_increment(state.count)
+
+ ### `optax` default adam implementation
+ # mu_hat = bias_correction(mu, b1, count_inc)
+ # nu_hat = bias_correction(nu, b2, count_inc)
+ # updates = jax.tree_util.tree_map(
+ # lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
+ ### Tensorflow adam implementation
+ updates = jax.tree_util.tree_map(
+ lambda m, v: (jnp.sqrt(1 - b2**count_inc) / (1 - b1**count_inc))
+ * m
+ / (jnp.sqrt(v + eps_root) + eps),
+ mu,
+ nu,
+ ) #
+ mu = utils.cast_tree(mu, mu_dtype)
+ return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)
+
+ return base.GradientTransformation(init_fn, update_fn)
+
+
+def adam_tf_style(
+ learning_rate,
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+):
+ return combine.chain(
+ scale_by_adam_tf_style(
+ b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype
+ ),
+ _scale_by_learning_rate(learning_rate),
+ )
+
+
+class AdaptiveKLController:
+ def __init__(self, init_kl_coef: float, hparams: AdaptiveKLParams):
+ self.value = init_kl_coef
+ self.hparams = hparams
+
+ def update(self, current, n_steps):
+ target = self.hparams.target
+ proportional_error = np.clip(current / target - 1, -0.2, 0.2)
+ mult = 1 + proportional_error * n_steps / self.hparams.horizon
+ self.value *= mult
+
+
+def whiten(values, shift_mean=True):
+ # `unbiased=False` matches TF `tf.nn.moments`'s setting
+ mean, var = jnp.mean(values), jnp.var(values)
+ whitened = (values - mean) * jax.lax.rsqrt(var + 1e-8)
+ if not shift_mean:
+ whitened += mean
+ return whitened
+
+
+class ScalarHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=0),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ return x
+
+
+class RewardHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(
+ stddev=1 / np.sqrt(self.head_input_size + 1)
+ ),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+ x = x * reward_gain + reward_bias
+ return x
+
+
+@flax.struct.dataclass
+class LMBackboneWithScalarHeadParams:
+ """Parameters for the language model backbone and a scalar head."""
+
+ lm_backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+# a pytorch dataset
+class MyDataset(IterableDataset):
+ def __init__(
+ self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
+ ):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ def pad_row(row):
+ mask = 1 - (row == pad_id) # 1 if not pad_id, 0 if pad_id
+ return row[
+ jnp.argsort(mask)
+ ] # uses the fact that jnp.argsort is stable by default
+
+ return jax.vmap(pad_row)(tokens)
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a / b}")
+ return q
+
+
+def prepare_reward_forward(args, tokenizer):
+ """Prepare the forward pass of the reward model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ scalar_head = RewardHead(head_input_size=lm_backbone.config.hidden_size)
+
+ def reward_forward(
+ params: LMBackboneWithScalarHeadParams,
+ query_responses_ids: jnp.ndarray,
+ ):
+ """Get reward for each queiry--response pair."""
+ assert query_responses_ids.ndim == 2
+
+ # mask out padding tokens
+ attention_mask = query_responses_ids != tokenizer.pad_token_id
+ query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ reward_latents = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=query_responses_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ ).hidden_states[-1]
+ # reward_latents: [batch_size, length, hidden_size]
+
+ last_reward_latents = reward_latents[:, -1, :]
+ # last_reward_latents: [batch_size, hidden_size]
+
+ reward = scalar_head.apply(variables=params.head_params, x=last_reward_latents)
+ # reward: [batch_size, 1]
+ return reward
+
+ if args.rewards.trained_model:
+ orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()
+ reward_state_params = orbax_checkpointer.restore(args.rewards.trained_model)[
+ "reward_model"
+ ]["params"]
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict(
+ {"params": reward_state_params["lm_backbone_params"]["params"]}
+ ),
+ head_params=flax.core.FrozenDict(
+ {"params": reward_state_params["head_params"]["params"]}
+ ),
+ )
+ pprint(f"Loaded pretrained reward model from {args.rewards.trained_model}")
+ else:
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return functools.partial(reward_forward, params=reward_params)
+
+
+def prepare_policy_forward_and_policy_generate(args, tokenizer):
+ """Prepare the forward pass of the policy model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ # disable `pad_token_id` and `eos_token_id` because we just want to
+ # generate tokens without truncation / padding
+ lm_backbone.generation_config.eos_token_id = None
+ lm_backbone.generation_config.pad_token_id = tokenizer.pad_token_id
+ scalar_head = ScalarHead(head_input_size=lm_backbone.config.hidden_size)
+
+ generation_config = GenerationConfig(
+ max_new_tokens=args.task.response_length,
+ min_new_tokens=args.task.response_length,
+ temperature=args.task.temperature,
+ top_k=0.0,
+ top_p=1.0,
+ do_sample=True,
+ pad_token_id=tokenizer.pad_token_id,
+ )
+
+ def policy_forward(
+ params: LMBackboneWithScalarHeadParams,
+ input_ids: jnp.ndarray,
+ ):
+ """Get reward for input_ids."""
+ assert input_ids.ndim == 2
+ # shape: [batch_size, length]
+
+ # mask out padding tokens
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, input_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ lm_backbone_out = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ )
+
+ value_latents = lm_backbone_out.hidden_states[-1]
+ # shape: [batch_size, length, hidden_size]
+
+ values = scalar_head.apply(variables=params.head_params, x=value_latents)
+ # shape: [batch_size, length, 1]
+ return lm_backbone_out, values
+
+ def policy_generate(
+ params: LMBackboneWithScalarHeadParams,
+ queries: jnp.ndarray,
+ ):
+ input_ids = queries
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = lm_backbone.generate(
+ params=params["params"],
+ input_ids=input_ids,
+ generation_config=generation_config,
+ attention_mask=attention_mask.astype("i4"),
+ return_dict_in_generate=True,
+ )
+ context_length = input_ids.shape[1]
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ policy_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return policy_forward, policy_generate, policy_params
+
+
+@flax.struct.dataclass
+class RLStatistics(metrics.Collection):
+ approxkl: metrics.Average.from_output("approxkl")
+ entropy: metrics.Average.from_output("entropy")
+ pg_loss: metrics.Average.from_output("pg_loss")
+ pg_clipfrac: metrics.Average.from_output("pg_clipfrac")
+ vf_loss1: metrics.Average.from_output("vf_loss1")
+ vf_loss: metrics.Average.from_output("vf_loss")
+ vf_clipfrac: metrics.Average.from_output("vf_clipfrac")
+ ratio: metrics.Average.from_output("ratio")
+ loss: metrics.Average.from_output("loss")
+
+
+def train_step(
+ policy_state,
+ rl_stats,
+ mb_advantages,
+ mb_returns,
+ mb_values,
+ mb_query_responses,
+ mb_logprobs,
+ args,
+):
+ def loss(params):
+ output, vpred_temp = policy_state.apply_fn(params, mb_query_responses)
+ # vpred_temp: [local_micro_batch_size, query_length + response_length, 1]
+ vpred = jnp.squeeze(vpred_temp[:, args.task.query_length - 1 : -1, :], axis=-1)
+ # vpred: [local_micro_batch_size, response_length]
+ vpredclipped = jnp.clip(
+ vpred,
+ mb_values - args.ppo.cliprange_value,
+ mb_values + args.ppo.cliprange_value,
+ )
+ vf_losses1 = jnp.square(vpred - mb_returns)
+ vf_losses2 = jnp.square(vpredclipped - mb_returns)
+ vf_loss = 0.5 * jnp.maximum(vf_losses1, vf_losses2).mean()
+ vf_clipfrac = (vf_losses2 > vf_losses1).astype(jnp.float32).mean()
+
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ logits /= args.task.temperature
+ responses = mb_query_responses[:, args.task.query_length :]
+ new_logprobs = -optax.softmax_cross_entropy_with_integer_labels(
+ logits, responses
+ )
+
+ logprobs_diff = new_logprobs - mb_logprobs
+ ratio = jnp.exp(logprobs_diff)
+ pg_losses1 = -mb_advantages * ratio
+ pg_losses2 = -mb_advantages * jnp.clip(
+ ratio, 1.0 - args.ppo.cliprange, 1.0 + args.ppo.cliprange
+ )
+ pg_loss = jnp.maximum(pg_losses1, pg_losses2).mean()
+ pg_clipfrac = (pg_losses2 > pg_losses1).astype(jnp.float32).mean()
+
+ pd = jax.nn.softmax(logits, axis=-1)
+ entropy = jax.nn.logsumexp(logits, axis=-1) - jnp.sum(pd * logits, axis=-1)
+
+ approxkl = 0.5 * ((logprobs_diff) ** 2).mean()
+ loss = pg_loss + args.ppo.vf_coef * vf_loss
+
+ current_rl_stats = dict(
+ approxkl=approxkl,
+ entropy=entropy.mean(),
+ ratio=ratio.mean(),
+ pg_loss=pg_loss,
+ pg_clipfrac=pg_clipfrac,
+ vf_loss1=vf_losses1.mean(),
+ vf_loss=vf_loss,
+ vf_clipfrac=vf_clipfrac,
+ loss=loss,
+ )
+ return loss, current_rl_stats
+
+ grad_fn = jax.value_and_grad(loss, has_aux=True)
+ (loss, current_rl_stats), grads = grad_fn(policy_state.params)
+ grads = jax.lax.pmean(grads, "batch")
+ policy_state = policy_state.apply_gradients(grads=grads)
+
+ rl_stats = rl_stats.merge(RLStatistics.gather_from_model_output(**current_rl_stats))
+
+ return policy_state, rl_stats
+
+
+def linear_schedule(optimizer_step, args):
+ """anneal learning rate linearly to reach 0 after one epoch."""
+ update = 1 + optimizer_step // (
+ args.ppo.noptepochs
+ * args.ppo.nminibatches
+ * args.ppo.gradient_accumulation_steps
+ )
+ frac = 1.0 - (update - 1.0) / args.ppo.num_updates
+ lrnow = frac * args.ppo.lr
+ return lrnow
+
+
+def train(args: Args):
+ local_devices = jax.local_devices()
+ global_devices = jax.devices()
+ args.ppo.world_size = jax.process_count()
+ learner_devices = [local_devices[d_id] for d_id in args.learner_device_ids]
+ global_learner_decices = [
+ global_devices[d_id + process_index * len(local_devices)]
+ for process_index in range(args.ppo.world_size)
+ for d_id in args.learner_device_ids
+ ]
+ pprint({"global_learner_decices": global_learner_decices})
+ args.global_learner_decices = [str(item) for item in global_learner_decices]
+ args.learner_devices = [str(item) for item in learner_devices]
+ args.local_rank = jax.process_index()
+ args.ppo.batch_size = int(
+ args.ppo.local_batch_size * len(args.learner_devices) * args.ppo.world_size
+ )
+ args.ppo.minibatch_size = exact_div(args.ppo.batch_size, args.ppo.nminibatches)
+ args.ppo.local_mini_batch_size = exact_div(
+ args.ppo.local_batch_size, args.ppo.nminibatches
+ )
+ args.ppo.local_micro_batch_size = exact_div(
+ args.ppo.local_mini_batch_size, args.ppo.gradient_accumulation_steps
+ )
+ if args.ppo.whiten_rewards:
+ assert (
+ args.ppo.local_mini_batch_size >= 8
+ ), f"Per-rank minibatch size {args.ppo.local_mini_batch_size} is insufficient for whitening"
+ # `per_rank_rollout_batch_size` is our `args.ppo.local_batch_size`
+ # `per_rank_minibatch_size` is our `args.ppo.local_mini_batch_size`
+ args.ppo.num_updates = args.ppo.total_episodes // args.ppo.batch_size
+
+ console = Console(force_terminal=True)
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+ writer = SimpleNamespace() # dummy writer
+ writer.add_scalar = lambda x, y, z: None
+ writer.add_histogram = lambda x, y, z: None
+
+ if args.local_rank == 0:
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+ writer = SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s"
+ % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+ local_seed = args.seed + args.local_rank * 100003 # Prime
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.base_model,
+ padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ reward_forward = prepare_reward_forward(args, tokenizer)
+ (
+ policy_forward,
+ policy_generate,
+ policy_params,
+ ) = prepare_policy_forward_and_policy_generate(args, tokenizer)
+ _, _, ref_policy_params = prepare_policy_forward_and_policy_generate(
+ args, tokenizer
+ )
+
+ if args.use_tensorflow_adam:
+ adam = adam_tf_style
+ else:
+ adam = optax.adam
+
+ optimizer = adam(
+ learning_rate=functools.partial(linear_schedule, args=args),
+ eps=args.ppo.eps,
+ )
+
+ optimizer = optax.MultiSteps(optimizer, args.ppo.gradient_accumulation_steps)
+
+ policy_state = TrainState.create(
+ apply_fn=policy_forward, params=policy_params, tx=optimizer
+ )
+ policy_state = jax_utils.replicate(policy_state)
+
+ dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=local_seed,
+ start_text=args.task.start_text,
+ end_text=args.task.end_text,
+ )
+ dataloader = DataLoader(dataset, batch_size=args.ppo.batch_size)
+ iter_dataloader = iter(dataloader)
+ kl_ctl = AdaptiveKLController(
+ args.rewards.kl_coef, hparams=args.rewards.adaptive_kl
+ )
+
+ def train_update(policy_state, input_ids, rl_stats, kl_ctl_value):
+ queries = right_padding_to_left_padding(input_ids, tokenizer.pad_token_id)
+
+ query_responses = policy_generate(
+ params=policy_state.params.lm_backbone_params,
+ queries=queries,
+ )
+ # query_responses: [local_batch_size, query_length + response_length]
+ responses = query_responses[:, args.task.query_length :]
+
+ output, full_values = policy_forward(policy_state.params, query_responses)
+ values = full_values[:, args.task.query_length - 1 : -1].squeeze(-1)
+ # values: [local_batch_size, response_length]
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ # logits: [local_batch_size, response_length, vocab_size]
+ logits /= args.task.temperature
+ all_logprobs = jax.nn.log_softmax(logits, axis=-1)
+ # all_logprobs: [local_batch_size, response_length, vocab_size]
+ logprobs = jnp.take_along_axis(all_logprobs, responses[..., None], -1).squeeze(
+ -1
+ )
+ # logprobs: [local_batch_size, response_length]
+
+ ref_output, _ = policy_forward(ref_policy_params, query_responses)
+ ref_logits = ref_output.logits[:, args.task.query_length - 1 : -1, :]
+ # ref_logits: [local_batch_size, response_length, vocab_size]
+ ref_logits /= args.task.temperature
+ ref_all_logprobs = jax.nn.log_softmax(ref_logits, axis=-1)
+ ref_logprobs = jnp.take_along_axis(
+ ref_all_logprobs, responses[..., None], -1
+ ).squeeze(-1)
+ # ref_logprobs: [local_batch_size, response_length]
+
+ # **Response Processing**
+ # 1. truncate at the first occurrence of `truncate_token` that appears at or after
+ # position truncate_after in the responses
+ # https://github.com/openai/lm-human-preferences/blob/cbfd210bb8b08f6bc5c26878c10984b90f516c66/lm_human_preferences/train_policy.py#L378
+ truncate_token_mask = responses == args.task.truncate_token
+ truncate_after_or_token_mask = jnp.concatenate(
+ [
+ jnp.zeros_like(truncate_token_mask)[:, : args.task.truncate_after],
+ truncate_token_mask[:, args.task.truncate_after :],
+ ],
+ axis=1,
+ )
+ truncate_mask = (
+ jnp.cumsum(truncate_after_or_token_mask, axis=1)
+ - truncate_after_or_token_mask
+ ).astype("bool")
+ postprocessed_responses = jnp.where(
+ truncate_mask,
+ jnp.full_like(responses, tokenizer.pad_token_id),
+ responses,
+ )
+
+ # 2. run reward model on the truncated responses
+ postprocessed_query_responses = jnp.concatenate(
+ (queries, postprocessed_responses), axis=1
+ )
+ postprocessed_query_responses = right_padding_to_left_padding(
+ postprocessed_query_responses, tokenizer.pad_token_id
+ )
+ scores = reward_forward(
+ query_responses_ids=postprocessed_query_responses
+ ).flatten()
+
+ # 3. filter response. Ensure that the sample contains truncate_token
+ # responses not passing that filter will receive a low (fixed) score
+ # only query humans on responses that pass that filter
+ matches_token = (
+ postprocessed_responses[:, args.task.truncate_after :]
+ == args.task.truncate_token
+ )
+ filter_mask = jnp.any(matches_token, axis=-1)
+
+ scores = jnp.where(
+ filter_mask,
+ scores,
+ jnp.full_like(scores, args.task.penalty_reward_value),
+ )
+
+ # 4. compute rewards
+ kl = logprobs - ref_logprobs
+ non_score_reward = -kl_ctl_value * kl
+ rewards = non_score_reward
+ rewards = rewards.at[:, -1].add(scores)
+
+ # 5. whiten rewards
+ if args.ppo.whiten_rewards:
+ rewards = whiten(rewards, shift_mean=False)
+
+ # 6. compute advantages and returns
+ def compute_gae_once(carry, inp):
+ advantages = carry
+ nextdone, nextvalues, curvalues, reward = inp
+ nextnonterminal = 1.0 - nextdone
+
+ delta = reward + args.ppo.gamma * nextvalues * nextnonterminal - curvalues
+ advantages = (
+ delta + args.ppo.gamma * args.ppo.lam * nextnonterminal * advantages
+ )
+ return advantages, advantages
+
+ extended_values = jnp.concatenate(
+ (values, jnp.zeros((args.ppo.local_batch_size, 1))), axis=1
+ )
+ dones = jnp.zeros_like(rewards)
+ dones = dones.at[:, -1].set(1.0)
+
+ advantages = jnp.zeros((args.ppo.local_batch_size,))
+ _, advantages = jax.lax.scan(
+ compute_gae_once,
+ advantages,
+ (dones.T, extended_values[:, 1:].T, extended_values[:, :-1].T, rewards.T),
+ reverse=True,
+ )
+
+ advantages = advantages.T
+ returns = advantages + values
+ advantages = whiten(advantages)
+
+ def ppo_single_microbatch(carry, inp):
+ policy_state, rl_stats = carry
+ mb_advantages, mb_returns, mb_values, mb_query_responses, mb_logprobs = inp
+
+ policy_state, rl_stats = train_step(
+ policy_state=policy_state,
+ rl_stats=rl_stats,
+ mb_advantages=mb_advantages,
+ mb_returns=mb_returns,
+ mb_values=mb_values,
+ mb_query_responses=mb_query_responses,
+ mb_logprobs=mb_logprobs,
+ args=args,
+ )
+ return (policy_state, rl_stats), None
+
+ def ppo_single_epoch(carry, inp):
+ policy_state, rl_stats, key = carry
+ key, subkey = jax.random.split(key, 2)
+ perm = jax.random.permutation(key, args.ppo.local_batch_size)
+ # advantages, returns, values, query_responses, logprobs = inp
+ mbs_advantages = einops.rearrange(
+ advantages[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_returns = einops.rearrange(
+ returns[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_values = einops.rearrange(
+ values[perm], "(c m) l -> c m l", c=args.ppo.gradient_accumulation_steps
+ )
+ mbs_query_responses = einops.rearrange(
+ query_responses[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_logprobs = einops.rearrange(
+ logprobs[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ (policy_state, rl_stats), _ = jax.lax.scan(
+ f=ppo_single_microbatch,
+ init=(policy_state, rl_stats),
+ xs=(
+ mbs_advantages,
+ mbs_returns,
+ mbs_values,
+ mbs_query_responses,
+ mbs_logprobs,
+ ),
+ )
+ return (policy_state, rl_stats, key), None
+
+ key = jax.random.PRNGKey(args.seed)
+ # Do multiple epochs of PPO training, with a fresh random shuffle in each epoch
+ (policy_state, rl_stats, _), _ = jax.lax.scan(
+ f=ppo_single_epoch,
+ init=(policy_state, rl_stats, key),
+ xs=None,
+ length=args.ppo.noptepochs,
+ )
+
+ return (
+ policy_state,
+ returns,
+ advantages,
+ values,
+ query_responses,
+ logprobs,
+ kl,
+ scores,
+ rl_stats,
+ )
+
+ p_train_update = jax.pmap(
+ train_update,
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+
+ print("===training policy===")
+ global_step = 0
+
+ for update in range(1, args.ppo.num_updates + 1):
+ global_step += 1 * args.ppo.batch_size
+ data = next(iter_dataloader)
+ input_ids = common_utils.shard(data["input_ids"].numpy())
+
+ rl_stats = jax_utils.replicate(RLStatistics.empty())
+ (
+ policy_state,
+ returns,
+ advantages,
+ values,
+ query_responses,
+ logprobs,
+ kl,
+ scores,
+ rl_stats,
+ ) = p_train_update(
+ policy_state=policy_state,
+ input_ids=input_ids,
+ rl_stats=rl_stats,
+ kl_ctl_value=jax_utils.replicate(kl_ctl.value),
+ )
+
+ try:
+ sample_kl = kl[0][0].sum().item()
+ sample_score = scores[0][0].item()
+ sample_query_response = query_responses[0][0]
+ console.print(
+ f"[green][bold]{'Query'}:[/]\n"
+ + f"[green]{ tokenizer.decode(sample_query_response[:args.task.query_length], skip_special_tokens=True)}[/]\n\n"
+ + f"[yellow][bold]{'Response'}:[/]\n"
+ + f"[yellow]{tokenizer.decode(sample_query_response[args.task.query_length:], skip_special_tokens=True)}[/]\n\n"
+ + f"[red]score: {sample_score}, kl: {sample_kl}, total reward: {sample_score - kl_ctl.value * sample_kl} [/]"
+ )
+ except Exception as e:
+ print(e)
+
+ # Rollout metrics
+ rl_stats = rl_stats.unreplicate().compute()
+ non_score_reward = -kl_ctl.value * kl
+
+ mean_kl = kl.sum(-1).mean()
+ mean_entropy = (-logprobs).sum(-1).mean() | This is jax computation happenning outside jitted function, slow. |
lm-human-preference-details | github_2023 | python | 18 | vwxyzjn | vwxyzjn | @@ -0,0 +1,1024 @@
+import functools
+import os
+import time
+from dataclasses import asdict, dataclass, field
+from types import SimpleNamespace
+from typing import List, Optional
+import einops
+from clu import metrics
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+import optax
+import orbax
+import tyro
+from flax import jax_utils
+from flax.training import common_utils, orbax_utils
+from flax.training.train_state import TrainState
+from optax import ScaleByAdamState, update_moment, update_moment_per_elem_norm
+from optax._src import base, combine, numerics, utils
+from optax._src.alias import _scale_by_learning_rate
+from rich.console import Console
+from rich.pretty import pprint
+from torch.utils.data import DataLoader, IterableDataset
+from torch.utils.tensorboard import SummaryWriter
+from transformers import AutoTokenizer, FlaxAutoModelForCausalLM, GenerationConfig
+
+from lm_human_preference_details.data import DATASET
+
+
+@dataclass
+class AdaptiveKLParams:
+ target: float = 6.0
+ horizon: int = 10000 # in episodes
+
+
+@dataclass
+class RewardHParams:
+ kl_coef: float = 0.15
+ adaptive_kl: Optional[AdaptiveKLParams] = field(default_factory=AdaptiveKLParams)
+ trained_model: Optional[str] = "models/"
+ label_dataset: tyro.conf.Suppress[Optional[str]] = None
+
+
+@dataclass
+class PpoHParams:
+ total_episodes: int = 1000000
+ local_batch_size: int = 64
+ local_mini_batch_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ mini_batch_size: tyro.conf.Suppress[int] = None
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ batch_size: tyro.conf.Suppress[int] = None
+ minibatch_size: tyro.conf.Suppress[int] = None
+ num_updates: tyro.conf.Suppress[int] = None
+ nminibatches: int = 1
+ noptepochs: int = 4
+ lr: float = 0.00001
+ eps: float = 1e-5
+ vf_coef: float = 0.1
+ cliprange: float = 0.2
+ cliprange_value: float = 0.2
+ gamma: float = 1
+ lam: float = 0.95
+ whiten_rewards: bool = True
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # Truncate response after the first occurrence of this token at or after index after when sampling.
+ truncate_token: int = 13
+ truncate_after: int = 16
+ penalty_reward_value: int = -1
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "cleanrl"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ print_sample_output_freq: int = 0
+ """How often to print sample output"""
+ save_path: str = "models/policy/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = True
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ rewards: RewardHParams = field(default_factory=RewardHParams)
+ ppo: PpoHParams = field(default_factory=PpoHParams)
+
+ # distributed settings
+ local_rank: int = 0
+ """the rank of this process"""
+ learner_device_ids: List[int] = field(default_factory=lambda: [0])
+ "the device ids that script will use"
+ learner_devices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the devices that script will use"""
+ global_learner_decices: tyro.conf.Suppress[int] = None # real type is `List[str]`
+ """the total devices (across all nodes and machines) that script will use"""
+
+
+def scale_by_adam_tf_style(
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+) -> base.GradientTransformation:
+ """Rescale updates according to the Adam algorithm.
+
+ References:
+ [Kingma et al, 2014](https://arxiv.org/abs/1412.6980)
+
+ Args:
+ b1: Decay rate for the exponentially weighted average of grads.
+ b2: Decay rate for the exponentially weighted average of squared grads.
+ eps: Term added to the denominator to improve numerical stability.
+ eps_root: Term added to the denominator inside the square-root to improve
+ numerical stability when backpropagating gradients through the rescaling.
+ mu_dtype: Optional `dtype` to be used for the first order accumulator; if
+ `None` then the `dtype` is inferred from `params` and `updates`.
+
+ Returns:
+ A `GradientTransformation` object.
+ """
+
+ mu_dtype = utils.canonicalize_dtype(mu_dtype)
+
+ def init_fn(params):
+ mu = jax.tree_util.tree_map(
+ lambda t: jnp.zeros_like(t, dtype=mu_dtype), params
+ ) # First moment
+ nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Second moment
+ return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)
+
+ def update_fn(updates, state, params=None):
+ del params
+ mu = update_moment(updates, state.mu, b1, 1)
+ nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
+ count_inc = numerics.safe_int32_increment(state.count)
+
+ ### `optax` default adam implementation
+ # mu_hat = bias_correction(mu, b1, count_inc)
+ # nu_hat = bias_correction(nu, b2, count_inc)
+ # updates = jax.tree_util.tree_map(
+ # lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
+ ### Tensorflow adam implementation
+ updates = jax.tree_util.tree_map(
+ lambda m, v: (jnp.sqrt(1 - b2**count_inc) / (1 - b1**count_inc))
+ * m
+ / (jnp.sqrt(v + eps_root) + eps),
+ mu,
+ nu,
+ ) #
+ mu = utils.cast_tree(mu, mu_dtype)
+ return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)
+
+ return base.GradientTransformation(init_fn, update_fn)
+
+
+def adam_tf_style(
+ learning_rate,
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ eps_root: float = 0.0,
+ mu_dtype=None,
+):
+ return combine.chain(
+ scale_by_adam_tf_style(
+ b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype
+ ),
+ _scale_by_learning_rate(learning_rate),
+ )
+
+
+class AdaptiveKLController:
+ def __init__(self, init_kl_coef: float, hparams: AdaptiveKLParams):
+ self.value = init_kl_coef
+ self.hparams = hparams
+
+ def update(self, current, n_steps):
+ target = self.hparams.target
+ proportional_error = np.clip(current / target - 1, -0.2, 0.2)
+ mult = 1 + proportional_error * n_steps / self.hparams.horizon
+ self.value *= mult
+
+
+def whiten(values, shift_mean=True):
+ # `unbiased=False` matches TF `tf.nn.moments`'s setting
+ mean, var = jnp.mean(values), jnp.var(values)
+ whitened = (values - mean) * jax.lax.rsqrt(var + 1e-8)
+ if not shift_mean:
+ whitened += mean
+ return whitened
+
+
+class ScalarHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(stddev=0),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ return x
+
+
+class RewardHead(nn.Module):
+ head_input_size: int
+
+ @nn.compact
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(
+ stddev=1 / np.sqrt(self.head_input_size + 1)
+ ),
+ bias_init=nn.initializers.zeros_init(),
+ )(x)
+ reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+ x = x * reward_gain + reward_bias
+ return x
+
+
+@flax.struct.dataclass
+class LMBackboneWithScalarHeadParams:
+ """Parameters for the language model backbone and a scalar head."""
+
+ lm_backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+# a pytorch dataset
+class MyDataset(IterableDataset):
+ def __init__(
+ self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
+ ):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ def pad_row(row):
+ mask = 1 - (row == pad_id) # 1 if not pad_id, 0 if pad_id
+ return row[
+ jnp.argsort(mask)
+ ] # uses the fact that jnp.argsort is stable by default
+
+ return jax.vmap(pad_row)(tokens)
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a / b}")
+ return q
+
+
+def prepare_reward_forward(args, tokenizer):
+ """Prepare the forward pass of the reward model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ scalar_head = RewardHead(head_input_size=lm_backbone.config.hidden_size)
+
+ def reward_forward(
+ params: LMBackboneWithScalarHeadParams,
+ query_responses_ids: jnp.ndarray,
+ ):
+ """Get reward for each queiry--response pair."""
+ assert query_responses_ids.ndim == 2
+
+ # mask out padding tokens
+ attention_mask = query_responses_ids != tokenizer.pad_token_id
+ query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ reward_latents = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=query_responses_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ ).hidden_states[-1]
+ # reward_latents: [batch_size, length, hidden_size]
+
+ last_reward_latents = reward_latents[:, -1, :]
+ # last_reward_latents: [batch_size, hidden_size]
+
+ reward = scalar_head.apply(variables=params.head_params, x=last_reward_latents)
+ # reward: [batch_size, 1]
+ return reward
+
+ if args.rewards.trained_model:
+ orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()
+ reward_state_params = orbax_checkpointer.restore(args.rewards.trained_model)[
+ "reward_model"
+ ]["params"]
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict(
+ {"params": reward_state_params["lm_backbone_params"]["params"]}
+ ),
+ head_params=flax.core.FrozenDict(
+ {"params": reward_state_params["head_params"]["params"]}
+ ),
+ )
+ pprint(f"Loaded pretrained reward model from {args.rewards.trained_model}")
+ else:
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ reward_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return functools.partial(reward_forward, params=reward_params)
+
+
+def prepare_policy_forward_and_policy_generate(args, tokenizer):
+ """Prepare the forward pass of the policy model and parameters."""
+
+ lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
+ # disable `pad_token_id` and `eos_token_id` because we just want to
+ # generate tokens without truncation / padding
+ lm_backbone.generation_config.eos_token_id = None
+ lm_backbone.generation_config.pad_token_id = tokenizer.pad_token_id
+ scalar_head = ScalarHead(head_input_size=lm_backbone.config.hidden_size)
+
+ generation_config = GenerationConfig(
+ max_new_tokens=args.task.response_length,
+ min_new_tokens=args.task.response_length,
+ temperature=args.task.temperature,
+ top_k=0.0,
+ top_p=1.0,
+ do_sample=True,
+ pad_token_id=tokenizer.pad_token_id,
+ )
+
+ def policy_forward(
+ params: LMBackboneWithScalarHeadParams,
+ input_ids: jnp.ndarray,
+ ):
+ """Get reward for input_ids."""
+ assert input_ids.ndim == 2
+ # shape: [batch_size, length]
+
+ # mask out padding tokens
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, input_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ lm_backbone_out = lm_backbone.module.apply(
+ variables=params.lm_backbone_params,
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ )
+
+ value_latents = lm_backbone_out.hidden_states[-1]
+ # shape: [batch_size, length, hidden_size]
+
+ values = scalar_head.apply(variables=params.head_params, x=value_latents)
+ # shape: [batch_size, length, 1]
+ return lm_backbone_out, values
+
+ def policy_generate(
+ params: LMBackboneWithScalarHeadParams,
+ queries: jnp.ndarray,
+ ):
+ input_ids = queries
+ attention_mask = input_ids != tokenizer.pad_token_id
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = lm_backbone.generate(
+ params=params["params"],
+ input_ids=input_ids,
+ generation_config=generation_config,
+ attention_mask=attention_mask.astype("i4"),
+ return_dict_in_generate=True,
+ )
+ context_length = input_ids.shape[1]
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+ key = jax.random.PRNGKey(args.seed)
+ key, init_key = jax.random.split(key, 2)
+ policy_params = LMBackboneWithScalarHeadParams(
+ lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ scalar_head.init(
+ init_key,
+ jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ )
+
+ return policy_forward, policy_generate, policy_params
+
+
+@flax.struct.dataclass
+class RLStatistics(metrics.Collection):
+ approxkl: metrics.Average.from_output("approxkl")
+ entropy: metrics.Average.from_output("entropy")
+ pg_loss: metrics.Average.from_output("pg_loss")
+ pg_clipfrac: metrics.Average.from_output("pg_clipfrac")
+ vf_loss1: metrics.Average.from_output("vf_loss1")
+ vf_loss: metrics.Average.from_output("vf_loss")
+ vf_clipfrac: metrics.Average.from_output("vf_clipfrac")
+ ratio: metrics.Average.from_output("ratio")
+ loss: metrics.Average.from_output("loss")
+
+
+def train_step(
+ policy_state,
+ rl_stats,
+ mb_advantages,
+ mb_returns,
+ mb_values,
+ mb_query_responses,
+ mb_logprobs,
+ args,
+):
+ def loss(params):
+ output, vpred_temp = policy_state.apply_fn(params, mb_query_responses)
+ # vpred_temp: [local_micro_batch_size, query_length + response_length, 1]
+ vpred = jnp.squeeze(vpred_temp[:, args.task.query_length - 1 : -1, :], axis=-1)
+ # vpred: [local_micro_batch_size, response_length]
+ vpredclipped = jnp.clip(
+ vpred,
+ mb_values - args.ppo.cliprange_value,
+ mb_values + args.ppo.cliprange_value,
+ )
+ vf_losses1 = jnp.square(vpred - mb_returns)
+ vf_losses2 = jnp.square(vpredclipped - mb_returns)
+ vf_loss = 0.5 * jnp.maximum(vf_losses1, vf_losses2).mean()
+ vf_clipfrac = (vf_losses2 > vf_losses1).astype(jnp.float32).mean()
+
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ logits /= args.task.temperature
+ responses = mb_query_responses[:, args.task.query_length :]
+ new_logprobs = -optax.softmax_cross_entropy_with_integer_labels(
+ logits, responses
+ )
+
+ logprobs_diff = new_logprobs - mb_logprobs
+ ratio = jnp.exp(logprobs_diff)
+ pg_losses1 = -mb_advantages * ratio
+ pg_losses2 = -mb_advantages * jnp.clip(
+ ratio, 1.0 - args.ppo.cliprange, 1.0 + args.ppo.cliprange
+ )
+ pg_loss = jnp.maximum(pg_losses1, pg_losses2).mean()
+ pg_clipfrac = (pg_losses2 > pg_losses1).astype(jnp.float32).mean()
+
+ pd = jax.nn.softmax(logits, axis=-1)
+ entropy = jax.nn.logsumexp(logits, axis=-1) - jnp.sum(pd * logits, axis=-1)
+
+ approxkl = 0.5 * ((logprobs_diff) ** 2).mean()
+ loss = pg_loss + args.ppo.vf_coef * vf_loss
+
+ current_rl_stats = dict(
+ approxkl=approxkl,
+ entropy=entropy.mean(),
+ ratio=ratio.mean(),
+ pg_loss=pg_loss,
+ pg_clipfrac=pg_clipfrac,
+ vf_loss1=vf_losses1.mean(),
+ vf_loss=vf_loss,
+ vf_clipfrac=vf_clipfrac,
+ loss=loss,
+ )
+ return loss, current_rl_stats
+
+ grad_fn = jax.value_and_grad(loss, has_aux=True)
+ (loss, current_rl_stats), grads = grad_fn(policy_state.params)
+ grads = jax.lax.pmean(grads, "batch")
+ policy_state = policy_state.apply_gradients(grads=grads)
+
+ rl_stats = rl_stats.merge(RLStatistics.gather_from_model_output(**current_rl_stats))
+
+ return policy_state, rl_stats
+
+
+def linear_schedule(optimizer_step, args):
+ """anneal learning rate linearly to reach 0 after one epoch."""
+ update = 1 + optimizer_step // (
+ args.ppo.noptepochs
+ * args.ppo.nminibatches
+ * args.ppo.gradient_accumulation_steps
+ )
+ frac = 1.0 - (update - 1.0) / args.ppo.num_updates
+ lrnow = frac * args.ppo.lr
+ return lrnow
+
+
+def train(args: Args):
+ local_devices = jax.local_devices()
+ global_devices = jax.devices()
+ args.ppo.world_size = jax.process_count()
+ learner_devices = [local_devices[d_id] for d_id in args.learner_device_ids]
+ global_learner_decices = [
+ global_devices[d_id + process_index * len(local_devices)]
+ for process_index in range(args.ppo.world_size)
+ for d_id in args.learner_device_ids
+ ]
+ pprint({"global_learner_decices": global_learner_decices})
+ args.global_learner_decices = [str(item) for item in global_learner_decices]
+ args.learner_devices = [str(item) for item in learner_devices]
+ args.local_rank = jax.process_index()
+ args.ppo.batch_size = int(
+ args.ppo.local_batch_size * len(args.learner_devices) * args.ppo.world_size
+ )
+ args.ppo.minibatch_size = exact_div(args.ppo.batch_size, args.ppo.nminibatches)
+ args.ppo.local_mini_batch_size = exact_div(
+ args.ppo.local_batch_size, args.ppo.nminibatches
+ )
+ args.ppo.local_micro_batch_size = exact_div(
+ args.ppo.local_mini_batch_size, args.ppo.gradient_accumulation_steps
+ )
+ if args.ppo.whiten_rewards:
+ assert (
+ args.ppo.local_mini_batch_size >= 8
+ ), f"Per-rank minibatch size {args.ppo.local_mini_batch_size} is insufficient for whitening"
+ # `per_rank_rollout_batch_size` is our `args.ppo.local_batch_size`
+ # `per_rank_minibatch_size` is our `args.ppo.local_mini_batch_size`
+ args.ppo.num_updates = args.ppo.total_episodes // args.ppo.batch_size
+
+ console = Console(force_terminal=True)
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+ writer = SimpleNamespace() # dummy writer
+ writer.add_scalar = lambda x, y, z: None
+ writer.add_histogram = lambda x, y, z: None
+
+ if args.local_rank == 0:
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+ writer = SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s"
+ % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+ local_seed = args.seed + args.local_rank * 100003 # Prime
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.base_model,
+ padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ reward_forward = prepare_reward_forward(args, tokenizer)
+ (
+ policy_forward,
+ policy_generate,
+ policy_params,
+ ) = prepare_policy_forward_and_policy_generate(args, tokenizer)
+ _, _, ref_policy_params = prepare_policy_forward_and_policy_generate(
+ args, tokenizer
+ )
+
+ if args.use_tensorflow_adam:
+ adam = adam_tf_style
+ else:
+ adam = optax.adam
+
+ optimizer = adam(
+ learning_rate=functools.partial(linear_schedule, args=args),
+ eps=args.ppo.eps,
+ )
+
+ optimizer = optax.MultiSteps(optimizer, args.ppo.gradient_accumulation_steps)
+
+ policy_state = TrainState.create(
+ apply_fn=policy_forward, params=policy_params, tx=optimizer
+ )
+ policy_state = jax_utils.replicate(policy_state)
+
+ dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=local_seed,
+ start_text=args.task.start_text,
+ end_text=args.task.end_text,
+ )
+ dataloader = DataLoader(dataset, batch_size=args.ppo.batch_size)
+ iter_dataloader = iter(dataloader)
+ kl_ctl = AdaptiveKLController(
+ args.rewards.kl_coef, hparams=args.rewards.adaptive_kl
+ )
+
+ def train_update(policy_state, input_ids, rl_stats, kl_ctl_value):
+ queries = right_padding_to_left_padding(input_ids, tokenizer.pad_token_id)
+
+ query_responses = policy_generate(
+ params=policy_state.params.lm_backbone_params,
+ queries=queries,
+ )
+ # query_responses: [local_batch_size, query_length + response_length]
+ responses = query_responses[:, args.task.query_length :]
+
+ output, full_values = policy_forward(policy_state.params, query_responses)
+ values = full_values[:, args.task.query_length - 1 : -1].squeeze(-1)
+ # values: [local_batch_size, response_length]
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ # logits: [local_batch_size, response_length, vocab_size]
+ logits /= args.task.temperature
+ all_logprobs = jax.nn.log_softmax(logits, axis=-1)
+ # all_logprobs: [local_batch_size, response_length, vocab_size]
+ logprobs = jnp.take_along_axis(all_logprobs, responses[..., None], -1).squeeze(
+ -1
+ )
+ # logprobs: [local_batch_size, response_length]
+
+ ref_output, _ = policy_forward(ref_policy_params, query_responses)
+ ref_logits = ref_output.logits[:, args.task.query_length - 1 : -1, :]
+ # ref_logits: [local_batch_size, response_length, vocab_size]
+ ref_logits /= args.task.temperature
+ ref_all_logprobs = jax.nn.log_softmax(ref_logits, axis=-1)
+ ref_logprobs = jnp.take_along_axis(
+ ref_all_logprobs, responses[..., None], -1
+ ).squeeze(-1)
+ # ref_logprobs: [local_batch_size, response_length]
+
+ # **Response Processing**
+ # 1. truncate at the first occurrence of `truncate_token` that appears at or after
+ # position truncate_after in the responses
+ # https://github.com/openai/lm-human-preferences/blob/cbfd210bb8b08f6bc5c26878c10984b90f516c66/lm_human_preferences/train_policy.py#L378
+ truncate_token_mask = responses == args.task.truncate_token
+ truncate_after_or_token_mask = jnp.concatenate(
+ [
+ jnp.zeros_like(truncate_token_mask)[:, : args.task.truncate_after],
+ truncate_token_mask[:, args.task.truncate_after :],
+ ],
+ axis=1,
+ )
+ truncate_mask = (
+ jnp.cumsum(truncate_after_or_token_mask, axis=1)
+ - truncate_after_or_token_mask
+ ).astype("bool")
+ postprocessed_responses = jnp.where(
+ truncate_mask,
+ jnp.full_like(responses, tokenizer.pad_token_id),
+ responses,
+ )
+
+ # 2. run reward model on the truncated responses
+ postprocessed_query_responses = jnp.concatenate(
+ (queries, postprocessed_responses), axis=1
+ )
+ postprocessed_query_responses = right_padding_to_left_padding(
+ postprocessed_query_responses, tokenizer.pad_token_id
+ )
+ scores = reward_forward(
+ query_responses_ids=postprocessed_query_responses
+ ).flatten()
+
+ # 3. filter response. Ensure that the sample contains truncate_token
+ # responses not passing that filter will receive a low (fixed) score
+ # only query humans on responses that pass that filter
+ matches_token = (
+ postprocessed_responses[:, args.task.truncate_after :]
+ == args.task.truncate_token
+ )
+ filter_mask = jnp.any(matches_token, axis=-1)
+
+ scores = jnp.where(
+ filter_mask,
+ scores,
+ jnp.full_like(scores, args.task.penalty_reward_value),
+ )
+
+ # 4. compute rewards
+ kl = logprobs - ref_logprobs
+ non_score_reward = -kl_ctl_value * kl
+ rewards = non_score_reward
+ rewards = rewards.at[:, -1].add(scores)
+
+ # 5. whiten rewards
+ if args.ppo.whiten_rewards:
+ rewards = whiten(rewards, shift_mean=False)
+
+ # 6. compute advantages and returns
+ def compute_gae_once(carry, inp):
+ advantages = carry
+ nextdone, nextvalues, curvalues, reward = inp
+ nextnonterminal = 1.0 - nextdone
+
+ delta = reward + args.ppo.gamma * nextvalues * nextnonterminal - curvalues
+ advantages = (
+ delta + args.ppo.gamma * args.ppo.lam * nextnonterminal * advantages
+ )
+ return advantages, advantages
+
+ extended_values = jnp.concatenate(
+ (values, jnp.zeros((args.ppo.local_batch_size, 1))), axis=1
+ )
+ dones = jnp.zeros_like(rewards)
+ dones = dones.at[:, -1].set(1.0)
+
+ advantages = jnp.zeros((args.ppo.local_batch_size,))
+ _, advantages = jax.lax.scan(
+ compute_gae_once,
+ advantages,
+ (dones.T, extended_values[:, 1:].T, extended_values[:, :-1].T, rewards.T),
+ reverse=True,
+ )
+
+ advantages = advantages.T
+ returns = advantages + values
+ advantages = whiten(advantages)
+
+ def ppo_single_microbatch(carry, inp):
+ policy_state, rl_stats = carry
+ mb_advantages, mb_returns, mb_values, mb_query_responses, mb_logprobs = inp
+
+ policy_state, rl_stats = train_step(
+ policy_state=policy_state,
+ rl_stats=rl_stats,
+ mb_advantages=mb_advantages,
+ mb_returns=mb_returns,
+ mb_values=mb_values,
+ mb_query_responses=mb_query_responses,
+ mb_logprobs=mb_logprobs,
+ args=args,
+ )
+ return (policy_state, rl_stats), None
+
+ def ppo_single_epoch(carry, inp):
+ policy_state, rl_stats, key = carry
+ key, subkey = jax.random.split(key, 2)
+ perm = jax.random.permutation(key, args.ppo.local_batch_size)
+ # advantages, returns, values, query_responses, logprobs = inp
+ mbs_advantages = einops.rearrange(
+ advantages[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_returns = einops.rearrange(
+ returns[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_values = einops.rearrange(
+ values[perm], "(c m) l -> c m l", c=args.ppo.gradient_accumulation_steps
+ )
+ mbs_query_responses = einops.rearrange(
+ query_responses[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_logprobs = einops.rearrange(
+ logprobs[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ (policy_state, rl_stats), _ = jax.lax.scan(
+ f=ppo_single_microbatch,
+ init=(policy_state, rl_stats),
+ xs=(
+ mbs_advantages,
+ mbs_returns,
+ mbs_values,
+ mbs_query_responses,
+ mbs_logprobs,
+ ),
+ )
+ return (policy_state, rl_stats, key), None
+
+ key = jax.random.PRNGKey(args.seed)
+ # Do multiple epochs of PPO training, with a fresh random shuffle in each epoch
+ (policy_state, rl_stats, _), _ = jax.lax.scan(
+ f=ppo_single_epoch,
+ init=(policy_state, rl_stats, key),
+ xs=None,
+ length=args.ppo.noptepochs,
+ )
+
+ return (
+ policy_state,
+ returns,
+ advantages,
+ values,
+ query_responses,
+ logprobs,
+ kl,
+ scores,
+ rl_stats,
+ )
+
+ p_train_update = jax.pmap(
+ train_update,
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+
+ print("===training policy===")
+ global_step = 0
+
+ for update in range(1, args.ppo.num_updates + 1):
+ global_step += 1 * args.ppo.batch_size
+ data = next(iter_dataloader)
+ input_ids = common_utils.shard(data["input_ids"].numpy())
+
+ rl_stats = jax_utils.replicate(RLStatistics.empty())
+ (
+ policy_state,
+ returns,
+ advantages,
+ values,
+ query_responses,
+ logprobs,
+ kl,
+ scores,
+ rl_stats,
+ ) = p_train_update(
+ policy_state=policy_state,
+ input_ids=input_ids,
+ rl_stats=rl_stats,
+ kl_ctl_value=jax_utils.replicate(kl_ctl.value),
+ )
+
+ try:
+ sample_kl = kl[0][0].sum().item()
+ sample_score = scores[0][0].item()
+ sample_query_response = query_responses[0][0]
+ console.print(
+ f"[green][bold]{'Query'}:[/]\n"
+ + f"[green]{ tokenizer.decode(sample_query_response[:args.task.query_length], skip_special_tokens=True)}[/]\n\n"
+ + f"[yellow][bold]{'Response'}:[/]\n"
+ + f"[yellow]{tokenizer.decode(sample_query_response[args.task.query_length:], skip_special_tokens=True)}[/]\n\n"
+ + f"[red]score: {sample_score}, kl: {sample_kl}, total reward: {sample_score - kl_ctl.value * sample_kl} [/]"
+ )
+ except Exception as e:
+ print(e)
+
+ # Rollout metrics
+ rl_stats = rl_stats.unreplicate().compute()
+ non_score_reward = -kl_ctl.value * kl
+
+        mean_kl = kl.sum(-1).mean()  # NOTE(review): computation happening outside the jitted function
lm-human-preference-details | github_2023 | python | 18 | vwxyzjn | vwxyzjn | @@ -0,0 +1,1024 @@
+import functools
+import os
+import time
+from dataclasses import asdict, dataclass, field
+from types import SimpleNamespace
+from typing import List, Optional
+import einops
+from clu import metrics
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+import optax
+import orbax
+import tyro
+from flax import jax_utils
+from flax.training import common_utils, orbax_utils
+from flax.training.train_state import TrainState
+from optax import ScaleByAdamState, update_moment, update_moment_per_elem_norm
+from optax._src import base, combine, numerics, utils
+from optax._src.alias import _scale_by_learning_rate
+from rich.console import Console
+from rich.pretty import pprint
+from torch.utils.data import DataLoader, IterableDataset
+from torch.utils.tensorboard import SummaryWriter
+from transformers import AutoTokenizer, FlaxAutoModelForCausalLM, GenerationConfig
+
+from lm_human_preference_details.data import DATASET
+
+
@dataclass
class AdaptiveKLParams:
    """Parameters for the adaptive KL-penalty controller (see AdaptiveKLController)."""

    target: float = 6.0  # KL value the controller steers the observed KL toward
    horizon: int = 10000  # in episodes
+
+
@dataclass
class RewardHParams:
    """Reward-model and KL-penalty hyperparameters."""

    kl_coef: float = 0.15  # initial KL coefficient; adapted at runtime by AdaptiveKLController
    adaptive_kl: Optional[AdaptiveKLParams] = field(default_factory=AdaptiveKLParams)
    trained_model: Optional[str] = "models/"  # Orbax checkpoint path for pretrained reward model
    label_dataset: tyro.conf.Suppress[Optional[str]] = None  # hidden from the CLI
+
+
@dataclass
class PpoHParams:
    """PPO optimization hyperparameters.

    Fields typed `tyro.conf.Suppress[...]` default to None, are hidden from
    the CLI, and are filled in at runtime by `train()` from the local sizes
    and the world size.

    Note: the original declaration listed `batch_size` twice with identical
    type and default; the duplicate has been removed (Python keeps only the
    last assignment anyway, so behavior is unchanged).
    """

    total_episodes: int = 1000000
    local_batch_size: int = 64
    local_mini_batch_size: tyro.conf.Suppress[int] = None
    batch_size: tyro.conf.Suppress[int] = None
    mini_batch_size: tyro.conf.Suppress[int] = None
    gradient_accumulation_steps: int = 1
    """gradient accumulation steps"""
    local_micro_batch_size: tyro.conf.Suppress[int] = None
    """per rank micro batch size"""
    world_size: tyro.conf.Suppress[int] = None
    minibatch_size: tyro.conf.Suppress[int] = None
    num_updates: tyro.conf.Suppress[int] = None
    nminibatches: int = 1
    noptepochs: int = 4
    lr: float = 0.00001
    eps: float = 1e-5
    vf_coef: float = 0.1
    cliprange: float = 0.2
    cliprange_value: float = 0.2
    gamma: float = 1
    lam: float = 0.95
    whiten_rewards: bool = True
+
+
@dataclass
class TaskHParams:
    """Task hyperparameters: how queries are built and responses sampled/scored."""

    # Query params
    query_length: int = 64  # tokens per query (MyDataset pads/truncates to this)
    query_dataset: str = "books"  # key into lm_human_preference_details.data.DATASET
    query_prefix: str = ""
    query_suffix: str = ""
    start_text: Optional[str] = None  # if set, query starts right after this token
    end_text: Optional[str] = None  # if set, query is cut at the last occurrence of this token

    # Response params
    response_length: int = 24  # exact number of tokens sampled per response

    # Truncate response after the first occurrence of this token at or after index after when sampling.
    truncate_token: int = 13  # NOTE(review): presumably "." in the GPT-2 vocab — confirm
    truncate_after: int = 16
    penalty_reward_value: int = -1  # score for responses lacking the truncate token

    # LM params
    temperature: float = 0.7  # used at sampling time and when re-computing logprobs
+
+
@dataclass
class Args:
    """Top-level run configuration, parsed from the CLI by tyro."""

    # common args
    exp_name: str = os.path.basename(__file__)[: -len(".py")]
    """the name of this experiment"""
    seed: int = 1
    """seed of the experiment"""
    track: bool = False
    """if toggled, this experiment will be tracked with Weights and Biases"""
    wandb_project_name: str = "cleanrl"
    """the wandb's project name"""
    wandb_entity: Optional[str] = None
    """the entity (team) of wandb's project"""
    cuda: bool = True
    """Whether to use cuda if available."""
    run_name: tyro.conf.Suppress[str] = None
    """TO BE FILLED: a unique name of this run"""

    base_model: str = "gpt2"
    """the name of the pretrained model to use"""
    print_sample_output_freq: int = 0
    """How often to print sample output"""
    save_path: str = "models/policy/"
    """Where to save the model"""
    use_tensorflow_adam: bool = True
    """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
    # nested hyperparameter groups
    task: TaskHParams = field(default_factory=TaskHParams)  # query/response construction and sampling
    rewards: RewardHParams = field(default_factory=RewardHParams)  # reward model and KL penalty
    ppo: PpoHParams = field(default_factory=PpoHParams)  # PPO optimization

    # distributed settings
    local_rank: int = 0
    """the rank of this process"""
    learner_device_ids: List[int] = field(default_factory=lambda: [0])
    "the device ids that script will use"
    learner_devices: tyro.conf.Suppress[int] = None  # real type is `List[str]`
    """the devices that script will use"""
    global_learner_decices: tyro.conf.Suppress[int] = None  # real type is `List[str]`
    """the total devices (across all nodes and machines) that script will use"""
+
+
def scale_by_adam_tf_style(
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8,
    eps_root: float = 0.0,
    mu_dtype=None,
) -> base.GradientTransformation:
    """Rescale updates according to the Adam algorithm, TensorFlow-style.

    Differs from optax's default Adam in how bias correction is applied:
    instead of correcting the moment estimates `mu` and `nu` separately,
    the combined factor sqrt(1 - b2^t) / (1 - b1^t) is folded into the
    update itself, so `eps` is added to the *uncorrected* second-moment
    denominator (matching TensorFlow's Adam).

    References:
      [Kingma et al, 2014](https://arxiv.org/abs/1412.6980)

    Args:
      b1: Decay rate for the exponentially weighted average of grads.
      b2: Decay rate for the exponentially weighted average of squared grads.
      eps: Term added to the denominator to improve numerical stability.
      eps_root: Term added to the denominator inside the square-root to improve
        numerical stability when backpropagating gradients through the rescaling.
      mu_dtype: Optional `dtype` to be used for the first order accumulator; if
        `None` then the `dtype` is inferred from `params` and `updates`.

    Returns:
      A `GradientTransformation` object.
    """

    mu_dtype = utils.canonicalize_dtype(mu_dtype)

    def init_fn(params):
        # Zero-initialize both moment accumulators with the params' tree structure.
        mu = jax.tree_util.tree_map(
            lambda t: jnp.zeros_like(t, dtype=mu_dtype), params
        )  # First moment
        nu = jax.tree_util.tree_map(jnp.zeros_like, params)  # Second moment
        return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)

    def update_fn(updates, state, params=None):
        del params
        mu = update_moment(updates, state.mu, b1, 1)
        nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
        # safe_int32_increment avoids wrapping past the int32 maximum.
        count_inc = numerics.safe_int32_increment(state.count)

        ### `optax` default adam implementation
        # mu_hat = bias_correction(mu, b1, count_inc)
        # nu_hat = bias_correction(nu, b2, count_inc)
        # updates = jax.tree_util.tree_map(
        #     lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
        ### Tensorflow adam implementation
        # Bias correction is applied as a single scalar factor on the step.
        updates = jax.tree_util.tree_map(
            lambda m, v: (jnp.sqrt(1 - b2**count_inc) / (1 - b1**count_inc))
            * m
            / (jnp.sqrt(v + eps_root) + eps),
            mu,
            nu,
        )  #
        mu = utils.cast_tree(mu, mu_dtype)
        return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)

    return base.GradientTransformation(init_fn, update_fn)
+
+
def adam_tf_style(
    learning_rate,
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8,
    eps_root: float = 0.0,
    mu_dtype=None,
):
    """Adam with TensorFlow-style bias correction (see scale_by_adam_tf_style)."""
    rescale = scale_by_adam_tf_style(
        b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype
    )
    # Chain the moment rescaling with the (possibly scheduled) learning rate.
    return combine.chain(rescale, _scale_by_learning_rate(learning_rate))
+
+
class AdaptiveKLController:
    """Adapts a KL-penalty coefficient so the observed KL tracks a target.

    `value` is the current coefficient; `update` nudges it multiplicatively
    based on how far the measured KL `current` is from `hparams.target`,
    spread over a horizon of `hparams.horizon` episodes.
    """

    def __init__(self, init_kl_coef: float, hparams: AdaptiveKLParams):
        self.value = init_kl_coef
        self.hparams = hparams

    def update(self, current, n_steps):
        # Clip the relative error to +/-20% so one bad batch cannot swing
        # the coefficient too far in a single update.
        error = np.clip(current / self.hparams.target - 1, -0.2, 0.2)
        self.value = self.value * (1 + error * n_steps / self.hparams.horizon)
+
+
def whiten(values, shift_mean=True):
    """Normalize `values` to unit variance, and zero mean unless shift_mean=False.

    Uses the biased (population) variance, matching TF `tf.nn.moments`.
    """
    mean = jnp.mean(values)
    var = jnp.var(values)
    normalized = (values - mean) * jax.lax.rsqrt(var + 1e-8)
    return normalized if shift_mean else normalized + mean
+
+
class ScalarHead(nn.Module):
    """Linear head mapping backbone hidden states to one scalar per position.

    Used as the policy's value head. The kernel is initialized with stddev 0
    (i.e. all zeros) and zero bias, so initial value predictions are 0.
    """

    # NOTE(review): unlike RewardHead, this field is never read or checked
    # against the input's last dimension — confirm it is intentional.
    head_input_size: int

    @nn.compact
    def __call__(self, x):
        # Dense(1): project [..., hidden_size] -> [..., 1].
        x = nn.Dense(
            1,
            kernel_init=nn.initializers.normal(stddev=0),
            bias_init=nn.initializers.zeros_init(),
        )(x)
        return x
+
+
class RewardHead(nn.Module):
    """Linear head producing a scalar reward, with a learnable affine rescale.

    Output is `dense(x) * reward_gain + reward_bias`, where `reward_gain`
    and `reward_bias` are trainable scalars initialized to 1 and 0.
    """

    head_input_size: int  # expected size of the input's last dimension (asserted)

    @nn.compact
    def __call__(self, x):
        assert x.shape[-1] == self.head_input_size
        # Kernel stddev 1/sqrt(fan_in + 1); bias zero.
        x = nn.Dense(
            1,
            kernel_init=nn.initializers.normal(
                stddev=1 / np.sqrt(self.head_input_size + 1)
            ),
            bias_init=nn.initializers.zeros_init(),
        )(x)
        reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
        reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
        x = x * reward_gain + reward_bias
        return x
+
+
@flax.struct.dataclass
class LMBackboneWithScalarHeadParams:
    """Parameters for the language model backbone and a scalar head."""

    lm_backbone_params: flax.core.FrozenDict  # transformer backbone parameters
    head_params: flax.core.FrozenDict  # ScalarHead / RewardHead parameters
+
+
# a pytorch dataset
class MyDataset(IterableDataset):
    """Stream of fixed-length token queries drawn from a text generator.

    Each yielded item is the `tokenizer.pad(...)` output for a query of at
    most `query_length` tokens. When `start_text` is given, the query starts
    right after the first occurrence of that token; when `end_text` is given,
    the query is cut at the last occurrence of that token within the first
    `query_length` tokens. Texts missing a required token are skipped.
    """

    def __init__(
        self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
    ):
        self.generator = generator  # callable(split, seed, shuffle=...) yielding raw texts
        self.tokenizer = tokenizer
        self.query_length = query_length
        self.start_text = start_text
        self.end_text = end_text
        self.seed = seed
        token_to_index = tokenizer.get_vocab()
        # start_text / end_text must each correspond to a single vocab token
        self.start_token = token_to_index[start_text] if self.start_text else None
        self.end_token = token_to_index[end_text] if self.end_text else None

    def __iter__(self):
        for text in self.generator("train", self.seed, shuffle=True):
            tokens = self.tokenizer.encode(text)
            if self.start_token is not None:
                try:
                    first_index = tokens.index(self.start_token) + 1
                    if first_index < len(tokens):
                        tokens = tokens[first_index:]
                # list.index raises ValueError when the token is absent;
                # the original bare `except:` also swallowed SystemExit and
                # KeyboardInterrupt, which we must not do.
                except ValueError:
                    continue
            tokens = tokens[: self.query_length]
            if self.end_token is not None:
                try:
                    last_index = len(tokens) - tokens[::-1].index(self.end_token)
                    tokens = tokens[:last_index]
                except ValueError:  # end token absent: skip this text
                    continue
            output = self.tokenizer.pad(
                {"input_ids": tokens},
                padding="max_length",
                max_length=self.query_length,
                return_tensors="pt",
                return_attention_mask=True,
            )
            yield output
+
+
def right_padding_to_left_padding(tokens, pad_id):
    """Move each row's pad tokens to the front, preserving token order.

    Relies on `jnp.argsort` being stable by default: sorting a 0/1 pad mask
    moves pad positions (0s) to the front while keeping the relative order
    of both the pads and the real tokens.
    """

    def shift_row(row):
        is_token = 1 - (row == pad_id)  # 0 at pad positions, 1 elsewhere
        order = jnp.argsort(is_token)  # stable: pads first, tokens after
        return row[order]

    return jax.vmap(shift_row)(tokens)
+
+
def ceil_div(a, b):
    """Return ceil(a / b) using exact integer arithmetic (for b > 0)."""
    return 1 + (a - 1) // b
+
+
def exact_div(a, b):
    """Divide `a` by `b`, raising ValueError unless `b` evenly divides `a`."""
    quotient, remainder = divmod(a, b)
    if remainder:
        raise ValueError(f"Inexact division: {a} / {b} = {a / b}")
    return quotient
+
+
def prepare_reward_forward(args, tokenizer):
    """Prepare the forward pass of the reward model and parameters.

    Returns `reward_forward` with its parameters pre-bound via
    `functools.partial`, so callers pass only `query_responses_ids`.
    Parameters come from an Orbax checkpoint when
    `args.rewards.trained_model` is set, otherwise from the pretrained
    backbone plus a freshly initialized head.
    """

    lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
    scalar_head = RewardHead(head_input_size=lm_backbone.config.hidden_size)

    def reward_forward(
        params: LMBackboneWithScalarHeadParams,
        query_responses_ids: jnp.ndarray,
    ):
        """Get the scalar reward for each query--response pair."""
        assert query_responses_ids.ndim == 2

        # mask out padding tokens
        attention_mask = query_responses_ids != tokenizer.pad_token_id
        query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)

        # assign position ids
        # positions restart at 0 from the first non-pad token so left padding
        # does not shift the positional embeddings
        position_ids = attention_mask.cumsum(1) - attention_mask

        reward_latents = lm_backbone.module.apply(
            variables=params.lm_backbone_params,
            input_ids=query_responses_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_hidden_states=True,
        ).hidden_states[-1]
        # reward_latents: [batch_size, length, hidden_size]

        # take the final position's hidden state (callers left-pad the input,
        # so this is the last real token)
        last_reward_latents = reward_latents[:, -1, :]
        # last_reward_latents: [batch_size, hidden_size]

        reward = scalar_head.apply(variables=params.head_params, x=last_reward_latents)
        # reward: [batch_size, 1]
        return reward

    if args.rewards.trained_model:
        # Restore previously trained reward-model weights from an Orbax checkpoint.
        orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()
        reward_state_params = orbax_checkpointer.restore(args.rewards.trained_model)[
            "reward_model"
        ]["params"]
        reward_params = LMBackboneWithScalarHeadParams(
            lm_backbone_params=flax.core.FrozenDict(
                {"params": reward_state_params["lm_backbone_params"]["params"]}
            ),
            head_params=flax.core.FrozenDict(
                {"params": reward_state_params["head_params"]["params"]}
            ),
        )
        pprint(f"Loaded pretrained reward model from {args.rewards.trained_model}")
    else:
        # Fall back to the pretrained backbone plus a freshly initialized head.
        key = jax.random.PRNGKey(args.seed)
        key, init_key = jax.random.split(key, 2)
        reward_params = LMBackboneWithScalarHeadParams(
            lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
            head_params=flax.core.FrozenDict(
                scalar_head.init(
                    init_key,
                    jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
                )
            ),
        )

    return functools.partial(reward_forward, params=reward_params)
+
+
def prepare_policy_forward_and_policy_generate(args, tokenizer):
    """Prepare the forward pass of the policy model and parameters.

    Returns `(policy_forward, policy_generate, policy_params)`:
    `policy_forward` yields backbone outputs (logits) and per-position value
    estimates, `policy_generate` samples a fixed-length response for each
    query, and `policy_params` pair the pretrained backbone with a freshly
    initialized value head.
    """

    lm_backbone = FlaxAutoModelForCausalLM.from_pretrained(args.base_model)
    # disable `pad_token_id` and `eos_token_id` because we just want to
    # generate tokens without truncation / padding
    lm_backbone.generation_config.eos_token_id = None
    lm_backbone.generation_config.pad_token_id = tokenizer.pad_token_id
    scalar_head = ScalarHead(head_input_size=lm_backbone.config.hidden_size)

    # min_new_tokens == max_new_tokens forces exactly `response_length`
    # sampled tokens for every query.
    generation_config = GenerationConfig(
        max_new_tokens=args.task.response_length,
        min_new_tokens=args.task.response_length,
        temperature=args.task.temperature,
        top_k=0.0,
        top_p=1.0,
        do_sample=True,
        pad_token_id=tokenizer.pad_token_id,
    )

    def policy_forward(
        params: LMBackboneWithScalarHeadParams,
        input_ids: jnp.ndarray,
    ):
        """Run the policy on input_ids: return backbone output and value estimates."""
        assert input_ids.ndim == 2
        # shape: [batch_size, length]

        # mask out padding tokens
        attention_mask = input_ids != tokenizer.pad_token_id
        input_ids = jnp.where(attention_mask, input_ids, 0)

        # assign position ids
        # positions restart at 0 from the first non-pad token so left padding
        # does not shift the positional embeddings
        position_ids = attention_mask.cumsum(1) - attention_mask

        lm_backbone_out = lm_backbone.module.apply(
            variables=params.lm_backbone_params,
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_hidden_states=True,
        )

        value_latents = lm_backbone_out.hidden_states[-1]
        # shape: [batch_size, length, hidden_size]

        values = scalar_head.apply(variables=params.head_params, x=value_latents)
        # shape: [batch_size, length, 1]
        return lm_backbone_out, values

    def policy_generate(
        params: LMBackboneWithScalarHeadParams,
        queries: jnp.ndarray,
    ):
        """Sample a fixed-length response per query; returns [queries, responses]."""
        input_ids = queries
        attention_mask = input_ids != tokenizer.pad_token_id
        input_ids = jnp.where(attention_mask, queries, 0)
        output = lm_backbone.generate(
            params=params["params"],
            input_ids=input_ids,
            generation_config=generation_config,
            attention_mask=attention_mask.astype("i4"),
            return_dict_in_generate=True,
        )
        context_length = input_ids.shape[1]
        # re-attach the original queries so their pad tokens are preserved
        return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)

    key = jax.random.PRNGKey(args.seed)
    key, init_key = jax.random.split(key, 2)
    # Pretrained backbone weights + freshly initialized (zero-output) value head.
    policy_params = LMBackboneWithScalarHeadParams(
        lm_backbone_params=flax.core.FrozenDict({"params": lm_backbone.params}),
        head_params=flax.core.FrozenDict(
            scalar_head.init(
                init_key,
                jnp.ones(lm_backbone.config.hidden_size)[None, None, :],
            )
        ),
    )

    return policy_forward, policy_generate, policy_params
+
+
@flax.struct.dataclass
class RLStatistics(metrics.Collection):
    """Running averages of PPO training statistics (a clu metrics collection).

    Field names match the keys of the `current_rl_stats` dict produced inside
    `train_step`; values are averaged across microbatches and devices.
    """

    approxkl: metrics.Average.from_output("approxkl")
    entropy: metrics.Average.from_output("entropy")
    pg_loss: metrics.Average.from_output("pg_loss")
    pg_clipfrac: metrics.Average.from_output("pg_clipfrac")
    vf_loss1: metrics.Average.from_output("vf_loss1")
    vf_loss: metrics.Average.from_output("vf_loss")
    vf_clipfrac: metrics.Average.from_output("vf_clipfrac")
    ratio: metrics.Average.from_output("ratio")
    loss: metrics.Average.from_output("loss")
+
+
def train_step(
    policy_state,
    rl_stats,
    mb_advantages,
    mb_returns,
    mb_values,
    mb_query_responses,
    mb_logprobs,
    args,
):
    """One PPO gradient step on a single microbatch.

    Must run under `jax.pmap(..., axis_name="batch")`: gradients are averaged
    across devices with `jax.lax.pmean` before being applied.

    Args:
        policy_state: TrainState with policy params, apply_fn and optimizer.
        rl_stats: RLStatistics accumulator; this step's stats are merged in.
        mb_advantages, mb_returns, mb_values, mb_logprobs: rollout-time
            quantities for the microbatch.
        mb_query_responses: token ids, [micro_batch, query_length + response_length].
        args: run configuration (clipping ranges, vf_coef, temperature, ...).

    Returns:
        (updated policy_state, updated rl_stats).
    """

    def loss(params):
        output, vpred_temp = policy_state.apply_fn(params, mb_query_responses)
        # vpred_temp: [local_micro_batch_size, query_length + response_length, 1]
        vpred = jnp.squeeze(vpred_temp[:, args.task.query_length - 1 : -1, :], axis=-1)
        # vpred: [local_micro_batch_size, response_length]
        # Keep new value predictions within cliprange_value of the
        # rollout-time value estimates.
        vpredclipped = jnp.clip(
            vpred,
            mb_values - args.ppo.cliprange_value,
            mb_values + args.ppo.cliprange_value,
        )
        vf_losses1 = jnp.square(vpred - mb_returns)
        vf_losses2 = jnp.square(vpredclipped - mb_returns)
        # Pessimistic (element-wise max) of clipped/unclipped squared errors.
        vf_loss = 0.5 * jnp.maximum(vf_losses1, vf_losses2).mean()
        vf_clipfrac = (vf_losses2 > vf_losses1).astype(jnp.float32).mean()

        logits = output.logits[:, args.task.query_length - 1 : -1, :]
        # Same temperature as at sampling time so logprobs are comparable.
        logits /= args.task.temperature
        responses = mb_query_responses[:, args.task.query_length :]
        # Cross-entropy with integer labels == negative log-probability of
        # the sampled response tokens under the current policy.
        new_logprobs = -optax.softmax_cross_entropy_with_integer_labels(
            logits, responses
        )

        logprobs_diff = new_logprobs - mb_logprobs
        ratio = jnp.exp(logprobs_diff)  # importance ratio pi_new / pi_old
        # PPO clipped-surrogate policy loss (pessimistic max of both terms).
        pg_losses1 = -mb_advantages * ratio
        pg_losses2 = -mb_advantages * jnp.clip(
            ratio, 1.0 - args.ppo.cliprange, 1.0 + args.ppo.cliprange
        )
        pg_loss = jnp.maximum(pg_losses1, pg_losses2).mean()
        pg_clipfrac = (pg_losses2 > pg_losses1).astype(jnp.float32).mean()

        # Per-token policy entropy computed directly from the logits.
        pd = jax.nn.softmax(logits, axis=-1)
        entropy = jax.nn.logsumexp(logits, axis=-1) - jnp.sum(pd * logits, axis=-1)

        # Quadratic approximation of the old/new policy KL, for monitoring.
        approxkl = 0.5 * ((logprobs_diff) ** 2).mean()
        loss = pg_loss + args.ppo.vf_coef * vf_loss

        current_rl_stats = dict(
            approxkl=approxkl,
            entropy=entropy.mean(),
            ratio=ratio.mean(),
            pg_loss=pg_loss,
            pg_clipfrac=pg_clipfrac,
            vf_loss1=vf_losses1.mean(),
            vf_loss=vf_loss,
            vf_clipfrac=vf_clipfrac,
            loss=loss,
        )
        return loss, current_rl_stats

    grad_fn = jax.value_and_grad(loss, has_aux=True)
    (loss, current_rl_stats), grads = grad_fn(policy_state.params)
    # Average gradients across the "batch" pmap axis before applying them.
    grads = jax.lax.pmean(grads, "batch")
    policy_state = policy_state.apply_gradients(grads=grads)

    rl_stats = rl_stats.merge(RLStatistics.gather_from_model_output(**current_rl_stats))

    return policy_state, rl_stats
+
+
def linear_schedule(optimizer_step, args):
    """Anneal the learning rate linearly so it reaches 0 after one epoch.

    Args:
        optimizer_step: number of optimizer steps taken so far. Each PPO
            update performs noptepochs * nminibatches *
            gradient_accumulation_steps of them.
        args: run configuration; reads `args.ppo.noptepochs`,
            `nminibatches`, `gradient_accumulation_steps`, `num_updates`
            and `lr`.

    Returns:
        The current learning rate, never below 0: once `num_updates` PPO
        updates have been consumed the schedule stays at 0 (the unclamped
        formula would keep decreasing into negative learning rates, which
        would make Adam ascend the loss).
    """
    update = 1 + optimizer_step // (
        args.ppo.noptepochs
        * args.ppo.nminibatches
        * args.ppo.gradient_accumulation_steps
    )
    frac = 1.0 - (update - 1.0) / args.ppo.num_updates
    # Clamp so steps past the planned schedule cannot yield a negative lr.
    lrnow = max(frac, 0.0) * args.ppo.lr
    return lrnow
+
+
+def train(args: Args):
+ local_devices = jax.local_devices()
+ global_devices = jax.devices()
+ args.ppo.world_size = jax.process_count()
+ learner_devices = [local_devices[d_id] for d_id in args.learner_device_ids]
+ global_learner_decices = [
+ global_devices[d_id + process_index * len(local_devices)]
+ for process_index in range(args.ppo.world_size)
+ for d_id in args.learner_device_ids
+ ]
+ pprint({"global_learner_decices": global_learner_decices})
+ args.global_learner_decices = [str(item) for item in global_learner_decices]
+ args.learner_devices = [str(item) for item in learner_devices]
+ args.local_rank = jax.process_index()
+ args.ppo.batch_size = int(
+ args.ppo.local_batch_size * len(args.learner_devices) * args.ppo.world_size
+ )
+ args.ppo.minibatch_size = exact_div(args.ppo.batch_size, args.ppo.nminibatches)
+ args.ppo.local_mini_batch_size = exact_div(
+ args.ppo.local_batch_size, args.ppo.nminibatches
+ )
+ args.ppo.local_micro_batch_size = exact_div(
+ args.ppo.local_mini_batch_size, args.ppo.gradient_accumulation_steps
+ )
+ if args.ppo.whiten_rewards:
+ assert (
+ args.ppo.local_mini_batch_size >= 8
+ ), f"Per-rank minibatch size {args.ppo.local_mini_batch_size} is insufficient for whitening"
+ # `per_rank_rollout_batch_size` is our `args.ppo.local_batch_size`
+ # `per_rank_minibatch_size` is our `args.ppo.local_mini_batch_size`
+ args.ppo.num_updates = args.ppo.total_episodes // args.ppo.batch_size
+
+ console = Console(force_terminal=True)
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+ writer = SimpleNamespace() # dummy writer
+ writer.add_scalar = lambda x, y, z: None
+ writer.add_histogram = lambda x, y, z: None
+
+ if args.local_rank == 0:
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+ writer = SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s"
+ % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+ local_seed = args.seed + args.local_rank * 100003 # Prime
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.base_model,
+ padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ reward_forward = prepare_reward_forward(args, tokenizer)
+ (
+ policy_forward,
+ policy_generate,
+ policy_params,
+ ) = prepare_policy_forward_and_policy_generate(args, tokenizer)
+ _, _, ref_policy_params = prepare_policy_forward_and_policy_generate(
+ args, tokenizer
+ )
+
+ if args.use_tensorflow_adam:
+ adam = adam_tf_style
+ else:
+ adam = optax.adam
+
+ optimizer = adam(
+ learning_rate=functools.partial(linear_schedule, args=args),
+ eps=args.ppo.eps,
+ )
+
+ optimizer = optax.MultiSteps(optimizer, args.ppo.gradient_accumulation_steps)
+
+ policy_state = TrainState.create(
+ apply_fn=policy_forward, params=policy_params, tx=optimizer
+ )
+ policy_state = jax_utils.replicate(policy_state)
+
+ dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=local_seed,
+ start_text=args.task.start_text,
+ end_text=args.task.end_text,
+ )
+ dataloader = DataLoader(dataset, batch_size=args.ppo.batch_size)
+ iter_dataloader = iter(dataloader)
+ kl_ctl = AdaptiveKLController(
+ args.rewards.kl_coef, hparams=args.rewards.adaptive_kl
+ )
+
+ def train_update(policy_state, input_ids, rl_stats, kl_ctl_value):
+ queries = right_padding_to_left_padding(input_ids, tokenizer.pad_token_id)
+
+ query_responses = policy_generate(
+ params=policy_state.params.lm_backbone_params,
+ queries=queries,
+ )
+ # query_responses: [local_batch_size, query_length + response_length]
+ responses = query_responses[:, args.task.query_length :]
+
+ output, full_values = policy_forward(policy_state.params, query_responses)
+ values = full_values[:, args.task.query_length - 1 : -1].squeeze(-1)
+ # values: [local_batch_size, response_length]
+ logits = output.logits[:, args.task.query_length - 1 : -1, :]
+ # logits: [local_batch_size, response_length, vocab_size]
+ logits /= args.task.temperature
+ all_logprobs = jax.nn.log_softmax(logits, axis=-1)
+ # all_logprobs: [local_batch_size, response_length, vocab_size]
+ logprobs = jnp.take_along_axis(all_logprobs, responses[..., None], -1).squeeze(
+ -1
+ )
+ # logprobs: [local_batch_size, response_length]
+
+ ref_output, _ = policy_forward(ref_policy_params, query_responses)
+ ref_logits = ref_output.logits[:, args.task.query_length - 1 : -1, :]
+ # ref_logits: [local_batch_size, response_length, vocab_size]
+ ref_logits /= args.task.temperature
+ ref_all_logprobs = jax.nn.log_softmax(ref_logits, axis=-1)
+ ref_logprobs = jnp.take_along_axis(
+ ref_all_logprobs, responses[..., None], -1
+ ).squeeze(-1)
+ # ref_logprobs: [local_batch_size, response_length]
+
+ # **Response Processing**
+ # 1. truncate at the first occurrence of `truncate_token` that appears at or after
+ # position truncate_after in the responses
+ # https://github.com/openai/lm-human-preferences/blob/cbfd210bb8b08f6bc5c26878c10984b90f516c66/lm_human_preferences/train_policy.py#L378
+ truncate_token_mask = responses == args.task.truncate_token
+ truncate_after_or_token_mask = jnp.concatenate(
+ [
+ jnp.zeros_like(truncate_token_mask)[:, : args.task.truncate_after],
+ truncate_token_mask[:, args.task.truncate_after :],
+ ],
+ axis=1,
+ )
+ truncate_mask = (
+ jnp.cumsum(truncate_after_or_token_mask, axis=1)
+ - truncate_after_or_token_mask
+ ).astype("bool")
+ postprocessed_responses = jnp.where(
+ truncate_mask,
+ jnp.full_like(responses, tokenizer.pad_token_id),
+ responses,
+ )
+
+ # 2. run reward model on the truncated responses
+ postprocessed_query_responses = jnp.concatenate(
+ (queries, postprocessed_responses), axis=1
+ )
+ postprocessed_query_responses = right_padding_to_left_padding(
+ postprocessed_query_responses, tokenizer.pad_token_id
+ )
+ scores = reward_forward(
+ query_responses_ids=postprocessed_query_responses
+ ).flatten()
+
+ # 3. filter response. Ensure that the sample contains truncate_token
+ # responses not passing that filter will receive a low (fixed) score
+ # only query humans on responses that pass that filter
+ matches_token = (
+ postprocessed_responses[:, args.task.truncate_after :]
+ == args.task.truncate_token
+ )
+ filter_mask = jnp.any(matches_token, axis=-1)
+
+ scores = jnp.where(
+ filter_mask,
+ scores,
+ jnp.full_like(scores, args.task.penalty_reward_value),
+ )
+
+ # 4. compute rewards
+ kl = logprobs - ref_logprobs
+ non_score_reward = -kl_ctl_value * kl
+ rewards = non_score_reward
+ rewards = rewards.at[:, -1].add(scores)
+
+ # 5. whiten rewards
+ if args.ppo.whiten_rewards:
+ rewards = whiten(rewards, shift_mean=False)
+
+ # 6. compute advantages and returns
+ def compute_gae_once(carry, inp):
+ advantages = carry
+ nextdone, nextvalues, curvalues, reward = inp
+ nextnonterminal = 1.0 - nextdone
+
+ delta = reward + args.ppo.gamma * nextvalues * nextnonterminal - curvalues
+ advantages = (
+ delta + args.ppo.gamma * args.ppo.lam * nextnonterminal * advantages
+ )
+ return advantages, advantages
+
+ extended_values = jnp.concatenate(
+ (values, jnp.zeros((args.ppo.local_batch_size, 1))), axis=1
+ )
+ dones = jnp.zeros_like(rewards)
+ dones = dones.at[:, -1].set(1.0)
+
+ advantages = jnp.zeros((args.ppo.local_batch_size,))
+ _, advantages = jax.lax.scan(
+ compute_gae_once,
+ advantages,
+ (dones.T, extended_values[:, 1:].T, extended_values[:, :-1].T, rewards.T),
+ reverse=True,
+ )
+
+ advantages = advantages.T
+ returns = advantages + values
+ advantages = whiten(advantages)
+
+ def ppo_single_microbatch(carry, inp):
+ policy_state, rl_stats = carry
+ mb_advantages, mb_returns, mb_values, mb_query_responses, mb_logprobs = inp
+
+ policy_state, rl_stats = train_step(
+ policy_state=policy_state,
+ rl_stats=rl_stats,
+ mb_advantages=mb_advantages,
+ mb_returns=mb_returns,
+ mb_values=mb_values,
+ mb_query_responses=mb_query_responses,
+ mb_logprobs=mb_logprobs,
+ args=args,
+ )
+ return (policy_state, rl_stats), None
+
+ def ppo_single_epoch(carry, inp):
+ policy_state, rl_stats, key = carry
+ key, subkey = jax.random.split(key, 2)
+ perm = jax.random.permutation(key, args.ppo.local_batch_size)
+ # advantages, returns, values, query_responses, logprobs = inp
+ mbs_advantages = einops.rearrange(
+ advantages[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_returns = einops.rearrange(
+ returns[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_values = einops.rearrange(
+ values[perm], "(c m) l -> c m l", c=args.ppo.gradient_accumulation_steps
+ )
+ mbs_query_responses = einops.rearrange(
+ query_responses[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ mbs_logprobs = einops.rearrange(
+ logprobs[perm],
+ "(c m) l -> c m l",
+ c=args.ppo.gradient_accumulation_steps,
+ )
+ (policy_state, rl_stats), _ = jax.lax.scan(
+ f=ppo_single_microbatch,
+ init=(policy_state, rl_stats),
+ xs=(
+ mbs_advantages,
+ mbs_returns,
+ mbs_values,
+ mbs_query_responses,
+ mbs_logprobs,
+ ),
+ )
+ return (policy_state, rl_stats, key), None
+
+ key = jax.random.PRNGKey(args.seed)
+ # Do multiple epochs of PPO training, with a fresh random shuffle in each epoch
+ (policy_state, rl_stats, _), _ = jax.lax.scan(
+ f=ppo_single_epoch,
+ init=(policy_state, rl_stats, key),
+ xs=None,
+ length=args.ppo.noptepochs,
+ )
+
+ return (
+ policy_state,
+ returns,
+ advantages,
+ values,
+ query_responses,
+ logprobs,
+ kl,
+ scores,
+ rl_stats,
+ )
+
+ p_train_update = jax.pmap(
+ train_update,
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+
+ print("===training policy===")
+ global_step = 0
+
+ for update in range(1, args.ppo.num_updates + 1):
+ global_step += 1 * args.ppo.batch_size
+ data = next(iter_dataloader)
+ input_ids = common_utils.shard(data["input_ids"].numpy())
+
+ rl_stats = jax_utils.replicate(RLStatistics.empty())
+ (
+ policy_state,
+ returns,
+ advantages,
+ values,
+ query_responses,
+ logprobs,
+ kl,
+ scores,
+ rl_stats,
+ ) = p_train_update(
+ policy_state=policy_state,
+ input_ids=input_ids,
+ rl_stats=rl_stats,
+ kl_ctl_value=jax_utils.replicate(kl_ctl.value),
+ )
+
+ try:
+ sample_kl = kl[0][0].sum().item()
+ sample_score = scores[0][0].item()
+ sample_query_response = query_responses[0][0]
+ console.print(
+ f"[green][bold]{'Query'}:[/]\n"
+ + f"[green]{ tokenizer.decode(sample_query_response[:args.task.query_length], skip_special_tokens=True)}[/]\n\n"
+ + f"[yellow][bold]{'Response'}:[/]\n"
+ + f"[yellow]{tokenizer.decode(sample_query_response[args.task.query_length:], skip_special_tokens=True)}[/]\n\n"
+ + f"[red]score: {sample_score}, kl: {sample_kl}, total reward: {sample_score - kl_ctl.value * sample_kl} [/]"
+ )
+ except Exception as e:
+ print(e)
+
+ # Rollout metrics
+ rl_stats = rl_stats.unreplicate().compute()
+ non_score_reward = -kl_ctl.value * kl
+
+ mean_kl = kl.sum(-1).mean()
+ mean_entropy = (-logprobs).sum(-1).mean()
+ mean_non_score_reward = non_score_reward.sum(-1).mean()
+ writer.add_scalar("objective/kl_coef", np.array(kl_ctl.value), update)
+ writer.add_scalar("objective/kl", mean_kl.item(), update)
+ writer.add_scalar("objective/entropy", mean_entropy.item(), update)
+ writer.add_scalar(
+ "objective/non_score_reward", mean_non_score_reward.item(), update
+ )
+ writer.add_scalar(
+ "objective/score_total",
+ mean_non_score_reward.item() + scores.mean().item(),
+ update,
+ )
+ writer.add_scalar("objective/scores", scores.mean().item(), update)
+ writer.add_scalar("ppo/returns/mean", returns.mean().item(), update)
+ writer.add_scalar("ppo/returns/var", returns.var().item(), update)
+ writer.add_scalar("ppo/val/mean", values.mean().item(), update)
+ writer.add_scalar("ppo/val/var", values.var().item(), update)
+ writer.add_scalar("ppo/val/advantage", advantages.mean().item(), update)
+ writer.add_scalar(
+ "ppo/val/num_eos_tokens",
+ (query_responses[:, :, args.task.query_length :] == tokenizer.eos_token_id).sum().item(),
+ update,
+ )
+
+ # RL metrics aggregated at the batch level
+ writer.add_scalar(
+ "ppo/policy/approxkl_avg", rl_stats["approxkl"].item(), update
+ )
+ writer.add_scalar(
+ "ppo/policy/entropy_avg",
+ rl_stats["entropy"].item(),
+ update,
+ )
+ writer.add_scalar(
+ "ppo/loss/policy_avg",
+ rl_stats["pg_loss"].item(),
+ update,
+ )
+ writer.add_scalar(
+ "ppo/policy/clipfrac_avg",
+ rl_stats["pg_clipfrac"].item(),
+ update,
+ )
+ writer.add_scalar(
+ "ppo/val/error",
+ rl_stats["vf_loss1"].item(),
+ update,
+ )
+ writer.add_scalar(
+ "ppo/loss/value_avg",
+ rl_stats["vf_loss"].item(),
+ update,
+ )
+ writer.add_scalar(
+ "ppo/val/clipfrac_avg",
+ rl_stats["vf_clipfrac"].item(),
+ update,
+ )
+ writer.add_scalar(
+ "ppo/val/ratio_avg",
+ rl_stats["ratio"].item(),
+ update,
+ )
+ writer.add_scalar("ppo/loss/total", rl_stats["loss"].item(), update)
+
+ # Logging learning rate and learning progress
+ lrnow = linear_schedule(policy_state.step - 1, args) | learning rate annealing happening outside of `otpax`is a big no no. I remember seeing it makes the training twice as slow. Instead do https://github.com/vwxyzjn/cleanba/blob/81dca0054c8c0930046a2d12b07ada2945014d01/cleanba/cleanba_ppo.py#L497 |
lm-human-preference-details | github_2023 | python | 17 | vwxyzjn | vwxyzjn | @@ -402,11 +401,21 @@ def normalize(
generation_config,
):
# number of minibatches for computing the normalization statistics
- n_batches = ceil_div(args.local_normalize_samples, args.rollout_batch_size)
+ n_batches = ceil_div(args.normalize_samples, args.rollout_batch_size) | This should still be `local_normalize_samples`, right? |
lm-human-preference-details | github_2023 | python | 17 | vwxyzjn | vwxyzjn | @@ -636,7 +649,7 @@ def train(args: Args):
start_text=args.task.start_text,
end_text=args.task.end_text,
)
- normalization_dataloader = DataLoader(normalization_dataset, batch_size=args.rollout_batch_size)
+ normalization_dataloader = DataLoader(normalization_dataset, batch_size=args.rollout_batch_size * len(local_devices)) | Imagine we have
* `rollout_batch_size = 16`
* `local_rollout_batch_size = 1`
* `len(devices) = 8` (8 GPUs)
* `world_size=2` (2 distributed training processes, 16 GPUs total)
Then the data loader should have a batch size of 8, which we then shard to 8 devices locally in each of the two processes.
|
lm-human-preference-details | github_2023 | python | 13 | vwxyzjn | vwxyzjn | @@ -0,0 +1,765 @@
+""" Train jax-based reward model for LM human preference details."""
+import os
+from dataclasses import asdict, dataclass, field
+from typing import Optional
+import time
+import functools
+import numpy as np
+from torch.utils.data import DataLoader, IterableDataset
+import jax
+import jax.numpy as jnp
+import orbax
+import optax
+from einops import rearrange
+import tyro
+from datasets import load_dataset
+from rich.pretty import pprint
+import transformers
+import flax
+from flax.training import common_utils
+from flax.training import orbax_utils
+from flax.core.frozen_dict import freeze
+from flax import traverse_util, jax_utils
+from flax.training.train_state import TrainState
+import flax.linen as nn
+
+from lm_human_preference_details.data import DATASET
+from torch.utils import tensorboard
+
+
+@dataclass
+class LabelHParams:
+ type: str = None
+ num_train: int = 4992
+ num_labels: int = 4
+ source: str = None
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "lm_human_preference_details"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ label_dataset: str = "sentiment/offline_5k.json"
+ """the name of the dataset to use for labels in
+ `https://huggingface.co/datasets/vwxyzjn/lm-human-preferences`"""
+ local_batch_size: int = 4
+ """per rank batch size"""
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ lr: float = 0.00005
+ """the learning rate"""
+ eps: float = 1e-5
+ """the epsilon for AdamW"""
+ rollout_batch_size: int = 512 # decrease this to e.g. 64 if OOM
+ """rollout batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ """the number of processes to use"""
+ batch_size: tyro.conf.Suppress[int] = None
+ """the batch size across all ranks"""
+ local_normalize_samples: int = 256
+ """Samples used to estimate reward mean and std"""
+ normalize_samples: tyro.conf.Suppress[int] = None
+ """Samples used to estimate reward mean and std across all ranks"""
+ debug_normalize: int = 0
+ """Samples used to check that normalization worked"""
+ normalize_before: bool = True
+ """Whether, before training, to normalize the rewards on the policy to the
+ scales on the training buffer. (For comparisons, just use mean 0, var 1.)"""
+ normalize_after: bool = True
+ """Whether, after training, to normalize the rewards on the ref policy to
+ mean 0, var 1 (so the KL coefficient always has the same meaning)."""
+ print_sample_output_freq: int = 10
+ """How often to print sample output"""
+ save_path: str = "models/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = False
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ labels: LabelHParams = field(default_factory=LabelHParams)
+
+
+OPENAI_PAD_TOKEN_ID = 50259
+
+
+@flax.struct.dataclass
+class RewardModelParams:
+ """Parameters for the reward model."""
+
+ backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+class RewardHead(nn.Module):
+ """Affine transform head for the reward model.
+
+ Attributes:
+ head_input_size: Size of the input to the head. The weights of the linear
+ layer are initialized from mean-zero Gaussians with std
+ 1/sqrt(head_input_size + 1).
+
+ Example:
+ model = RewardHead(head_input_size=768)
+ variables = model.init(jax.random.PRNGKey(0),
+ jnp.ones((1, 6, 768)))
+ """
+
+ head_input_size: int
+
+ def setup(self):
+ self.reward_linear = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(
+ stddev=1 / np.sqrt(self.head_input_size + 1)
+ ),
+ bias_init=nn.initializers.zeros_init(),
+ )
+ self.reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ self.reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = self.reward_linear(x)
+ x = x * self.reward_gain + self.reward_bias
+ return x
+
+
+class MyDataset(IterableDataset):
+ """A dataset for reward model normalization."""
+
+ def __init__(
+ self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
+ ):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ """Convert from right padding to left padding."""
+ assert tokens.ndim == 2
+ return np.array(
+ [
+ [pad_id] * (row == pad_id).sum() + [x for x in row if x != pad_id]
+ for row in tokens
+ ]
+ )
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a/b}")
+ return q
+
+
+# TODO: pmap `generate` to accelerate reward model normalization?
+def generate(pretrained_model, queries, args, generation_config):
+ """generate in a way that does not affect padding tokens"""
+ context_length = queries.shape[1]
+ attention_mask = queries != args.pad_token_id
+ # set padding tokens to 0
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = pretrained_model.generate(
+ input_ids=input_ids,
+ attention_mask=attention_mask.astype("int32"),
+ # position_ids=attention_mask.cumsum(1) - attention_mask.long(),
+ # generation collapsed if this was turned on.
+ # TODO: why does generation collapse with this?
+ generation_config=generation_config,
+ return_dict_in_generate=True,
+ )
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+
+def single_epoch_linear_schedule(global_step, args):
+ """ anneal learning rate linearly to reach 0 after one epoch."""
+ frac = 1.0 - global_step * args.batch_size / args.labels.num_train
+ return args.lr * frac
+
+
+def create_initial_reward_state_and_models(init_key, args):
+ # pylint: disable=redefined-outer-name
+ """reate reward model and initial reward state."""
+
+ reward_backbone = transformers.FlaxAutoModelForCausalLM.from_pretrained(
+ args.base_model
+ )
+ reward_head = RewardHead(head_input_size=reward_backbone.config.hidden_size)
+
+ if args.use_tensorflow_adam:
+ raise NotImplementedError("tensorflow adam is not implemented yet.")
+ else:
+ optimizer = optax.adam(
+ learning_rate=functools.partial(single_epoch_linear_schedule, args=args),
+ eps=args.eps,
+ )
+
+ if args.gradient_accumulation_steps > 1:
+ optimizer = optax.MultiSteps(optimizer, args.gradient_accumulation_steps) | Maybe just do `optimizer = optax.MultiSteps(optimizer, args.gradient_accumulation_steps)`. This will make logging learning rate easier. If `gradient_accumulation_steps==1`, the behavior is equivalent.
See
https://github.com/vwxyzjn/cleanba/blob/352aa6ff495c7cdca02890ec92feb01fd20c2e63/cleanba/cleanba_impala.py#L532-L539
https://github.com/vwxyzjn/cleanba/blob/352aa6ff495c7cdca02890ec92feb01fd20c2e63/cleanba/cleanba_impala.py#L719-L721 |
lm-human-preference-details | github_2023 | python | 13 | vwxyzjn | vwxyzjn | @@ -0,0 +1,765 @@
+""" Train jax-based reward model for LM human preference details."""
+import os
+from dataclasses import asdict, dataclass, field
+from typing import Optional
+import time
+import functools
+import numpy as np
+from torch.utils.data import DataLoader, IterableDataset
+import jax
+import jax.numpy as jnp
+import orbax
+import optax
+from einops import rearrange
+import tyro
+from datasets import load_dataset
+from rich.pretty import pprint
+import transformers
+import flax
+from flax.training import common_utils
+from flax.training import orbax_utils
+from flax.core.frozen_dict import freeze
+from flax import traverse_util, jax_utils
+from flax.training.train_state import TrainState
+import flax.linen as nn
+
+from lm_human_preference_details.data import DATASET
+from torch.utils import tensorboard
+
+
+@dataclass
+class LabelHParams:
+ type: str = None
+ num_train: int = 4992
+ num_labels: int = 4
+ source: str = None
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "lm_human_preference_details"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ label_dataset: str = "sentiment/offline_5k.json"
+ """the name of the dataset to use for labels in
+ `https://huggingface.co/datasets/vwxyzjn/lm-human-preferences`"""
+ local_batch_size: int = 4
+ """per rank batch size"""
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ lr: float = 0.00005
+ """the learning rate"""
+ eps: float = 1e-5
+ """the epsilon for AdamW"""
+ rollout_batch_size: int = 512 # decrease this to e.g. 64 if OOM
+ """rollout batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ """the number of processes to use"""
+ batch_size: tyro.conf.Suppress[int] = None
+ """the batch size across all ranks"""
+ local_normalize_samples: int = 256
+ """Samples used to estimate reward mean and std"""
+ normalize_samples: tyro.conf.Suppress[int] = None
+ """Samples used to estimate reward mean and std across all ranks"""
+ debug_normalize: int = 0
+ """Samples used to check that normalization worked"""
+ normalize_before: bool = True
+ """Whether, before training, to normalize the rewards on the policy to the
+ scales on the training buffer. (For comparisons, just use mean 0, var 1.)"""
+ normalize_after: bool = True
+ """Whether, after training, to normalize the rewards on the ref policy to
+ mean 0, var 1 (so the KL coefficient always has the same meaning)."""
+ print_sample_output_freq: int = 10
+ """How often to print sample output"""
+ save_path: str = "models/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = False
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ labels: LabelHParams = field(default_factory=LabelHParams)
+
+
+OPENAI_PAD_TOKEN_ID = 50259
+
+
+@flax.struct.dataclass
+class RewardModelParams:
+ """Parameters for the reward model."""
+
+ backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+class RewardHead(nn.Module):
+ """Affine transform head for the reward model.
+
+ Attributes:
+ head_input_size: Size of the input to the head. The weights of the linear
+ layer are initialized from mean-zero Gaussians with std
+ 1/sqrt(head_input_size + 1).
+
+ Example:
+ model = RewardHead(head_input_size=768)
+ variables = model.init(jax.random.PRNGKey(0),
+ jnp.ones((1, 6, 768)))
+ """
+
+ head_input_size: int
+
+ def setup(self):
+ self.reward_linear = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(
+ stddev=1 / np.sqrt(self.head_input_size + 1)
+ ),
+ bias_init=nn.initializers.zeros_init(),
+ )
+ self.reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ self.reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = self.reward_linear(x)
+ x = x * self.reward_gain + self.reward_bias
+ return x
+
+
+class MyDataset(IterableDataset):
+ """A dataset for reward model normalization."""
+
+ def __init__(
+ self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
+ ):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ """Convert from right padding to left padding."""
+ assert tokens.ndim == 2
+ return np.array(
+ [
+ [pad_id] * (row == pad_id).sum() + [x for x in row if x != pad_id]
+ for row in tokens
+ ]
+ )
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a/b}")
+ return q
+
+
+# TODO: pmap `generate` to accelerate reward model normalization?
+def generate(pretrained_model, queries, args, generation_config):
+ """generate in a way that does not affect padding tokens"""
+ context_length = queries.shape[1]
+ attention_mask = queries != args.pad_token_id
+ # set padding tokens to 0
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = pretrained_model.generate(
+ input_ids=input_ids,
+ attention_mask=attention_mask.astype("int32"),
+ # position_ids=attention_mask.cumsum(1) - attention_mask.long(),
+ # generation collapsed if this was turned on.
+ # TODO: why does generation collapse with this?
+ generation_config=generation_config,
+ return_dict_in_generate=True,
+ )
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+
+def single_epoch_linear_schedule(global_step, args):
+ """ anneal learning rate linearly to reach 0 after one epoch."""
+ frac = 1.0 - global_step * args.batch_size / args.labels.num_train
+ return args.lr * frac
+
+
+def create_initial_reward_state_and_models(init_key, args):
+ # pylint: disable=redefined-outer-name
+ """reate reward model and initial reward state."""
+
+ reward_backbone = transformers.FlaxAutoModelForCausalLM.from_pretrained(
+ args.base_model
+ )
+ reward_head = RewardHead(head_input_size=reward_backbone.config.hidden_size)
+
+ if args.use_tensorflow_adam:
+ raise NotImplementedError("tensorflow adam is not implemented yet.")
+ else:
+ optimizer = optax.adam(
+ learning_rate=functools.partial(single_epoch_linear_schedule, args=args),
+ eps=args.eps,
+ )
+
+ if args.gradient_accumulation_steps > 1:
+ optimizer = optax.MultiSteps(optimizer, args.gradient_accumulation_steps)
+ state = TrainState.create(
+ apply_fn=None,
+ params=RewardModelParams(
+ backbone_params=flax.core.FrozenDict({"params": reward_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ reward_head.init(
+ init_key,
+ jnp.ones(reward_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ ),
+ tx=optimizer,
+ )
+ return state, reward_backbone, reward_head
+
+
+def get_reward(
+ params: RewardModelParams,
+ reward_backbone,
+ reward_head,
+ query_responses_ids: jnp.ndarray,
+ args: Args,
+):
+ """Get reward for each queiry--response pair."""
+ assert query_responses_ids.ndim == 2
+ # query_responses_ids: [batch_size, length]
+
+ # mask out padding tokens
+ attention_mask = query_responses_ids != args.pad_token_id
+ query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ reward_latents = reward_backbone.module.apply(
+ variables=params.backbone_params,
+ input_ids=query_responses_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ ).hidden_states[-1]
+ # shape: [batch_size, length, hidden_size]
+
+ last_reward_latents = reward_latents[:, -1, :]
+ # shape: [batch_size, hidden_size]
+
+ reward = reward_head.apply(variables=params.head_params, x=last_reward_latents)
+ # shape: [batch_size, 1]
+ return reward
+
+
+def set_reward_state_head_params(
+ reward_state: TrainState, gain: float = 1.0, bias: float = 0.0
+):
+ """Set gain and bias of the reward head.
+ Args:
+ reward_state: Reward state.
+ gain: Gain of the reward head.
+ bias: Bias of the reward head.
+
+ Example:
+ reward_state = set_reward_state_head_params(
+ reward_state, gain=0.1, bias=0.2)
+ print(reward_state.params.head_params['params'])
+ """
+ flat_head_params = traverse_util.flatten_dict(
+ reward_state.params.head_params, sep="/"
+ )
+
+ flat_head_params["params/reward_gain"] = jnp.array(gain, dtype=jnp.float32)
+ flat_head_params["params/reward_bias"] = jnp.array(bias, dtype=jnp.float32)
+
+ unflat_head_params = freeze(traverse_util.unflatten_dict(flat_head_params, sep="/"))
+
+ reward_state = reward_state.replace(
+ params=RewardModelParams(
+ backbone_params=reward_state.params.backbone_params,
+ head_params=unflat_head_params,
+ )
+ )
+ return reward_state
+
+
+def normalize(
+ args,
+ tokenizer,
+ pretrained_model,
+ reward_state,
+ iter_dataloader,
+ generation_config,
+ reward_backbone,
+ reward_head,
+):
+ # number of minibatches for computing the normalization statistics
+ n_batches = ceil_div(args.local_normalize_samples, args.rollout_batch_size)
+
+ # reset reward scales
+ reward_state = set_reward_state_head_params(reward_state, gain=1.0, bias=0.0)
+
+ def get_normalization_stats(reward_state):
+ """compute mean and std of rewards"""
+
+ sample_queries_responses = []
+ for _ in range(n_batches):
+ data = next(iter_dataloader)
+ queries = data["input_ids"]
+ queries = right_padding_to_left_padding(
+ data["input_ids"], args.pad_token_id
+ )
+ query_responses = generate(
+ pretrained_model, queries, tokenizer, generation_config
+ )
+ sample_queries_responses.append(query_responses)
+
+ rewards = []
+ for query_responses in sample_queries_responses:
+ rewards.append(
+ get_reward(
+ reward_state.params,
+ reward_backbone,
+ reward_head,
+ query_responses,
+ args,
+ )
+ )
+ # Here, len(rewards) = n_batches
+ # each rewards[i] is a (args.rollout_batch_size, 1) array.
+
+ rewards = np.concatenate(rewards)
+ # rewards shape: [args.local_normalize_samples, 1]
+ mean, std = rewards.mean(), rewards.std()
+ print(f"mean: {mean}, std: {std}")
+ return mean, std
+
+ mean, std = get_normalization_stats(reward_state)
+ target_mean, target_std = 0.0, 1.0
+ gain = target_std / std
+ bias = target_mean - gain * mean
+ print(f"gain: {gain}, bias: {bias}")
+
+ # do normalization
+ reward_state = set_reward_state_head_params(reward_state, gain=gain, bias=bias)
+
+ # validate normalization
+ _, _ = get_normalization_stats(reward_state)
+ return reward_state
+
+
+def prepare_left_padded_query_responses_with_labels(dataset, args):
+ """Prepare left padded, concatenated queries and responses, and add labels.
+ Args:
+ dataset: a dictionary that contains 'query', 'best', and 'sample{i}',
+ where i is from 0 to args.labels.num_labels-1.
+ args: a dataclass that contains 'labels.num_labels' and 'pad_token_id'.
+
+ Returns:
+ queries_responses: array of concatenated queries and responses, with shape
+ [num_queires, num_responses_per_query, max_query_len + max_response_len]
+ labels:
+ array of the best response idx for each label, with shape
+ [num_queires, 1]
+ """
+
+ labels = np.array(dataset["best"])
+ # [num_queires,]
+
+ queries = np.stack(dataset["query"])
+ # [num_queires, max_query_length]
+
+ queries = np.repeat(queries, args.labels.num_labels, axis=0)
+ queries = rearrange(queries, "(q r) l -> q r l", r=args.labels.num_labels)
+ # [num_queires, num_queires, max_query_length]
+
+ responses = np.array(
+ [np.stack(dataset[f"sample{i}"]) for i in range(args.labels.num_labels)]
+ )
+ # [num_response_per_query, num_queires, max_response_len]
+
+ responses = rearrange(responses, "r q l -> q r l")
+ # [num_queires, num_responses_per_query, max_response_len]
+
+ queries_responses = np.concatenate([queries, responses], axis=-1)
+ # [num_queires, num_responses_per_query, max_query_length + max_response_len]
+
+ queries_responses[queries_responses == OPENAI_PAD_TOKEN_ID] = args.pad_token_id
+
+ queries_responses = right_padding_to_left_padding(
+ rearrange(queries_responses, "q r l -> (q r) l"), pad_id=args.pad_token_id,
+ )
+
+ queries_responses = rearrange(
+ queries_responses, "(q r) l -> q r l", r=args.labels.num_labels
+ )
+ # [num_queires, num_responses_per_query, max_query_len + max_response_len]
+ return queries_responses, labels
+
+
+def get_dataloader_iter(rng, dataset_tokens, dataset_labels, args):
+ """Get iteration of dataloader."""
+ assert dataset_tokens.shape[0] == dataset_labels.shape[0]
+ num_samples = dataset_tokens.shape[0]
+
+ steps_per_epoch = num_samples // args.batch_size
+ perms = jax.random.permutation(rng, num_samples)
+ # Skip incomplete batch:
+ perms = perms[: steps_per_epoch * args.batch_size]
+ perms = perms.reshape((steps_per_epoch, args.batch_size))
+
+ for perm in perms:
+ batch = (dataset_tokens[perm], dataset_labels[perm])
+ yield batch
+
+
+def train_step(state, batch, reward_backbone, reward_head, args):
+ """Train reward model for one step."""
+ query_responses, labels = batch
+ query_responses_ids = rearrange(query_responses, "q r l -> (q r) l")
+ # query_responses_ids: [num_queries * num_responses_per_query, length]
+
+ def loss_function(params):
+ logits = get_reward(
+ params, reward_backbone, reward_head, query_responses_ids, args
+ )
+
+ logits_reshaped = rearrange(logits, "(q r) 1 -> q r", r=args.labels.num_labels)
+
+ loss = optax.softmax_cross_entropy_with_integer_labels(
+ logits_reshaped, labels
+ ).mean()
+
+ accuracy = (logits_reshaped.argmax(axis=1) == labels).astype("float32").mean()
+ return loss, accuracy
+
+ loss_grad_fn = jax.value_and_grad(loss_function, has_aux=True)
+ (loss, accuracy), grads = loss_grad_fn(state.params)
+ grads = jax.lax.pmean(grads, "batch")
+ state = state.apply_gradients(grads=grads)
+ loss = jax.lax.pmean(loss, axis_name="batch")
+ accuracy = jax.lax.pmean(accuracy, axis_name="batch")
+ return state, {"loss": loss, "accuracy": accuracy}
+
+
+def val_step(state, batch, reward_backbone, reward_head, args):
+ """Eval reward model for one step."""
+ query_responses, labels = batch
+ query_responses_ids = rearrange(query_responses, "q r l -> (q r) l")
+ # query_responses_ids: [num_queries * num_responses_per_query, length]
+
+ def loss_function(params):
+ logits = get_reward(
+ params, reward_backbone, reward_head, query_responses_ids, args
+ )
+
+ logits_reshaped = rearrange(logits, "(q r) 1 -> q r", r=args.labels.num_labels)
+
+ loss = optax.softmax_cross_entropy_with_integer_labels(
+ logits_reshaped, labels
+ ).mean()
+
+ accuracy = (logits_reshaped.argmax(axis=1) == labels).astype("float32").mean()
+ return loss, accuracy
+
+ loss, accuracy = loss_function(state.params)
+ loss = jax.lax.pmean(loss, axis_name="batch")
+ accuracy = jax.lax.pmean(accuracy, axis_name="batch")
+ return {"loss": loss, "accuracy": accuracy}
+
+
+def train(args: Args):
+ args.world_size = len(jax.devices())
+
+ args.batch_size = int(args.local_batch_size * args.world_size)
+ args.normalize_samples = int(args.local_normalize_samples * args.world_size)
+ args.local_micro_batch_size = exact_div(
+ args.local_batch_size, args.gradient_accumulation_steps
+ )
+
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".") | Maybe use the boilerplate here https://github.com/vwxyzjn/cleanba/blob/352aa6ff495c7cdca02890ec92feb01fd20c2e63/cleanba/cleanba_impala.py#L459-L503, which helps you scale beyond 8 GPU with local_devices and global_devices.
Also e.g.,
```
args.world_size = jax.process_count()
args.local_rank = jax.process_index()
```
Then later you can do
```
if args.track and args.local_rank == 0:
import wandb
wandb.init(
project=args.wandb_project_name,
entity=args.wandb_entity,
sync_tensorboard=True,
config=vars(args),
name=run_name,
save_code=True,
)
``` |
lm-human-preference-details | github_2023 | python | 13 | vwxyzjn | vwxyzjn | @@ -0,0 +1,765 @@
+""" Train jax-based reward model for LM human preference details."""
+import os
+from dataclasses import asdict, dataclass, field
+from typing import Optional
+import time
+import functools
+import numpy as np
+from torch.utils.data import DataLoader, IterableDataset
+import jax
+import jax.numpy as jnp
+import orbax
+import optax
+from einops import rearrange
+import tyro
+from datasets import load_dataset
+from rich.pretty import pprint
+import transformers
+import flax
+from flax.training import common_utils
+from flax.training import orbax_utils
+from flax.core.frozen_dict import freeze
+from flax import traverse_util, jax_utils
+from flax.training.train_state import TrainState
+import flax.linen as nn
+
+from lm_human_preference_details.data import DATASET
+from torch.utils import tensorboard
+
+
+@dataclass
+class LabelHParams:
+ type: str = None
+ num_train: int = 4992
+ num_labels: int = 4
+ source: str = None
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "lm_human_preference_details"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ label_dataset: str = "sentiment/offline_5k.json"
+ """the name of the dataset to use for labels in
+ `https://huggingface.co/datasets/vwxyzjn/lm-human-preferences`"""
+ local_batch_size: int = 4
+ """per rank batch size"""
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ lr: float = 0.00005
+ """the learning rate"""
+ eps: float = 1e-5
+ """the epsilon for AdamW"""
+ rollout_batch_size: int = 512 # decrease this to e.g. 64 if OOM
+ """rollout batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ """the number of processes to use"""
+ batch_size: tyro.conf.Suppress[int] = None
+ """the batch size across all ranks"""
+ local_normalize_samples: int = 256
+ """Samples used to estimate reward mean and std"""
+ normalize_samples: tyro.conf.Suppress[int] = None
+ """Samples used to estimate reward mean and std across all ranks"""
+ debug_normalize: int = 0
+ """Samples used to check that normalization worked"""
+ normalize_before: bool = True
+ """Whether, before training, to normalize the rewards on the policy to the
+ scales on the training buffer. (For comparisons, just use mean 0, var 1.)"""
+ normalize_after: bool = True
+ """Whether, after training, to normalize the rewards on the ref policy to
+ mean 0, var 1 (so the KL coefficient always has the same meaning)."""
+ print_sample_output_freq: int = 10
+ """How often to print sample output"""
+ save_path: str = "models/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = False
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ labels: LabelHParams = field(default_factory=LabelHParams)
+
+
+OPENAI_PAD_TOKEN_ID = 50259
+
+
+@flax.struct.dataclass
+class RewardModelParams:
+ """Parameters for the reward model."""
+
+ backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+class RewardHead(nn.Module):
+ """Affine transform head for the reward model.
+
+ Attributes:
+ head_input_size: Size of the input to the head. The weights of the linear
+ layer are initialized from mean-zero Gaussians with std
+ 1/sqrt(head_input_size + 1).
+
+ Example:
+ model = RewardHead(head_input_size=768)
+ variables = model.init(jax.random.PRNGKey(0),
+ jnp.ones((1, 6, 768)))
+ """
+
+ head_input_size: int
+
+ def setup(self):
+ self.reward_linear = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(
+ stddev=1 / np.sqrt(self.head_input_size + 1)
+ ),
+ bias_init=nn.initializers.zeros_init(),
+ )
+ self.reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ self.reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = self.reward_linear(x)
+ x = x * self.reward_gain + self.reward_bias
+ return x
+
+
+class MyDataset(IterableDataset):
+ """A dataset for reward model normalization."""
+
+ def __init__(
+ self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
+ ):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ """Convert from right padding to left padding."""
+ assert tokens.ndim == 2
+ return np.array(
+ [
+ [pad_id] * (row == pad_id).sum() + [x for x in row if x != pad_id]
+ for row in tokens
+ ]
+ )
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a/b}")
+ return q
+
+
+# TODO: pmap `generate` to accelerate reward model normalization?
+def generate(pretrained_model, queries, args, generation_config):
+ """generate in a way that does not affect padding tokens"""
+ context_length = queries.shape[1]
+ attention_mask = queries != args.pad_token_id
+ # set padding tokens to 0
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = pretrained_model.generate(
+ input_ids=input_ids,
+ attention_mask=attention_mask.astype("int32"),
+ # position_ids=attention_mask.cumsum(1) - attention_mask.long(),
+ # generation collapsed if this was turned on.
+ # TODO: why does generation collapse with this?
+ generation_config=generation_config,
+ return_dict_in_generate=True,
+ )
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+
+def single_epoch_linear_schedule(global_step, args):
+ """ anneal learning rate linearly to reach 0 after one epoch."""
+ frac = 1.0 - global_step * args.batch_size / args.labels.num_train
+ return args.lr * frac
+
+
+def create_initial_reward_state_and_models(init_key, args):
+ # pylint: disable=redefined-outer-name
+ """reate reward model and initial reward state."""
+
+ reward_backbone = transformers.FlaxAutoModelForCausalLM.from_pretrained(
+ args.base_model
+ )
+ reward_head = RewardHead(head_input_size=reward_backbone.config.hidden_size)
+
+ if args.use_tensorflow_adam:
+ raise NotImplementedError("tensorflow adam is not implemented yet.")
+ else:
+ optimizer = optax.adam(
+ learning_rate=functools.partial(single_epoch_linear_schedule, args=args),
+ eps=args.eps,
+ )
+
+ if args.gradient_accumulation_steps > 1:
+ optimizer = optax.MultiSteps(optimizer, args.gradient_accumulation_steps)
+ state = TrainState.create(
+ apply_fn=None,
+ params=RewardModelParams(
+ backbone_params=flax.core.FrozenDict({"params": reward_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ reward_head.init(
+ init_key,
+ jnp.ones(reward_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ ),
+ tx=optimizer,
+ )
+ return state, reward_backbone, reward_head
+
+
+def get_reward(
+ params: RewardModelParams,
+ reward_backbone,
+ reward_head,
+ query_responses_ids: jnp.ndarray,
+ args: Args,
+):
+ """Get reward for each queiry--response pair."""
+ assert query_responses_ids.ndim == 2
+ # query_responses_ids: [batch_size, length]
+
+ # mask out padding tokens
+ attention_mask = query_responses_ids != args.pad_token_id
+ query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ reward_latents = reward_backbone.module.apply(
+ variables=params.backbone_params,
+ input_ids=query_responses_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ ).hidden_states[-1]
+ # shape: [batch_size, length, hidden_size]
+
+ last_reward_latents = reward_latents[:, -1, :]
+ # shape: [batch_size, hidden_size]
+
+ reward = reward_head.apply(variables=params.head_params, x=last_reward_latents)
+ # shape: [batch_size, 1]
+ return reward
+
+
+def set_reward_state_head_params(
+ reward_state: TrainState, gain: float = 1.0, bias: float = 0.0
+):
+ """Set gain and bias of the reward head.
+ Args:
+ reward_state: Reward state.
+ gain: Gain of the reward head.
+ bias: Bias of the reward head.
+
+ Example:
+ reward_state = set_reward_state_head_params(
+ reward_state, gain=0.1, bias=0.2)
+ print(reward_state.params.head_params['params'])
+ """
+ flat_head_params = traverse_util.flatten_dict(
+ reward_state.params.head_params, sep="/"
+ )
+
+ flat_head_params["params/reward_gain"] = jnp.array(gain, dtype=jnp.float32)
+ flat_head_params["params/reward_bias"] = jnp.array(bias, dtype=jnp.float32)
+
+ unflat_head_params = freeze(traverse_util.unflatten_dict(flat_head_params, sep="/"))
+
+ reward_state = reward_state.replace(
+ params=RewardModelParams(
+ backbone_params=reward_state.params.backbone_params,
+ head_params=unflat_head_params,
+ )
+ )
+ return reward_state
+
+
+def normalize(
+ args,
+ tokenizer,
+ pretrained_model,
+ reward_state,
+ iter_dataloader,
+ generation_config,
+ reward_backbone,
+ reward_head,
+):
+ # number of minibatches for computing the normalization statistics
+ n_batches = ceil_div(args.local_normalize_samples, args.rollout_batch_size)
+
+ # reset reward scales
+ reward_state = set_reward_state_head_params(reward_state, gain=1.0, bias=0.0)
+
+ def get_normalization_stats(reward_state):
+ """compute mean and std of rewards"""
+
+ sample_queries_responses = []
+ for _ in range(n_batches):
+ data = next(iter_dataloader)
+ queries = data["input_ids"]
+ queries = right_padding_to_left_padding(
+ data["input_ids"], args.pad_token_id
+ )
+ query_responses = generate(
+ pretrained_model, queries, tokenizer, generation_config
+ )
+ sample_queries_responses.append(query_responses)
+
+ rewards = []
+ for query_responses in sample_queries_responses:
+ rewards.append(
+ get_reward(
+ reward_state.params,
+ reward_backbone,
+ reward_head,
+ query_responses,
+ args,
+ )
+ )
+ # Here, len(rewards) = n_batches
+ # each rewards[i] is a (args.rollout_batch_size, 1) array.
+
+ rewards = np.concatenate(rewards)
+ # rewards shape: [args.local_normalize_samples, 1]
+ mean, std = rewards.mean(), rewards.std()
+ print(f"mean: {mean}, std: {std}")
+ return mean, std
+
+ mean, std = get_normalization_stats(reward_state)
+ target_mean, target_std = 0.0, 1.0
+ gain = target_std / std
+ bias = target_mean - gain * mean
+ print(f"gain: {gain}, bias: {bias}")
+
+ # do normalization
+ reward_state = set_reward_state_head_params(reward_state, gain=gain, bias=bias)
+
+ # validate normalization
+ _, _ = get_normalization_stats(reward_state)
+ return reward_state
+
+
+def prepare_left_padded_query_responses_with_labels(dataset, args):
+ """Prepare left padded, concatenated queries and responses, and add labels.
+ Args:
+ dataset: a dictionary that contains 'query', 'best', and 'sample{i}',
+ where i is from 0 to args.labels.num_labels-1.
+ args: a dataclass that contains 'labels.num_labels' and 'pad_token_id'.
+
+ Returns:
+ queries_responses: array of concatenated queries and responses, with shape
+ [num_queires, num_responses_per_query, max_query_len + max_response_len]
+ labels:
+ array of the best response idx for each label, with shape
+ [num_queires, 1]
+ """
+
+ labels = np.array(dataset["best"])
+ # [num_queires,]
+
+ queries = np.stack(dataset["query"])
+ # [num_queires, max_query_length]
+
+ queries = np.repeat(queries, args.labels.num_labels, axis=0)
+ queries = rearrange(queries, "(q r) l -> q r l", r=args.labels.num_labels)
+ # [num_queires, num_queires, max_query_length]
+
+ responses = np.array(
+ [np.stack(dataset[f"sample{i}"]) for i in range(args.labels.num_labels)]
+ )
+ # [num_response_per_query, num_queires, max_response_len]
+
+ responses = rearrange(responses, "r q l -> q r l")
+ # [num_queires, num_responses_per_query, max_response_len]
+
+ queries_responses = np.concatenate([queries, responses], axis=-1)
+ # [num_queires, num_responses_per_query, max_query_length + max_response_len]
+
+ queries_responses[queries_responses == OPENAI_PAD_TOKEN_ID] = args.pad_token_id
+
+ queries_responses = right_padding_to_left_padding(
+ rearrange(queries_responses, "q r l -> (q r) l"), pad_id=args.pad_token_id,
+ )
+
+ queries_responses = rearrange(
+ queries_responses, "(q r) l -> q r l", r=args.labels.num_labels
+ )
+ # [num_queires, num_responses_per_query, max_query_len + max_response_len]
+ return queries_responses, labels
+
+
+def get_dataloader_iter(rng, dataset_tokens, dataset_labels, args):
+ """Get iteration of dataloader."""
+ assert dataset_tokens.shape[0] == dataset_labels.shape[0]
+ num_samples = dataset_tokens.shape[0]
+
+ steps_per_epoch = num_samples // args.batch_size
+ perms = jax.random.permutation(rng, num_samples)
+ # Skip incomplete batch:
+ perms = perms[: steps_per_epoch * args.batch_size]
+ perms = perms.reshape((steps_per_epoch, args.batch_size))
+
+ for perm in perms:
+ batch = (dataset_tokens[perm], dataset_labels[perm])
+ yield batch
+
+
+def train_step(state, batch, reward_backbone, reward_head, args):
+ """Train reward model for one step."""
+ query_responses, labels = batch
+ query_responses_ids = rearrange(query_responses, "q r l -> (q r) l")
+ # query_responses_ids: [num_queries * num_responses_per_query, length]
+
+ def loss_function(params):
+ logits = get_reward(
+ params, reward_backbone, reward_head, query_responses_ids, args
+ )
+
+ logits_reshaped = rearrange(logits, "(q r) 1 -> q r", r=args.labels.num_labels)
+
+ loss = optax.softmax_cross_entropy_with_integer_labels(
+ logits_reshaped, labels
+ ).mean()
+
+ accuracy = (logits_reshaped.argmax(axis=1) == labels).astype("float32").mean()
+ return loss, accuracy
+
+ loss_grad_fn = jax.value_and_grad(loss_function, has_aux=True)
+ (loss, accuracy), grads = loss_grad_fn(state.params)
+ grads = jax.lax.pmean(grads, "batch")
+ state = state.apply_gradients(grads=grads)
+ loss = jax.lax.pmean(loss, axis_name="batch")
+ accuracy = jax.lax.pmean(accuracy, axis_name="batch")
+ return state, {"loss": loss, "accuracy": accuracy}
+
+
+def val_step(state, batch, reward_backbone, reward_head, args):
+ """Eval reward model for one step."""
+ query_responses, labels = batch
+ query_responses_ids = rearrange(query_responses, "q r l -> (q r) l")
+ # query_responses_ids: [num_queries * num_responses_per_query, length]
+
+ def loss_function(params):
+ logits = get_reward(
+ params, reward_backbone, reward_head, query_responses_ids, args
+ )
+
+ logits_reshaped = rearrange(logits, "(q r) 1 -> q r", r=args.labels.num_labels)
+
+ loss = optax.softmax_cross_entropy_with_integer_labels(
+ logits_reshaped, labels
+ ).mean()
+
+ accuracy = (logits_reshaped.argmax(axis=1) == labels).astype("float32").mean()
+ return loss, accuracy
+
+ loss, accuracy = loss_function(state.params)
+ loss = jax.lax.pmean(loss, axis_name="batch")
+ accuracy = jax.lax.pmean(accuracy, axis_name="batch")
+ return {"loss": loss, "accuracy": accuracy}
+
+
+def train(args: Args):
+ args.world_size = len(jax.devices())
+
+ args.batch_size = int(args.local_batch_size * args.world_size)
+ args.normalize_samples = int(args.local_normalize_samples * args.world_size)
+ args.local_micro_batch_size = exact_div(
+ args.local_batch_size, args.gradient_accumulation_steps
+ )
+
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+
+ writer = tensorboard.SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s"
+ % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ args.base_model, padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ args.pad_token_id = tokenizer.pad_token_id
+
+ untrained_model = transformers.FlaxAutoModelForCausalLM.from_pretrained(
+ args.base_model
+ )
+
+ reward_state, reward_backbone, reward_head = create_initial_reward_state_and_models(
+ jax.random.PRNGKey(args.seed), args
+ ) | This is more of a style thing — in CleanRL's style, we try to avoid creating a function unless it is actually used multiple times like `right_padding_to_left_padding`. This is because when reading this line of code I would then need to traverse up to see where the code is defined, causing some jumping around looking experinece. |
lm-human-preference-details | github_2023 | python | 13 | vwxyzjn | vwxyzjn | @@ -0,0 +1,765 @@
+""" Train jax-based reward model for LM human preference details."""
+import os
+from dataclasses import asdict, dataclass, field
+from typing import Optional
+import time
+import functools
+import numpy as np
+from torch.utils.data import DataLoader, IterableDataset
+import jax
+import jax.numpy as jnp
+import orbax
+import optax
+from einops import rearrange
+import tyro
+from datasets import load_dataset
+from rich.pretty import pprint
+import transformers
+import flax
+from flax.training import common_utils
+from flax.training import orbax_utils
+from flax.core.frozen_dict import freeze
+from flax import traverse_util, jax_utils
+from flax.training.train_state import TrainState
+import flax.linen as nn
+
+from lm_human_preference_details.data import DATASET
+from torch.utils import tensorboard
+
+
+@dataclass
+class LabelHParams:
+ type: str = None
+ num_train: int = 4992
+ num_labels: int = 4
+ source: str = None
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "lm_human_preference_details"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ label_dataset: str = "sentiment/offline_5k.json"
+ """the name of the dataset to use for labels in
+ `https://huggingface.co/datasets/vwxyzjn/lm-human-preferences`"""
+ local_batch_size: int = 4
+ """per rank batch size"""
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ lr: float = 0.00005
+ """the learning rate"""
+ eps: float = 1e-5
+ """the epsilon for AdamW"""
+ rollout_batch_size: int = 512 # decrease this to e.g. 64 if OOM
+ """rollout batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ """the number of processes to use"""
+ batch_size: tyro.conf.Suppress[int] = None
+ """the batch size across all ranks"""
+ local_normalize_samples: int = 256
+ """Samples used to estimate reward mean and std"""
+ normalize_samples: tyro.conf.Suppress[int] = None
+ """Samples used to estimate reward mean and std across all ranks"""
+ debug_normalize: int = 0
+ """Samples used to check that normalization worked"""
+ normalize_before: bool = True
+ """Whether, before training, to normalize the rewards on the policy to the
+ scales on the training buffer. (For comparisons, just use mean 0, var 1.)"""
+ normalize_after: bool = True
+ """Whether, after training, to normalize the rewards on the ref policy to
+ mean 0, var 1 (so the KL coefficient always has the same meaning)."""
+ print_sample_output_freq: int = 10
+ """How often to print sample output"""
+ save_path: str = "models/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = False
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ labels: LabelHParams = field(default_factory=LabelHParams)
+
+
+OPENAI_PAD_TOKEN_ID = 50259
+
+
+@flax.struct.dataclass
+class RewardModelParams:
+ """Parameters for the reward model."""
+
+ backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+class RewardHead(nn.Module):
+ """Affine transform head for the reward model.
+
+ Attributes:
+ head_input_size: Size of the input to the head. The weights of the linear
+ layer are initialized from mean-zero Gaussians with std
+ 1/sqrt(head_input_size + 1).
+
+ Example:
+ model = RewardHead(head_input_size=768)
+ variables = model.init(jax.random.PRNGKey(0),
+ jnp.ones((1, 6, 768)))
+ """
+
+ head_input_size: int
+
+ def setup(self):
+ self.reward_linear = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(
+ stddev=1 / np.sqrt(self.head_input_size + 1)
+ ),
+ bias_init=nn.initializers.zeros_init(),
+ )
+ self.reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ self.reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = self.reward_linear(x)
+ x = x * self.reward_gain + self.reward_bias
+ return x
+
+
+class MyDataset(IterableDataset):
+ """A dataset for reward model normalization."""
+
+ def __init__(
+ self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
+ ):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ """Convert from right padding to left padding."""
+ assert tokens.ndim == 2
+ return np.array(
+ [
+ [pad_id] * (row == pad_id).sum() + [x for x in row if x != pad_id]
+ for row in tokens
+ ]
+ )
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a/b}")
+ return q
+
+
+# TODO: pmap `generate` to accelerate reward model normalization?
+def generate(pretrained_model, queries, args, generation_config):
+ """generate in a way that does not affect padding tokens"""
+ context_length = queries.shape[1]
+ attention_mask = queries != args.pad_token_id
+ # set padding tokens to 0
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = pretrained_model.generate(
+ input_ids=input_ids,
+ attention_mask=attention_mask.astype("int32"),
+ # position_ids=attention_mask.cumsum(1) - attention_mask.long(),
+ # generation collapsed if this was turned on.
+ # TODO: why does generation collapse with this?
+ generation_config=generation_config,
+ return_dict_in_generate=True,
+ )
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+
+def single_epoch_linear_schedule(global_step, args):
+ """ anneal learning rate linearly to reach 0 after one epoch."""
+ frac = 1.0 - global_step * args.batch_size / args.labels.num_train
+ return args.lr * frac
+
+
def create_initial_reward_state_and_models(init_key, args):
    # pylint: disable=redefined-outer-name
    """Create the reward model (backbone + head) and its initial train state.

    Args:
        init_key: PRNG key used to initialize the reward head parameters.
        args: Run configuration; reads `base_model`, `use_tensorflow_adam`,
            `eps`, `gradient_accumulation_steps` and the LR-schedule fields.

    Returns:
        A (train_state, reward_backbone, reward_head) tuple.
    """

    reward_backbone = transformers.FlaxAutoModelForCausalLM.from_pretrained(
        args.base_model
    )
    # the head consumes the backbone's final hidden state
    reward_head = RewardHead(head_input_size=reward_backbone.config.hidden_size)

    if args.use_tensorflow_adam:
        raise NotImplementedError("tensorflow adam is not implemented yet.")
    else:
        optimizer = optax.adam(
            learning_rate=functools.partial(single_epoch_linear_schedule, args=args),
            eps=args.eps,
        )

    if args.gradient_accumulation_steps > 1:
        # accumulate micro-batch gradients before each optimizer update
        optimizer = optax.MultiSteps(optimizer, args.gradient_accumulation_steps)
    state = TrainState.create(
        # apply_fn is unused; backbone/head are applied manually (see get_reward)
        apply_fn=None,
        params=RewardModelParams(
            backbone_params=flax.core.FrozenDict({"params": reward_backbone.params}),
            head_params=flax.core.FrozenDict(
                reward_head.init(
                    init_key,
                    # dummy [1, 1, hidden_size] input just to shape the head params
                    jnp.ones(reward_backbone.config.hidden_size)[None, None, :],
                )
            ),
        ),
        tx=optimizer,
    )
    return state, reward_backbone, reward_head
+
+
def get_reward(
    params: RewardModelParams,
    reward_backbone,
    reward_head,
    query_responses_ids: jnp.ndarray,
    args: Args,
):
    """Score each query--response pair with the reward model.

    Args:
        params: Combined backbone + head parameters.
        reward_backbone: Pretrained causal LM whose last hidden state feeds the head.
        reward_head: `RewardHead` mapping a hidden state to a scalar reward.
        query_responses_ids: Token ids of shape [batch_size, length].
        args: Only `args.pad_token_id` is read.

    Returns:
        Rewards of shape [batch_size, 1].
    """
    assert query_responses_ids.ndim == 2
    # query_responses_ids: [batch_size, length]

    # mask out padding tokens
    attention_mask = query_responses_ids != args.pad_token_id
    query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)

    # assign position ids
    # (cumsum of the boolean mask gives 1-based positions for real tokens;
    # subtracting the mask makes them 0-based)
    position_ids = attention_mask.cumsum(1) - attention_mask

    reward_latents = reward_backbone.module.apply(
        variables=params.backbone_params,
        input_ids=query_responses_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        output_hidden_states=True,
    ).hidden_states[-1]
    # shape: [batch_size, length, hidden_size]

    # NOTE(review): taking position -1 assumes inputs are left-padded so the
    # final position holds the last real token -- confirm callers always left-pad.
    last_reward_latents = reward_latents[:, -1, :]
    # shape: [batch_size, hidden_size]

    reward = reward_head.apply(variables=params.head_params, x=last_reward_latents)
    # shape: [batch_size, 1]
    return reward
+
+
def set_reward_state_head_params(
    reward_state: TrainState, gain: float = 1.0, bias: float = 0.0
):
    """Overwrite the reward head's gain and bias parameters.

    Args:
        reward_state: Current reward train state.
        gain: New value for ``reward_gain``.
        bias: New value for ``reward_bias``.

    Returns:
        A new train state whose head params carry the given gain/bias.

    Example:
        reward_state = set_reward_state_head_params(
            reward_state, gain=0.1, bias=0.2)
        print(reward_state.params.head_params['params'])
    """
    flat = dict(traverse_util.flatten_dict(reward_state.params.head_params, sep="/"))
    flat["params/reward_gain"] = jnp.array(gain, dtype=jnp.float32)
    flat["params/reward_bias"] = jnp.array(bias, dtype=jnp.float32)
    new_head_params = freeze(traverse_util.unflatten_dict(flat, sep="/"))
    return reward_state.replace(
        params=RewardModelParams(
            backbone_params=reward_state.params.backbone_params,
            head_params=new_head_params,
        )
    )
+
+
def normalize(
    args,
    tokenizer,
    pretrained_model,
    reward_state,
    iter_dataloader,
    generation_config,
    reward_backbone,
    reward_head,
):
    """Calibrate the reward head so sampled rewards have mean 0 and std 1.

    Resets the head's gain/bias, estimates the reward mean/std over
    `args.local_normalize_samples` sampled query/response pairs, rescales the
    head so the rewards hit mean 0 / std 1, then re-estimates the statistics
    to validate the calibration.

    NOTE: `tokenizer` is retained for call compatibility; it is no longer
    read here.

    Returns:
        The reward train state with the recalibrated head.
    """
    # number of minibatches for computing the normalization statistics
    n_batches = ceil_div(args.local_normalize_samples, args.rollout_batch_size)

    # reset reward scales
    reward_state = set_reward_state_head_params(reward_state, gain=1.0, bias=0.0)

    def get_normalization_stats(reward_state):
        """Sample responses and compute the mean and std of their rewards."""

        sample_queries_responses = []
        for _ in range(n_batches):
            data = next(iter_dataloader)
            queries = right_padding_to_left_padding(
                data["input_ids"], args.pad_token_id
            )
            # BUGFIX: `tokenizer` used to be passed where `generate` expects
            # `args`; it only worked because tokenizers also expose
            # `.pad_token_id`. Pass `args` to match the signature.
            query_responses = generate(
                pretrained_model, queries, args, generation_config
            )
            sample_queries_responses.append(query_responses)

        rewards = []
        for query_responses in sample_queries_responses:
            rewards.append(
                get_reward(
                    reward_state.params,
                    reward_backbone,
                    reward_head,
                    query_responses,
                    args,
                )
            )
        # Here, len(rewards) = n_batches
        # each rewards[i] is a (args.rollout_batch_size, 1) array.

        rewards = np.concatenate(rewards)
        # rewards shape: [args.local_normalize_samples, 1]
        mean, std = rewards.mean(), rewards.std()
        print(f"mean: {mean}, std: {std}")
        return mean, std

    mean, std = get_normalization_stats(reward_state)
    # solve for gain/bias so that gain * reward + bias has the target statistics
    target_mean, target_std = 0.0, 1.0
    gain = target_std / std
    bias = target_mean - gain * mean
    print(f"gain: {gain}, bias: {bias}")

    # do normalization
    reward_state = set_reward_state_head_params(reward_state, gain=gain, bias=bias)

    # validate normalization
    _, _ = get_normalization_stats(reward_state)
    return reward_state
+
+
def prepare_left_padded_query_responses_with_labels(dataset, args):
    """Prepare left padded, concatenated queries and responses, and add labels.
    Args:
        dataset: a dictionary that contains 'query', 'best', and 'sample{i}',
        where i is from 0 to args.labels.num_labels-1.
        args: a dataclass that contains 'labels.num_labels' and 'pad_token_id'.

    Returns:
        queries_responses: array of concatenated queries and responses, with shape
        [num_queries, num_responses_per_query, max_query_len + max_response_len]
        labels:
            array of the best response idx for each query, with shape
            [num_queries,]
    """

    labels = np.array(dataset["best"])
    # [num_queries,]

    queries = np.stack(dataset["query"])
    # [num_queries, max_query_length]

    queries = np.repeat(queries, args.labels.num_labels, axis=0)
    queries = rearrange(queries, "(q r) l -> q r l", r=args.labels.num_labels)
    # [num_queries, num_responses_per_query, max_query_length]

    responses = np.array(
        [np.stack(dataset[f"sample{i}"]) for i in range(args.labels.num_labels)]
    )
    # [num_responses_per_query, num_queries, max_response_len]

    responses = rearrange(responses, "r q l -> q r l")
    # [num_queries, num_responses_per_query, max_response_len]

    queries_responses = np.concatenate([queries, responses], axis=-1)
    # [num_queries, num_responses_per_query, max_query_length + max_response_len]

    # remap the pad token id used in the released label data to the local one
    queries_responses[queries_responses == OPENAI_PAD_TOKEN_ID] = args.pad_token_id

    queries_responses = right_padding_to_left_padding(
        rearrange(queries_responses, "q r l -> (q r) l"), pad_id=args.pad_token_id,
    )

    queries_responses = rearrange(
        queries_responses, "(q r) l -> q r l", r=args.labels.num_labels
    )
    # [num_queries, num_responses_per_query, max_query_len + max_response_len]
    return queries_responses, labels
+
+
def get_dataloader_iter(rng, dataset_tokens, dataset_labels, args):
    """Yield shuffled (tokens, labels) batches, dropping the final partial batch."""
    assert dataset_tokens.shape[0] == dataset_labels.shape[0]
    num_samples = dataset_tokens.shape[0]

    num_batches = num_samples // args.batch_size
    shuffled = jax.random.permutation(rng, num_samples)
    # drop the tail that does not fill a whole batch
    batch_indices = shuffled[: num_batches * args.batch_size].reshape(
        (num_batches, args.batch_size)
    )

    for indices in batch_indices:
        yield dataset_tokens[indices], dataset_labels[indices]
+
+
def train_step(state, batch, reward_backbone, reward_head, args):
    """Run one pmapped optimization step of the reward model.

    `batch` is (query_responses [q, r, l], labels [q]); returns the updated
    state plus pmean-reduced loss/accuracy metrics.
    """
    query_responses, labels = batch
    flat_ids = rearrange(query_responses, "q r l -> (q r) l")
    # flat_ids: [num_queries * num_responses_per_query, length]

    def compute_loss(params):
        scores = get_reward(params, reward_backbone, reward_head, flat_ids, args)
        per_query_scores = rearrange(
            scores, "(q r) 1 -> q r", r=args.labels.num_labels
        )
        loss = optax.softmax_cross_entropy_with_integer_labels(
            per_query_scores, labels
        ).mean()
        accuracy = (
            (per_query_scores.argmax(axis=1) == labels).astype("float32").mean()
        )
        return loss, accuracy

    (loss, accuracy), grads = jax.value_and_grad(compute_loss, has_aux=True)(
        state.params
    )
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    metrics = {
        "loss": jax.lax.pmean(loss, axis_name="batch"),
        "accuracy": jax.lax.pmean(accuracy, axis_name="batch"),
    }
    return state, metrics
+
+
def val_step(state, batch, reward_backbone, reward_head, args):
    """Evaluate the reward model on one batch (no parameter update).

    Returns pmean-reduced loss/accuracy metrics.
    """
    query_responses, labels = batch
    flat_ids = rearrange(query_responses, "q r l -> (q r) l")
    # flat_ids: [num_queries * num_responses_per_query, length]

    scores = get_reward(state.params, reward_backbone, reward_head, flat_ids, args)
    per_query_scores = rearrange(scores, "(q r) 1 -> q r", r=args.labels.num_labels)
    loss = optax.softmax_cross_entropy_with_integer_labels(
        per_query_scores, labels
    ).mean()
    accuracy = (per_query_scores.argmax(axis=1) == labels).astype("float32").mean()
    return {
        "loss": jax.lax.pmean(loss, axis_name="batch"),
        "accuracy": jax.lax.pmean(accuracy, axis_name="batch"),
    }
+
+
+def train(args: Args):
+ args.world_size = len(jax.devices())
+
+ args.batch_size = int(args.local_batch_size * args.world_size)
+ args.normalize_samples = int(args.local_normalize_samples * args.world_size)
+ args.local_micro_batch_size = exact_div(
+ args.local_batch_size, args.gradient_accumulation_steps
+ )
+
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+
+ writer = tensorboard.SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s"
+ % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ args.base_model, padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ args.pad_token_id = tokenizer.pad_token_id
+
+ untrained_model = transformers.FlaxAutoModelForCausalLM.from_pretrained(
+ args.base_model
+ )
+
+ reward_state, reward_backbone, reward_head = create_initial_reward_state_and_models(
+ jax.random.PRNGKey(args.seed), args
+ )
+
+ p_train_step = jax.pmap(
+ functools.partial(
+ train_step,
+ args=args,
+ reward_backbone=reward_backbone,
+ reward_head=reward_head,
+ ),
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+ p_val_step = jax.pmap(
+ functools.partial(
+ val_step,
+ args=args,
+ reward_backbone=reward_backbone,
+ reward_head=reward_head,
+ ),
+ axis_name="batch",
+ )
+
+ normalization_dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=args.seed, | This can be changed to `args.seed + jax.process_index() * 100003 # Prime` |
lm-human-preference-details | github_2023 | python | 13 | vwxyzjn | vwxyzjn | @@ -0,0 +1,765 @@
+""" Train jax-based reward model for LM human preference details."""
+import os
+from dataclasses import asdict, dataclass, field
+from typing import Optional
+import time
+import functools
+import numpy as np
+from torch.utils.data import DataLoader, IterableDataset
+import jax
+import jax.numpy as jnp
+import orbax
+import optax
+from einops import rearrange
+import tyro
+from datasets import load_dataset
+from rich.pretty import pprint
+import transformers
+import flax
+from flax.training import common_utils
+from flax.training import orbax_utils
+from flax.core.frozen_dict import freeze
+from flax import traverse_util, jax_utils
+from flax.training.train_state import TrainState
+import flax.linen as nn
+
+from lm_human_preference_details.data import DATASET
+from torch.utils import tensorboard
+
+
@dataclass
class LabelHParams:
    """Hyperparameters for training on human labels."""

    # label type identifier (left unset by default)
    type: Optional[str] = None
    # number of labelled examples used for the (single) training epoch
    num_train: int = 4992
    # number of candidate responses per query; labels pick the best one
    num_labels: int = 4
    # label source identifier (left unset by default)
    source: Optional[str] = None
+
+
@dataclass
class TaskHParams:
    """Hyperparameters describing the query/response task."""

    # Query params
    query_length: int = 64  # queries are padded/truncated to this many tokens
    query_dataset: str = "books"  # key into the DATASET registry of text generators
    query_prefix: str = ""
    query_suffix: str = ""
    start_text: Optional[str] = None  # if set, keep only text after this token (see MyDataset)
    end_text: Optional[str] = None  # if set, truncate the query at this token (see MyDataset)

    # Response params
    response_length: int = 24  # number of tokens to sample per response

    # LM params
    temperature: float = 0.7  # sampling temperature
+
+
@dataclass
class Args:
    """Run configuration for reward-model training (per-field docs below)."""

    # common args
    exp_name: str = os.path.basename(__file__)[: -len(".py")]
    """the name of this experiment"""
    seed: int = 1
    """seed of the experiment"""
    track: bool = False
    """if toggled, this experiment will be tracked with Weights and Biases"""
    wandb_project_name: str = "lm_human_preference_details"
    """the wandb's project name"""
    wandb_entity: Optional[str] = None
    """the entity (team) of wandb's project"""
    cuda: bool = True
    """Whether to use cuda if available."""
    run_name: tyro.conf.Suppress[str] = None
    """TO BE FILLED: a unique name of this run"""

    base_model: str = "gpt2"
    """the name of the pretrained model to use"""
    label_dataset: str = "sentiment/offline_5k.json"
    """the name of the dataset to use for labels in
    `https://huggingface.co/datasets/vwxyzjn/lm-human-preferences`"""
    local_batch_size: int = 4
    """per rank batch size"""
    gradient_accumulation_steps: int = 1
    """gradient accumulation steps"""
    local_micro_batch_size: tyro.conf.Suppress[int] = None
    """per rank micro batch size"""
    lr: float = 0.00005
    """the learning rate"""
    eps: float = 1e-5
    """the epsilon for AdamW"""
    rollout_batch_size: int = 512  # decrease this to e.g. 64 if OOM
    """rollout batch size"""
    world_size: tyro.conf.Suppress[int] = None
    """the number of processes to use"""
    batch_size: tyro.conf.Suppress[int] = None
    """the batch size across all ranks"""
    local_normalize_samples: int = 256
    """Samples used to estimate reward mean and std"""
    normalize_samples: tyro.conf.Suppress[int] = None
    """Samples used to estimate reward mean and std across all ranks"""
    debug_normalize: int = 0
    """Samples used to check that normalization worked"""
    normalize_before: bool = True
    """Whether, before training, to normalize the rewards on the policy to the
    scales on the training buffer. (For comparisons, just use mean 0, var 1.)"""
    normalize_after: bool = True
    """Whether, after training, to normalize the rewards on the ref policy to
    mean 0, var 1 (so the KL coefficient always has the same meaning)."""
    print_sample_output_freq: int = 10
    """How often to print sample output"""
    save_path: str = "models/"
    """Where to save the model"""
    use_tensorflow_adam: bool = False
    """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
    task: TaskHParams = field(default_factory=TaskHParams)
    labels: LabelHParams = field(default_factory=LabelHParams)
+
+
# Pad token id used in the released `lm-human-preferences` label data; it is
# remapped to the local tokenizer's pad id in
# `prepare_left_padded_query_responses_with_labels`.
OPENAI_PAD_TOKEN_ID = 50259
+
+
@flax.struct.dataclass
class RewardModelParams:
    """Parameters for the reward model."""

    # parameters of the pretrained causal-LM backbone
    backbone_params: flax.core.FrozenDict
    # parameters of the scalar reward head (linear layer + gain/bias)
    head_params: flax.core.FrozenDict
+
+
class RewardHead(nn.Module):
    """Scalar reward head with a learned affine output transform.

    Projects the input features to a single scalar, then applies
    ``reward_gain * x + reward_bias`` so the reward scale can be
    recalibrated (see reward normalization).

    Attributes:
      head_input_size: Size of the input to the head. The linear layer's
        weights are initialized from mean-zero Gaussians with std
        1/sqrt(head_input_size + 1).

    Example:
      model = RewardHead(head_input_size=768)
      variables = model.init(jax.random.PRNGKey(0), jnp.ones((1, 6, 768)))
    """

    head_input_size: int

    def setup(self):
        # NOTE: attribute/param names double as parameter-tree keys; keep stable.
        self.reward_linear = nn.Dense(
            1,
            kernel_init=nn.initializers.normal(
                stddev=1 / np.sqrt(self.head_input_size + 1)
            ),
            bias_init=nn.initializers.zeros_init(),
        )
        self.reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
        self.reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())

    def __call__(self, x):
        assert x.shape[-1] == self.head_input_size
        return self.reward_linear(x) * self.reward_gain + self.reward_bias
```
class AutoModelForCausalLMWithRewardHead(nn.Module):
model_name: str
@nn.compact
def __call__(self, x):
backbone = transformers.FlaxAutoModelForCausalLM.from_pretrained(
self.model_name
)
x = backbone(
input_ids=x,
# attention_mask=np.ones(x),
# position_ids=xxx,
return_dict=True,
output_hidden_states=True,
).logits
x = nn.Dense(
1,
kernel_init=nn.initializers.normal(
stddev=1 / jnp.sqrt(backbone.config.hidden_size + 1)
),
bias_init=nn.initializers.zeros_init(),
)(x)
reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
x = x * reward_gain + reward_bias
return x
reward_model = AutoModelForCausalLMWithRewardHead("gpt2")
params = reward_model.init(
jax.random.PRNGKey(0),
jnp.array([
[11, 339, 561],
]),
)
```
However, this only initialize the params for the reward model and the backbone params are not included.
Maybe for now we can change it to
```
class RewardHead(nn.Module):
head_input_size: int
@nn.compact
def __call__(self, x):
assert x.shape[-1] == self.head_input_size
x = nn.Dense(
1,
kernel_init=nn.initializers.normal(
stddev=1 / np.sqrt(self.head_input_size + 1)
),
bias_init=nn.initializers.zeros_init(),
)(x)
reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
x = x * reward_gain + reward_bias
return x
``` |
lm-human-preference-details | github_2023 | python | 13 | vwxyzjn | vwxyzjn | @@ -0,0 +1,765 @@
+""" Train jax-based reward model for LM human preference details."""
+import os
+from dataclasses import asdict, dataclass, field
+from typing import Optional
+import time
+import functools
+import numpy as np
+from torch.utils.data import DataLoader, IterableDataset
+import jax
+import jax.numpy as jnp
+import orbax
+import optax
+from einops import rearrange
+import tyro
+from datasets import load_dataset
+from rich.pretty import pprint
+import transformers
+import flax
+from flax.training import common_utils
+from flax.training import orbax_utils
+from flax.core.frozen_dict import freeze
+from flax import traverse_util, jax_utils
+from flax.training.train_state import TrainState
+import flax.linen as nn
+
+from lm_human_preference_details.data import DATASET
+from torch.utils import tensorboard
+
+
+@dataclass
+class LabelHParams:
+ type: str = None
+ num_train: int = 4992
+ num_labels: int = 4
+ source: str = None
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "lm_human_preference_details"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ label_dataset: str = "sentiment/offline_5k.json"
+ """the name of the dataset to use for labels in
+ `https://huggingface.co/datasets/vwxyzjn/lm-human-preferences`"""
+ local_batch_size: int = 4
+ """per rank batch size"""
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ lr: float = 0.00005
+ """the learning rate"""
+ eps: float = 1e-5
+ """the epsilon for AdamW"""
+ rollout_batch_size: int = 512 # decrease this to e.g. 64 if OOM
+ """rollout batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ """the number of processes to use"""
+ batch_size: tyro.conf.Suppress[int] = None
+ """the batch size across all ranks"""
+ local_normalize_samples: int = 256
+ """Samples used to estimate reward mean and std"""
+ normalize_samples: tyro.conf.Suppress[int] = None
+ """Samples used to estimate reward mean and std across all ranks"""
+ debug_normalize: int = 0
+ """Samples used to check that normalization worked"""
+ normalize_before: bool = True
+ """Whether, before training, to normalize the rewards on the policy to the
+ scales on the training buffer. (For comparisons, just use mean 0, var 1.)"""
+ normalize_after: bool = True
+ """Whether, after training, to normalize the rewards on the ref policy to
+ mean 0, var 1 (so the KL coefficient always has the same meaning)."""
+ print_sample_output_freq: int = 10
+ """How often to print sample output"""
+ save_path: str = "models/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = False
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ labels: LabelHParams = field(default_factory=LabelHParams)
+
+
+OPENAI_PAD_TOKEN_ID = 50259
+
+
+@flax.struct.dataclass
+class RewardModelParams:
+ """Parameters for the reward model."""
+
+ backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+class RewardHead(nn.Module):
+ """Affine transform head for the reward model.
+
+ Attributes:
+ head_input_size: Size of the input to the head. The weights of the linear
+ layer are initialized from mean-zero Gaussians with std
+ 1/sqrt(head_input_size + 1).
+
+ Example:
+ model = RewardHead(head_input_size=768)
+ variables = model.init(jax.random.PRNGKey(0),
+ jnp.ones((1, 6, 768)))
+ """
+
+ head_input_size: int
+
+ def setup(self):
+ self.reward_linear = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(
+ stddev=1 / np.sqrt(self.head_input_size + 1)
+ ),
+ bias_init=nn.initializers.zeros_init(),
+ )
+ self.reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ self.reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = self.reward_linear(x)
+ x = x * self.reward_gain + self.reward_bias
+ return x
+
+
class MyDataset(IterableDataset):
    """Tokenized query stream used to normalize the reward model.

    Iterates texts from ``generator("train", seed, shuffle=True)`` and yields
    queries padded to ``query_length`` tokens. If ``start_text``/``end_text``
    are given, each text is clipped to the span after the first start token
    and up to the last end token; texts missing a required marker are skipped.
    """

    def __init__(
        self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
    ):
        self.generator = generator
        self.tokenizer = tokenizer
        self.query_length = query_length
        self.start_text = start_text
        self.end_text = end_text
        self.seed = seed
        token_to_index = tokenizer.get_vocab()
        # single-token markers delimiting the usable part of each sample
        self.start_token = token_to_index[start_text] if self.start_text else None
        self.end_token = token_to_index[end_text] if self.end_text else None

    def __iter__(self):
        for text in self.generator("train", self.seed, shuffle=True):
            tokens = self.tokenizer.encode(text)
            if self.start_token is not None:
                # keep only the text after the first start token
                # BUGFIX: narrowed bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit) to the ValueError that
                # list.index raises when the token is absent.
                try:
                    first_index = tokens.index(self.start_token) + 1
                except ValueError:  # start token absent: skip this sample
                    continue
                if first_index < len(tokens):
                    tokens = tokens[first_index:]
            tokens = tokens[: self.query_length]
            if self.end_token is not None:
                # truncate after the last end token within the query window
                try:
                    last_index = len(tokens) - tokens[::-1].index(self.end_token)
                except ValueError:  # end token absent: skip this sample
                    continue
                tokens = tokens[:last_index]
            output = self.tokenizer.pad(
                {"input_ids": tokens},
                padding="max_length",
                max_length=self.query_length,
                return_tensors="pt",
                return_attention_mask=True,
            )
            yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ """Convert from right padding to left padding."""
+ assert tokens.ndim == 2
+ return np.array(
+ [
+ [pad_id] * (row == pad_id).sum() + [x for x in row if x != pad_id]
+ for row in tokens
+ ]
+ )
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a/b}")
+ return q
+
+
+# TODO: pmap `generate` to accelerate reward model normalization?
+def generate(pretrained_model, queries, args, generation_config):
+ """generate in a way that does not affect padding tokens"""
+ context_length = queries.shape[1]
+ attention_mask = queries != args.pad_token_id
+ # set padding tokens to 0
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = pretrained_model.generate(
+ input_ids=input_ids,
+ attention_mask=attention_mask.astype("int32"),
+ # position_ids=attention_mask.cumsum(1) - attention_mask.long(),
+ # generation collapsed if this was turned on.
+ # TODO: why does generation collapse with this?
+ generation_config=generation_config,
+ return_dict_in_generate=True,
+ )
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+
+def single_epoch_linear_schedule(global_step, args):
+ """ anneal learning rate linearly to reach 0 after one epoch."""
+ frac = 1.0 - global_step * args.batch_size / args.labels.num_train
+ return args.lr * frac
+
+
+def create_initial_reward_state_and_models(init_key, args):
+ # pylint: disable=redefined-outer-name
+ """reate reward model and initial reward state."""
+
+ reward_backbone = transformers.FlaxAutoModelForCausalLM.from_pretrained(
+ args.base_model
+ )
+ reward_head = RewardHead(head_input_size=reward_backbone.config.hidden_size)
+
+ if args.use_tensorflow_adam:
+ raise NotImplementedError("tensorflow adam is not implemented yet.") |
Btw optax's adam can be made equivalent to tensorflow's adam with the following optimizer. We should include this `use_tensorflow_adam` in the jax version as well.
https://gist.github.com/vwxyzjn/7005a81ba39deb3bc8043041bd715be1#file-main_jax-py-L69-L78 |
lm-human-preference-details | github_2023 | python | 13 | vwxyzjn | vwxyzjn | @@ -0,0 +1,765 @@
+""" Train jax-based reward model for LM human preference details."""
+import os
+from dataclasses import asdict, dataclass, field
+from typing import Optional
+import time
+import functools
+import numpy as np
+from torch.utils.data import DataLoader, IterableDataset
+import jax
+import jax.numpy as jnp
+import orbax
+import optax
+from einops import rearrange
+import tyro
+from datasets import load_dataset
+from rich.pretty import pprint
+import transformers
+import flax
+from flax.training import common_utils
+from flax.training import orbax_utils
+from flax.core.frozen_dict import freeze
+from flax import traverse_util, jax_utils
+from flax.training.train_state import TrainState
+import flax.linen as nn
+
+from lm_human_preference_details.data import DATASET
+from torch.utils import tensorboard
+
+
+@dataclass
+class LabelHParams:
+ type: str = None
+ num_train: int = 4992
+ num_labels: int = 4
+ source: str = None
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "lm_human_preference_details"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ label_dataset: str = "sentiment/offline_5k.json"
+ """the name of the dataset to use for labels in
+ `https://huggingface.co/datasets/vwxyzjn/lm-human-preferences`"""
+ local_batch_size: int = 4
+ """per rank batch size"""
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ lr: float = 0.00005
+ """the learning rate"""
+ eps: float = 1e-5
+ """the epsilon for AdamW"""
+ rollout_batch_size: int = 512 # decrease this to e.g. 64 if OOM
+ """rollout batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ """the number of processes to use"""
+ batch_size: tyro.conf.Suppress[int] = None
+ """the batch size across all ranks"""
+ local_normalize_samples: int = 256
+ """Samples used to estimate reward mean and std"""
+ normalize_samples: tyro.conf.Suppress[int] = None
+ """Samples used to estimate reward mean and std across all ranks"""
+ debug_normalize: int = 0
+ """Samples used to check that normalization worked"""
+ normalize_before: bool = True
+ """Whether, before training, to normalize the rewards on the policy to the
+ scales on the training buffer. (For comparisons, just use mean 0, var 1.)"""
+ normalize_after: bool = True
+ """Whether, after training, to normalize the rewards on the ref policy to
+ mean 0, var 1 (so the KL coefficient always has the same meaning)."""
+ print_sample_output_freq: int = 10
+ """How often to print sample output"""
+ save_path: str = "models/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = False
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ labels: LabelHParams = field(default_factory=LabelHParams)
+
+
+OPENAI_PAD_TOKEN_ID = 50259
+
+
+@flax.struct.dataclass
+class RewardModelParams:
+ """Parameters for the reward model."""
+
+ backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+class RewardHead(nn.Module):
+ """Affine transform head for the reward model.
+
+ Attributes:
+ head_input_size: Size of the input to the head. The weights of the linear
+ layer are initialized from mean-zero Gaussians with std
+ 1/sqrt(head_input_size + 1).
+
+ Example:
+ model = RewardHead(head_input_size=768)
+ variables = model.init(jax.random.PRNGKey(0),
+ jnp.ones((1, 6, 768)))
+ """
+
+ head_input_size: int
+
+ def setup(self):
+ self.reward_linear = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(
+ stddev=1 / np.sqrt(self.head_input_size + 1)
+ ),
+ bias_init=nn.initializers.zeros_init(),
+ )
+ self.reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ self.reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = self.reward_linear(x)
+ x = x * self.reward_gain + self.reward_bias
+ return x
+
+
+class MyDataset(IterableDataset):
+ """A dataset for reward model normalization."""
+
+ def __init__(
+ self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
+ ):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
def right_padding_to_left_padding(tokens, pad_id):
    """Move each row's padding tokens from the right side to the left side.

    Args:
        tokens: 2-D array of token ids padded with ``pad_id``.
        pad_id: token id used for padding.

    Returns:
        A numpy array of the same shape with every row left-padded instead.
    """
    assert tokens.ndim == 2
    shifted_rows = []
    for row in tokens:
        content = [tok for tok in row if tok != pad_id]
        shifted_rows.append([pad_id] * (len(row) - len(content)) + content)
    return np.array(shifted_rows)
+
+
def ceil_div(a, b):
    """Integer division of ``a`` by ``b``, rounding up."""
    return -(-a // b)
+
+
def exact_div(a, b):
    """Divide ``a`` by ``b``, raising ValueError unless the division is exact."""
    quotient, remainder = divmod(a, b)
    if remainder:
        raise ValueError(f"Inexact division: {a} / {b} = {a/b}")
    return quotient
+
+
# TODO: pmap `generate` to accelerate reward model normalization?
def generate(pretrained_model, queries, args, generation_config):
    """generate in a way that does not affect padding tokens

    Args:
        pretrained_model: Flax causal LM exposing `.generate`.
        queries: [batch, query_length] token ids, left-padded with
            `args.pad_token_id`.
        args: object exposing `pad_token_id`. NOTE(review): `normalize` passes
            the *tokenizer* here; that only works because the tokenizer happens
            to expose the same `pad_token_id` attribute — worth unifying.
        generation_config: `transformers.GenerationConfig` controlling sampling.

    Returns:
        [batch, query_length + new_tokens] array: the original queries
        (padding included) with the generated continuation appended.
    """
    context_length = queries.shape[1]
    attention_mask = queries != args.pad_token_id
    # set padding tokens to 0 so the embedding lookup never sees the pad id
    input_ids = jnp.where(attention_mask, queries, 0)
    output = pretrained_model.generate(
        input_ids=input_ids,
        attention_mask=attention_mask.astype("int32"),
        # position_ids=attention_mask.cumsum(1) - attention_mask.long(),
        # generation collapsed if this was turned on.
        # TODO: why does generation collapse with this?
        generation_config=generation_config,
        return_dict_in_generate=True,
    )
    # splice the continuation onto the *original* queries so the query-side
    # padding tokens are preserved untouched
    return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+
def single_epoch_linear_schedule(global_step, args):
    """Anneal the learning rate linearly from ``args.lr`` to 0 over one epoch.

    Args:
        global_step: current optimizer step (may be a traced jax scalar).
        args: must expose ``lr``, ``batch_size`` and ``labels.num_train``.

    Returns:
        The learning rate for ``global_step``, clamped at 0 so steps beyond one
        epoch never yield a negative rate (which would flip the update sign).
    """
    frac = 1.0 - global_step * args.batch_size / args.labels.num_train
    return args.lr * jnp.maximum(frac, 0.0)
+
+
def create_initial_reward_state_and_models(init_key, args):
    # pylint: disable=redefined-outer-name
    """Create the reward model and its initial train state.

    Args:
        init_key: PRNG key used to initialize the reward head parameters.
        args: Args; uses `base_model`, `use_tensorflow_adam`, `eps` and
            `gradient_accumulation_steps`.

    Returns:
        (state, reward_backbone, reward_head): the TrainState holding both
        backbone and head parameters, the pretrained Flax causal LM, and the
        RewardHead module.
    """

    reward_backbone = transformers.FlaxAutoModelForCausalLM.from_pretrained(
        args.base_model
    )
    reward_head = RewardHead(head_input_size=reward_backbone.config.hidden_size)

    if args.use_tensorflow_adam:
        raise NotImplementedError("tensorflow adam is not implemented yet.")
    else:
        # learning rate is annealed linearly over one epoch of labels
        optimizer = optax.adam(
            learning_rate=functools.partial(single_epoch_linear_schedule, args=args),
            eps=args.eps,
        )

    if args.gradient_accumulation_steps > 1:
        optimizer = optax.MultiSteps(optimizer, args.gradient_accumulation_steps)
    state = TrainState.create(
        apply_fn=None,  # forward passes are done manually via backbone/head modules
        params=RewardModelParams(
            backbone_params=flax.core.FrozenDict({"params": reward_backbone.params}),
            head_params=flax.core.FrozenDict(
                reward_head.init(
                    init_key,
                    # dummy [1, 1, hidden_size] input, used only for shape inference
                    jnp.ones(reward_backbone.config.hidden_size)[None, None, :],
                )
            ),
        ),
        tx=optimizer,
    )
    return state, reward_backbone, reward_head
+
+
def get_reward(
    params: RewardModelParams,
    reward_backbone,
    reward_head,
    query_responses_ids: jnp.ndarray,
    args: Args,
):
    """Get the scalar reward for each query--response pair.

    Args:
        params: backbone + head parameters.
        reward_backbone: pretrained Flax causal LM whose last hidden state
            feeds the reward head.
        reward_head: RewardHead module.
        query_responses_ids: [batch_size, length] token ids, left-padded with
            `args.pad_token_id`.
        args: provides `pad_token_id`.

    Returns:
        [batch_size, 1] array of rewards.
    """
    assert query_responses_ids.ndim == 2
    # query_responses_ids: [batch_size, length]

    # mask out padding tokens (replace the pad id with 0 for the embedding lookup)
    attention_mask = query_responses_ids != args.pad_token_id
    query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)

    # assign position ids: 0-based positions counted over non-pad tokens only
    position_ids = attention_mask.cumsum(1) - attention_mask

    reward_latents = reward_backbone.module.apply(
        variables=params.backbone_params,
        input_ids=query_responses_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        output_hidden_states=True,
    ).hidden_states[-1]
    # shape: [batch_size, length, hidden_size]

    # take the final position; with left padding this is the last real token
    last_reward_latents = reward_latents[:, -1, :]
    # shape: [batch_size, hidden_size]

    reward = reward_head.apply(variables=params.head_params, x=last_reward_latents)
    # shape: [batch_size, 1]
    return reward
+
+
def set_reward_state_head_params(
    reward_state: TrainState, gain: float = 1.0, bias: float = 0.0
):
    """Return ``reward_state`` with the reward head's gain and bias overwritten.

    Args:
        reward_state: Reward train state to update.
        gain: New value for the head's ``reward_gain`` scalar.
        bias: New value for the head's ``reward_bias`` scalar.

    Example:
        reward_state = set_reward_state_head_params(
            reward_state, gain=0.1, bias=0.2)
        print(reward_state.params.head_params['params'])
    """
    # flatten so the two scalars can be addressed by "params/..." keys
    flat = traverse_util.flatten_dict(reward_state.params.head_params, sep="/")
    flat["params/reward_gain"] = jnp.array(gain, dtype=jnp.float32)
    flat["params/reward_bias"] = jnp.array(bias, dtype=jnp.float32)
    new_head_params = freeze(traverse_util.unflatten_dict(flat, sep="/"))

    # rebuild the parameter container, keeping the backbone untouched
    return reward_state.replace(
        params=RewardModelParams(
            backbone_params=reward_state.params.backbone_params,
            head_params=new_head_params,
        )
    )
+
+
def normalize(
    args,
    tokenizer,
    pretrained_model,
    reward_state,
    iter_dataloader,
    generation_config,
    reward_backbone,
    reward_head,
):
    """Affinely rescale the reward head so rewards have mean 0 and std 1.

    Samples `args.local_normalize_samples` query/response pairs from
    `pretrained_model`, measures the reward distribution, then sets the head's
    gain/bias so the distribution matches the target moments. The statistics
    are re-measured afterwards as a sanity check.

    Returns:
        The updated reward state with normalized gain/bias.
    """
    # number of minibatches for computing the normalization statistics
    n_batches = ceil_div(args.local_normalize_samples, args.rollout_batch_size)

    # reset reward scales so the measured statistics are those of the raw head
    reward_state = set_reward_state_head_params(reward_state, gain=1.0, bias=0.0)

    def get_normalization_stats(reward_state):
        """compute mean and std of rewards"""

        sample_queries_responses = []
        for _ in range(n_batches):
            data = next(iter_dataloader)
            queries = right_padding_to_left_padding(
                data["input_ids"], args.pad_token_id
            )
            # FIX: pass `args` (not `tokenizer`): `generate` reads
            # `pad_token_id` from its third parameter, which it names `args`;
            # the old call only worked because the tokenizer happens to expose
            # the same attribute.
            query_responses = generate(
                pretrained_model, queries, args, generation_config
            )
            sample_queries_responses.append(query_responses)

        rewards = [
            get_reward(
                reward_state.params,
                reward_backbone,
                reward_head,
                query_responses,
                args,
            )
            for query_responses in sample_queries_responses
        ]
        # len(rewards) == n_batches; each entry has shape (args.rollout_batch_size, 1)

        rewards = np.concatenate(rewards)
        # rewards shape: [args.local_normalize_samples, 1]
        mean, std = rewards.mean(), rewards.std()
        print(f"mean: {mean}, std: {std}")
        return mean, std

    mean, std = get_normalization_stats(reward_state)
    target_mean, target_std = 0.0, 1.0
    # reward' = gain * reward + bias must map (mean, std) -> (target_mean, target_std)
    gain = target_std / std
    bias = target_mean - gain * mean
    print(f"gain: {gain}, bias: {bias}")

    # do normalization
    reward_state = set_reward_state_head_params(reward_state, gain=gain, bias=bias)

    # validate normalization
    _, _ = get_normalization_stats(reward_state)
    return reward_state
+
+
def prepare_left_padded_query_responses_with_labels(dataset, args):
    """Prepare left padded, concatenated queries and responses, and add labels.
    Args:
        dataset: a dictionary that contains 'query', 'best', and 'sample{i}',
            where i is from 0 to args.labels.num_labels-1.
        args: a dataclass that contains 'labels.num_labels' and 'pad_token_id'.

    Returns:
        queries_responses: array of concatenated queries and responses, with shape
            [num_queries, num_responses_per_query, max_query_len + max_response_len]
        labels:
            array of the best response idx for each query, with shape
            [num_queries,]
    """

    labels = np.array(dataset["best"])
    # [num_queries,]

    queries = np.stack(dataset["query"])
    # [num_queries, max_query_length]

    # duplicate each query once per candidate response
    queries = np.repeat(queries, args.labels.num_labels, axis=0)
    queries = rearrange(queries, "(q r) l -> q r l", r=args.labels.num_labels)
    # [num_queries, num_responses_per_query, max_query_length]

    responses = np.array(
        [np.stack(dataset[f"sample{i}"]) for i in range(args.labels.num_labels)]
    )
    # [num_responses_per_query, num_queries, max_response_len]

    responses = rearrange(responses, "r q l -> q r l")
    # [num_queries, num_responses_per_query, max_response_len]

    queries_responses = np.concatenate([queries, responses], axis=-1)
    # [num_queries, num_responses_per_query, max_query_length + max_response_len]

    # swap OpenAI's pad id for the tokenizer's pad id used everywhere else
    queries_responses[queries_responses == OPENAI_PAD_TOKEN_ID] = args.pad_token_id

    queries_responses = right_padding_to_left_padding(
        rearrange(queries_responses, "q r l -> (q r) l"), pad_id=args.pad_token_id,
    )

    queries_responses = rearrange(
        queries_responses, "(q r) l -> q r l", r=args.labels.num_labels
    )
    # [num_queries, num_responses_per_query, max_query_len + max_response_len]
    return queries_responses, labels
+
+
def get_dataloader_iter(rng, dataset_tokens, dataset_labels, args):
    """Yield shuffled (tokens, labels) batches for one epoch.

    The permutation is drawn from ``rng``; trailing samples that do not fill a
    complete batch of ``args.batch_size`` are dropped.
    """
    assert dataset_tokens.shape[0] == dataset_labels.shape[0]
    num_samples = dataset_tokens.shape[0]
    steps_per_epoch = num_samples // args.batch_size

    shuffled = jax.random.permutation(rng, num_samples)
    shuffled = shuffled[: steps_per_epoch * args.batch_size]  # drop incomplete batch
    batch_indices = shuffled.reshape((steps_per_epoch, args.batch_size))

    for indices in batch_indices:
        yield dataset_tokens[indices], dataset_labels[indices]
+
+
def train_step(state, batch, reward_backbone, reward_head, args):
    """Train reward model for one step (runs inside `jax.pmap`).

    Args:
        state: replicated TrainState holding RewardModelParams.
        batch: (query_responses, labels); query_responses has shape
            [num_queries, num_responses_per_query, length] and labels holds the
            index of the best response per query.
        reward_backbone: pretrained LM used by `get_reward`.
        reward_head: RewardHead module used by `get_reward`.
        args: provides `labels.num_labels` and `pad_token_id`.

    Returns:
        (new_state, metrics) with loss/accuracy averaged over the "batch" axis.
    """
    query_responses, labels = batch
    query_responses_ids = rearrange(query_responses, "q r l -> (q r) l")
    # query_responses_ids: [num_queries * num_responses_per_query, length]

    def loss_function(params):
        # one scalar logit per (query, response) pair
        logits = get_reward(
            params, reward_backbone, reward_head, query_responses_ids, args
        )

        logits_reshaped = rearrange(logits, "(q r) 1 -> q r", r=args.labels.num_labels)

        # cross entropy over candidate responses: the labeled best response
        # should receive the highest reward
        loss = optax.softmax_cross_entropy_with_integer_labels(
            logits_reshaped, labels
        ).mean()

        accuracy = (logits_reshaped.argmax(axis=1) == labels).astype("float32").mean()
        return loss, accuracy

    loss_grad_fn = jax.value_and_grad(loss_function, has_aux=True)
    (loss, accuracy), grads = loss_grad_fn(state.params)
    # average gradients across devices before applying the update
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    # average metrics across devices for logging
    loss = jax.lax.pmean(loss, axis_name="batch")
    accuracy = jax.lax.pmean(accuracy, axis_name="batch")
    return state, {"loss": loss, "accuracy": accuracy}
+
+
def val_step(state, batch, reward_backbone, reward_head, args):
    """Eval reward model for one step (runs inside `jax.pmap`).

    Same batch layout as `train_step`, but parameters are left untouched; only
    device-averaged loss/accuracy metrics are returned.
    """
    query_responses, labels = batch
    query_responses_ids = rearrange(query_responses, "q r l -> (q r) l")
    # query_responses_ids: [num_queries * num_responses_per_query, length]

    def loss_function(params):
        # one scalar logit per (query, response) pair
        logits = get_reward(
            params, reward_backbone, reward_head, query_responses_ids, args
        )

        logits_reshaped = rearrange(logits, "(q r) 1 -> q r", r=args.labels.num_labels)

        # cross entropy over candidate responses: the labeled best response
        # should receive the highest reward
        loss = optax.softmax_cross_entropy_with_integer_labels(
            logits_reshaped, labels
        ).mean()

        accuracy = (logits_reshaped.argmax(axis=1) == labels).astype("float32").mean()
        return loss, accuracy

    loss, accuracy = loss_function(state.params)
    # average metrics across devices for logging
    loss = jax.lax.pmean(loss, axis_name="batch")
    accuracy = jax.lax.pmean(accuracy, axis_name="batch")
    return {"loss": loss, "accuracy": accuracy}
+
+
+def train(args: Args):
+ args.world_size = len(jax.devices())
+
+ args.batch_size = int(args.local_batch_size * args.world_size)
+ args.normalize_samples = int(args.local_normalize_samples * args.world_size)
+ args.local_micro_batch_size = exact_div(
+ args.local_batch_size, args.gradient_accumulation_steps
+ )
+
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+
+ writer = tensorboard.SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s"
+ % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ args.base_model, padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ args.pad_token_id = tokenizer.pad_token_id
+
+ untrained_model = transformers.FlaxAutoModelForCausalLM.from_pretrained(
+ args.base_model
+ )
+
+ reward_state, reward_backbone, reward_head = create_initial_reward_state_and_models(
+ jax.random.PRNGKey(args.seed), args
+ )
+
+ p_train_step = jax.pmap(
+ functools.partial(
+ train_step,
+ args=args,
+ reward_backbone=reward_backbone,
+ reward_head=reward_head,
+ ),
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+ p_val_step = jax.pmap(
+ functools.partial(
+ val_step,
+ args=args,
+ reward_backbone=reward_backbone,
+ reward_head=reward_head,
+ ),
+ axis_name="batch",
+ )
+
+ normalization_dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=args.seed,
+ start_text=args.task.start_text,
+ end_text=args.task.end_text,
+ )
+ normalization_dataloader = DataLoader(
+ normalization_dataset, batch_size=args.rollout_batch_size
+ )
+ iter_normalization_dataloader = iter(normalization_dataloader)
+
+ generation_config = transformers.GenerationConfig(
+ max_new_tokens=args.task.response_length,
+ min_new_tokens=args.task.response_length,
+ temperature=args.task.temperature,
+ top_k=0.0,
+ top_p=1.0,
+ do_sample=True,
+ pad_token_id=args.pad_token_id,
+ )
+
+ if args.normalize_before:
+ print("===Normalize reward model *before* training===")
+
+ # pylint: disable=E1101:no-member
+ print(
+ "before normalization. "
+ + f"Gain: {reward_state.params.head_params['params']['reward_gain']}"
+ + f" Bias: {reward_state.params.head_params['params']['reward_bias']}"
+ )
+
+ reward_state = normalize(
+ args,
+ tokenizer,
+ untrained_model,
+ reward_state,
+ iter_normalization_dataloader,
+ generation_config,
+ reward_backbone,
+ reward_head,
+ )
+
+ print(
+ "after normalization. "
+ + f"Gain: {reward_state.params.head_params['params']['reward_gain']}"
+ + f" Bias: {reward_state.params.head_params['params']['reward_bias']}"
+ )
+
+ reward_state = jax_utils.replicate(reward_state)
+
+ # `labeled_dataset` has keys
+ # `['sample0', 'query', 'best', 'sample3', 'sample1', 'sample2']`
+ labeled_dataset = load_dataset(
+ "vwxyzjn/lm-human-preferences", data_files=[args.label_dataset],
+ )["train"]
+ print("Num labels found in source:", len(labeled_dataset))
+ print("training on", args.labels.num_train, "in batches of", args.local_batch_size)
+
+ all_queries_responses, all_labels = prepare_left_padded_query_responses_with_labels(
+ labeled_dataset, args
+ )
+
+ assert args.labels.num_train < all_queries_responses.shape[0]
+ train_queries_responses = all_queries_responses[: args.labels.num_train]
+ train_labels = all_labels[: args.labels.num_train]
+
+ val_queries_responses = all_queries_responses[args.labels.num_train :]
+ val_labels = all_labels[args.labels.num_train :]
+
+ train_iter = get_dataloader_iter(
+        jax.random.PRNGKey(args.seed), | The common practice seems to be creating an RNG key once and then splitting it at the beginning. E.g.,
```
key = jax.random.PRNGKey(args.seed)
key, network_key, actor_key, critic_key = jax.random.split(key, 4)
```
lm-human-preference-details | github_2023 | python | 13 | vwxyzjn | vwxyzjn | @@ -0,0 +1,765 @@
+""" Train jax-based reward model for LM human preference details."""
+import os
+from dataclasses import asdict, dataclass, field
+from typing import Optional
+import time
+import functools
+import numpy as np
+from torch.utils.data import DataLoader, IterableDataset
+import jax
+import jax.numpy as jnp
+import orbax
+import optax
+from einops import rearrange
+import tyro
+from datasets import load_dataset
+from rich.pretty import pprint
+import transformers
+import flax
+from flax.training import common_utils
+from flax.training import orbax_utils
+from flax.core.frozen_dict import freeze
+from flax import traverse_util, jax_utils
+from flax.training.train_state import TrainState
+import flax.linen as nn
+
+from lm_human_preference_details.data import DATASET
+from torch.utils import tensorboard
+
+
@dataclass
class LabelHParams:
    """Hyperparameters describing the human-preference label set."""

    # Label type identifier; not referenced elsewhere in the visible code.
    type: Optional[str] = None
    # Number of labeled comparisons used for training (the rest go to validation).
    num_train: int = 4992
    # Number of candidate responses per query to choose between.
    num_labels: int = 4
    # Origin of the labels; not referenced elsewhere in the visible code.
    source: Optional[str] = None
+
+
@dataclass
class TaskHParams:
    """Hyperparameters for the query/response generation task."""

    # Query params
    # Number of tokens each query is truncated/padded to.
    query_length: int = 64
    # Key into `lm_human_preference_details.data.DATASET` selecting the text corpus.
    query_dataset: str = "books"
    # Text prepended/appended to each query; not referenced in the visible code.
    query_prefix: str = ""
    query_suffix: str = ""
    # Optional delimiter token texts: queries start right after the first
    # occurrence of `start_text` and stop at the last `end_text` (see MyDataset).
    start_text: Optional[str] = None
    end_text: Optional[str] = None

    # Response params
    # Number of tokens sampled per response (used as both min and max new tokens).
    response_length: int = 24

    # LM params
    # Sampling temperature used by the generation config.
    temperature: float = 0.7
+
@dataclass
class Args:
    """Command-line arguments for reward-model training (parsed with tyro)."""

    # common args
    exp_name: str = os.path.basename(__file__)[: -len(".py")]
    """the name of this experiment"""
    seed: int = 1
    """seed of the experiment"""
    track: bool = False
    """if toggled, this experiment will be tracked with Weights and Biases"""
    wandb_project_name: str = "lm_human_preference_details"
    """the wandb's project name"""
    wandb_entity: Optional[str] = None
    """the entity (team) of wandb's project"""
    cuda: bool = True
    """Whether to use cuda if available."""
    run_name: tyro.conf.Suppress[str] = None
    """TO BE FILLED: a unique name of this run"""

    base_model: str = "gpt2"
    """the name of the pretrained model to use"""
    label_dataset: str = "sentiment/offline_5k.json"
    """the name of the dataset to use for labels in
    `https://huggingface.co/datasets/vwxyzjn/lm-human-preferences`"""
    local_batch_size: int = 4
    """per rank batch size"""
    gradient_accumulation_steps: int = 1
    """gradient accumulation steps"""
    local_micro_batch_size: tyro.conf.Suppress[int] = None
    """per rank micro batch size"""
    lr: float = 0.00005
    """the learning rate"""
    eps: float = 1e-5
    """the epsilon for AdamW"""
    rollout_batch_size: int = 512  # decrease this to e.g. 64 if OOM
    """rollout batch size"""
    world_size: tyro.conf.Suppress[int] = None
    """the number of processes to use"""
    batch_size: tyro.conf.Suppress[int] = None
    """the batch size across all ranks"""
    local_normalize_samples: int = 256
    """Samples used to estimate reward mean and std"""
    normalize_samples: tyro.conf.Suppress[int] = None
    """Samples used to estimate reward mean and std across all ranks"""
    debug_normalize: int = 0
    """Samples used to check that normalization worked"""
    normalize_before: bool = True
    """Whether, before training, to normalize the rewards on the policy to the
    scales on the training buffer. (For comparisons, just use mean 0, var 1.)"""
    normalize_after: bool = True
    """Whether, after training, to normalize the rewards on the ref policy to
    mean 0, var 1 (so the KL coefficient always has the same meaning)."""
    print_sample_output_freq: int = 10
    """How often to print sample output"""
    save_path: str = "models/"
    """Where to save the model"""
    use_tensorflow_adam: bool = False
    """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
    task: TaskHParams = field(default_factory=TaskHParams)
    labels: LabelHParams = field(default_factory=LabelHParams)
+
+
+OPENAI_PAD_TOKEN_ID = 50259
+
+
@flax.struct.dataclass
class RewardModelParams:
    """Parameters for the reward model."""

    # Parameters of the pretrained causal-LM backbone ({"params": ...} tree).
    backbone_params: flax.core.FrozenDict
    # Parameters of the RewardHead (linear projection plus gain/bias scalars).
    head_params: flax.core.FrozenDict
+
+
class RewardHead(nn.Module):
    """Scalar reward head: a linear projection followed by a learnable affine
    transform (``gain * x + bias``).

    Attributes:
        head_input_size: Dimensionality of the incoming features. The linear
            kernel is sampled from a mean-zero Gaussian with std
            1/sqrt(head_input_size + 1).

    Example:
        model = RewardHead(head_input_size=768)
        variables = model.init(jax.random.PRNGKey(0),
                               jnp.ones((1, 6, 768)))
    """

    head_input_size: int

    def setup(self):
        kernel_std = 1 / np.sqrt(self.head_input_size + 1)
        self.reward_linear = nn.Dense(
            1,
            kernel_init=nn.initializers.normal(stddev=kernel_std),
            bias_init=nn.initializers.zeros_init(),
        )
        self.reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
        self.reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())

    def __call__(self, x):
        assert x.shape[-1] == self.head_input_size
        projected = self.reward_linear(x)
        return projected * self.reward_gain + self.reward_bias
+
+
class MyDataset(IterableDataset):
    """An iterable dataset of fixed-length tokenized queries.

    Streams text from `generator`, tokenizes it, optionally trims each sample
    to the span following `start_text` and ending at `end_text`, and pads every
    query to `query_length` tokens. Used for reward model normalization.
    """

    def __init__(
        self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
    ):
        """
        Args:
            generator: callable ``(split, seed, shuffle)`` yielding raw text samples.
            tokenizer: tokenizer exposing ``encode``, ``pad`` and ``get_vocab``.
            query_length: number of tokens each query is truncated/padded to.
            seed: seed forwarded to ``generator``.
            start_text: optional token text; queries begin right after its first
                occurrence (samples without it are skipped).
            end_text: optional token text; queries end at its last occurrence
                (samples without it are skipped).
        """
        self.generator = generator
        self.tokenizer = tokenizer
        self.query_length = query_length
        self.start_text = start_text
        self.end_text = end_text
        self.seed = seed
        token_to_index = tokenizer.get_vocab()
        self.start_token = token_to_index[start_text] if self.start_text else None
        self.end_token = token_to_index[end_text] if self.end_text else None

    def __iter__(self):
        for text in self.generator("train", self.seed, shuffle=True):
            tokens = self.tokenizer.encode(text)
            if self.start_token is not None:
                # keep only what follows the first start token; skip samples
                # that do not contain it at all
                try:
                    first_index = tokens.index(self.start_token) + 1
                    if first_index < len(tokens):
                        tokens = tokens[first_index:]
                except ValueError:  # was a bare `except`: don't swallow KeyboardInterrupt etc.
                    continue
            tokens = tokens[: self.query_length]
            if self.end_token is not None:
                # truncate at the last end token; skip samples without one
                try:
                    last_index = len(tokens) - tokens[::-1].index(self.end_token)
                    tokens = tokens[:last_index]
                except ValueError:
                    continue
            output = self.tokenizer.pad(
                {"input_ids": tokens},
                padding="max_length",
                max_length=self.query_length,
                return_tensors="pt",
                return_attention_mask=True,
            )
            yield output
+
+
def right_padding_to_left_padding(tokens, pad_id):
    """Move each row's padding tokens from the right side to the left side.

    Args:
        tokens: 2-D array of token ids padded with ``pad_id``.
        pad_id: token id used for padding.

    Returns:
        A numpy array of the same shape with every row left-padded instead.
    """
    assert tokens.ndim == 2
    shifted_rows = []
    for row in tokens:
        content = [tok for tok in row if tok != pad_id]
        shifted_rows.append([pad_id] * (len(row) - len(content)) + content)
    return np.array(shifted_rows)
+
+
def ceil_div(a, b):
    """Integer division of ``a`` by ``b``, rounding up."""
    return -(-a // b)
+
+
def exact_div(a, b):
    """Divide ``a`` by ``b``, raising ValueError unless the division is exact."""
    quotient, remainder = divmod(a, b)
    if remainder:
        raise ValueError(f"Inexact division: {a} / {b} = {a/b}")
    return quotient
+
+
# TODO: pmap `generate` to accelerate reward model normalization?
def generate(pretrained_model, queries, args, generation_config):
    """generate in a way that does not affect padding tokens

    Args:
        pretrained_model: Flax causal LM exposing `.generate`.
        queries: [batch, query_length] token ids, left-padded with
            `args.pad_token_id`.
        args: object exposing `pad_token_id`. NOTE(review): `normalize` passes
            the *tokenizer* here; that only works because the tokenizer happens
            to expose the same `pad_token_id` attribute — worth unifying.
        generation_config: `transformers.GenerationConfig` controlling sampling.

    Returns:
        [batch, query_length + new_tokens] array: the original queries
        (padding included) with the generated continuation appended.
    """
    context_length = queries.shape[1]
    attention_mask = queries != args.pad_token_id
    # set padding tokens to 0 so the embedding lookup never sees the pad id
    input_ids = jnp.where(attention_mask, queries, 0)
    output = pretrained_model.generate(
        input_ids=input_ids,
        attention_mask=attention_mask.astype("int32"),
        # position_ids=attention_mask.cumsum(1) - attention_mask.long(),
        # generation collapsed if this was turned on.
        # TODO: why does generation collapse with this?
        generation_config=generation_config,
        return_dict_in_generate=True,
    )
    # splice the continuation onto the *original* queries so the query-side
    # padding tokens are preserved untouched
    return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+
def single_epoch_linear_schedule(global_step, args):
    """Anneal the learning rate linearly from ``args.lr`` to 0 over one epoch.

    Args:
        global_step: current optimizer step (may be a traced jax scalar).
        args: must expose ``lr``, ``batch_size`` and ``labels.num_train``.

    Returns:
        The learning rate for ``global_step``, clamped at 0 so steps beyond one
        epoch never yield a negative rate (which would flip the update sign).
    """
    frac = 1.0 - global_step * args.batch_size / args.labels.num_train
    return args.lr * jnp.maximum(frac, 0.0)
+
+
def create_initial_reward_state_and_models(init_key, args):
    # pylint: disable=redefined-outer-name
    """Create the reward model and its initial train state.

    Args:
        init_key: PRNG key used to initialize the reward head parameters.
        args: Args; uses `base_model`, `use_tensorflow_adam`, `eps` and
            `gradient_accumulation_steps`.

    Returns:
        (state, reward_backbone, reward_head): the TrainState holding both
        backbone and head parameters, the pretrained Flax causal LM, and the
        RewardHead module.
    """

    reward_backbone = transformers.FlaxAutoModelForCausalLM.from_pretrained(
        args.base_model
    )
    reward_head = RewardHead(head_input_size=reward_backbone.config.hidden_size)

    if args.use_tensorflow_adam:
        raise NotImplementedError("tensorflow adam is not implemented yet.")
    else:
        # learning rate is annealed linearly over one epoch of labels
        optimizer = optax.adam(
            learning_rate=functools.partial(single_epoch_linear_schedule, args=args),
            eps=args.eps,
        )

    if args.gradient_accumulation_steps > 1:
        optimizer = optax.MultiSteps(optimizer, args.gradient_accumulation_steps)
    state = TrainState.create(
        apply_fn=None,  # forward passes are done manually via backbone/head modules
        params=RewardModelParams(
            backbone_params=flax.core.FrozenDict({"params": reward_backbone.params}),
            head_params=flax.core.FrozenDict(
                reward_head.init(
                    init_key,
                    # dummy [1, 1, hidden_size] input, used only for shape inference
                    jnp.ones(reward_backbone.config.hidden_size)[None, None, :],
                )
            ),
        ),
        tx=optimizer,
    )
    return state, reward_backbone, reward_head
+
+
def get_reward(
    params: RewardModelParams,
    reward_backbone,
    reward_head,
    query_responses_ids: jnp.ndarray,
    args: Args,
):
    """Get the scalar reward for each query--response pair.

    Args:
        params: backbone + head parameters.
        reward_backbone: pretrained Flax causal LM whose last hidden state
            feeds the reward head.
        reward_head: RewardHead module.
        query_responses_ids: [batch_size, length] token ids, left-padded with
            `args.pad_token_id`.
        args: provides `pad_token_id`.

    Returns:
        [batch_size, 1] array of rewards.
    """
    assert query_responses_ids.ndim == 2
    # query_responses_ids: [batch_size, length]

    # mask out padding tokens (replace the pad id with 0 for the embedding lookup)
    attention_mask = query_responses_ids != args.pad_token_id
    query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)

    # assign position ids: 0-based positions counted over non-pad tokens only
    position_ids = attention_mask.cumsum(1) - attention_mask

    reward_latents = reward_backbone.module.apply(
        variables=params.backbone_params,
        input_ids=query_responses_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        output_hidden_states=True,
    ).hidden_states[-1]
    # shape: [batch_size, length, hidden_size]

    # take the final position; with left padding this is the last real token
    last_reward_latents = reward_latents[:, -1, :]
    # shape: [batch_size, hidden_size]

    reward = reward_head.apply(variables=params.head_params, x=last_reward_latents)
    # shape: [batch_size, 1]
    return reward
+
+
def set_reward_state_head_params(
    reward_state: TrainState, gain: float = 1.0, bias: float = 0.0
):
    """Return ``reward_state`` with the reward head's gain and bias overwritten.

    Args:
        reward_state: Reward train state to update.
        gain: New value for the head's ``reward_gain`` scalar.
        bias: New value for the head's ``reward_bias`` scalar.

    Example:
        reward_state = set_reward_state_head_params(
            reward_state, gain=0.1, bias=0.2)
        print(reward_state.params.head_params['params'])
    """
    # flatten so the two scalars can be addressed by "params/..." keys
    flat = traverse_util.flatten_dict(reward_state.params.head_params, sep="/")
    flat["params/reward_gain"] = jnp.array(gain, dtype=jnp.float32)
    flat["params/reward_bias"] = jnp.array(bias, dtype=jnp.float32)
    new_head_params = freeze(traverse_util.unflatten_dict(flat, sep="/"))

    # rebuild the parameter container, keeping the backbone untouched
    return reward_state.replace(
        params=RewardModelParams(
            backbone_params=reward_state.params.backbone_params,
            head_params=new_head_params,
        )
    )
+
+
def normalize(
    args,
    tokenizer,
    pretrained_model,
    reward_state,
    iter_dataloader,
    generation_config,
    reward_backbone,
    reward_head,
):
    """Affinely rescale the reward head so rewards have mean 0 and std 1.

    Samples `args.local_normalize_samples` query/response pairs from
    `pretrained_model`, measures the reward distribution, then sets the head's
    gain/bias so the distribution matches the target moments. The statistics
    are re-measured afterwards as a sanity check.

    Returns:
        The updated reward state with normalized gain/bias.
    """
    # number of minibatches for computing the normalization statistics
    n_batches = ceil_div(args.local_normalize_samples, args.rollout_batch_size)

    # reset reward scales so the measured statistics are those of the raw head
    reward_state = set_reward_state_head_params(reward_state, gain=1.0, bias=0.0)

    def get_normalization_stats(reward_state):
        """compute mean and std of rewards"""

        sample_queries_responses = []
        for _ in range(n_batches):
            data = next(iter_dataloader)
            queries = right_padding_to_left_padding(
                data["input_ids"], args.pad_token_id
            )
            # FIX: pass `args` (not `tokenizer`): `generate` reads
            # `pad_token_id` from its third parameter, which it names `args`;
            # the old call only worked because the tokenizer happens to expose
            # the same attribute.
            query_responses = generate(
                pretrained_model, queries, args, generation_config
            )
            sample_queries_responses.append(query_responses)

        rewards = [
            get_reward(
                reward_state.params,
                reward_backbone,
                reward_head,
                query_responses,
                args,
            )
            for query_responses in sample_queries_responses
        ]
        # len(rewards) == n_batches; each entry has shape (args.rollout_batch_size, 1)

        rewards = np.concatenate(rewards)
        # rewards shape: [args.local_normalize_samples, 1]
        mean, std = rewards.mean(), rewards.std()
        print(f"mean: {mean}, std: {std}")
        return mean, std

    mean, std = get_normalization_stats(reward_state)
    target_mean, target_std = 0.0, 1.0
    # reward' = gain * reward + bias must map (mean, std) -> (target_mean, target_std)
    gain = target_std / std
    bias = target_mean - gain * mean
    print(f"gain: {gain}, bias: {bias}")

    # do normalization
    reward_state = set_reward_state_head_params(reward_state, gain=gain, bias=bias)

    # validate normalization
    _, _ = get_normalization_stats(reward_state)
    return reward_state
+
+
def prepare_left_padded_query_responses_with_labels(dataset, args):
    """Prepare left padded, concatenated queries and responses, and add labels.
    Args:
        dataset: a dictionary that contains 'query', 'best', and 'sample{i}',
            where i is from 0 to args.labels.num_labels-1.
        args: a dataclass that contains 'labels.num_labels' and 'pad_token_id'.

    Returns:
        queries_responses: array of concatenated queries and responses, with shape
            [num_queries, num_responses_per_query, max_query_len + max_response_len]
        labels:
            array of the best response idx for each query, with shape
            [num_queries,]
    """

    labels = np.array(dataset["best"])
    # [num_queries,]

    queries = np.stack(dataset["query"])
    # [num_queries, max_query_length]

    # duplicate each query once per candidate response
    queries = np.repeat(queries, args.labels.num_labels, axis=0)
    queries = rearrange(queries, "(q r) l -> q r l", r=args.labels.num_labels)
    # [num_queries, num_responses_per_query, max_query_length]

    responses = np.array(
        [np.stack(dataset[f"sample{i}"]) for i in range(args.labels.num_labels)]
    )
    # [num_responses_per_query, num_queries, max_response_len]

    responses = rearrange(responses, "r q l -> q r l")
    # [num_queries, num_responses_per_query, max_response_len]

    queries_responses = np.concatenate([queries, responses], axis=-1)
    # [num_queries, num_responses_per_query, max_query_length + max_response_len]

    # swap OpenAI's pad id for the tokenizer's pad id used everywhere else
    queries_responses[queries_responses == OPENAI_PAD_TOKEN_ID] = args.pad_token_id

    queries_responses = right_padding_to_left_padding(
        rearrange(queries_responses, "q r l -> (q r) l"), pad_id=args.pad_token_id,
    )

    queries_responses = rearrange(
        queries_responses, "(q r) l -> q r l", r=args.labels.num_labels
    )
    # [num_queries, num_responses_per_query, max_query_len + max_response_len]
    return queries_responses, labels
+
+
def get_dataloader_iter(rng, dataset_tokens, dataset_labels, args):
    """Yield shuffled (tokens, labels) batches for one epoch.

    The permutation is drawn from ``rng``; trailing samples that do not fill a
    complete batch of ``args.batch_size`` are dropped.
    """
    assert dataset_tokens.shape[0] == dataset_labels.shape[0]
    num_samples = dataset_tokens.shape[0]
    steps_per_epoch = num_samples // args.batch_size

    shuffled = jax.random.permutation(rng, num_samples)
    shuffled = shuffled[: steps_per_epoch * args.batch_size]  # drop incomplete batch
    batch_indices = shuffled.reshape((steps_per_epoch, args.batch_size))

    for indices in batch_indices:
        yield dataset_tokens[indices], dataset_labels[indices]
+
+
def train_step(state, batch, reward_backbone, reward_head, args):
    """Train reward model for one step (runs inside `jax.pmap`).

    Args:
        state: replicated TrainState holding RewardModelParams.
        batch: (query_responses, labels); query_responses has shape
            [num_queries, num_responses_per_query, length] and labels holds the
            index of the best response per query.
        reward_backbone: pretrained LM used by `get_reward`.
        reward_head: RewardHead module used by `get_reward`.
        args: provides `labels.num_labels` and `pad_token_id`.

    Returns:
        (new_state, metrics) with loss/accuracy averaged over the "batch" axis.
    """
    query_responses, labels = batch
    query_responses_ids = rearrange(query_responses, "q r l -> (q r) l")
    # query_responses_ids: [num_queries * num_responses_per_query, length]

    def loss_function(params):
        # one scalar logit per (query, response) pair
        logits = get_reward(
            params, reward_backbone, reward_head, query_responses_ids, args
        )

        logits_reshaped = rearrange(logits, "(q r) 1 -> q r", r=args.labels.num_labels)

        # cross entropy over candidate responses: the labeled best response
        # should receive the highest reward
        loss = optax.softmax_cross_entropy_with_integer_labels(
            logits_reshaped, labels
        ).mean()

        accuracy = (logits_reshaped.argmax(axis=1) == labels).astype("float32").mean()
        return loss, accuracy

    loss_grad_fn = jax.value_and_grad(loss_function, has_aux=True)
    (loss, accuracy), grads = loss_grad_fn(state.params)
    # average gradients across devices before applying the update
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    # average metrics across devices for logging
    loss = jax.lax.pmean(loss, axis_name="batch")
    accuracy = jax.lax.pmean(accuracy, axis_name="batch")
    return state, {"loss": loss, "accuracy": accuracy}
+
+
def val_step(state, batch, reward_backbone, reward_head, args):
    """Eval reward model for one step (runs inside `jax.pmap`).

    Same batch layout as `train_step`, but parameters are left untouched; only
    device-averaged loss/accuracy metrics are returned.
    """
    query_responses, labels = batch
    query_responses_ids = rearrange(query_responses, "q r l -> (q r) l")
    # query_responses_ids: [num_queries * num_responses_per_query, length]

    def loss_function(params):
        # one scalar logit per (query, response) pair
        logits = get_reward(
            params, reward_backbone, reward_head, query_responses_ids, args
        )

        logits_reshaped = rearrange(logits, "(q r) 1 -> q r", r=args.labels.num_labels)

        # cross entropy over candidate responses: the labeled best response
        # should receive the highest reward
        loss = optax.softmax_cross_entropy_with_integer_labels(
            logits_reshaped, labels
        ).mean()

        accuracy = (logits_reshaped.argmax(axis=1) == labels).astype("float32").mean()
        return loss, accuracy

    loss, accuracy = loss_function(state.params)
    # average metrics across devices for logging
    loss = jax.lax.pmean(loss, axis_name="batch")
    accuracy = jax.lax.pmean(accuracy, axis_name="batch")
    return {"loss": loss, "accuracy": accuracy}
+
+
+def train(args: Args):
+ args.world_size = len(jax.devices())
+
+ args.batch_size = int(args.local_batch_size * args.world_size)
+ args.normalize_samples = int(args.local_normalize_samples * args.world_size)
+ args.local_micro_batch_size = exact_div(
+ args.local_batch_size, args.gradient_accumulation_steps
+ )
+
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+
+ writer = tensorboard.SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s"
+ % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ args.base_model, padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ args.pad_token_id = tokenizer.pad_token_id
+
+ untrained_model = transformers.FlaxAutoModelForCausalLM.from_pretrained(
+ args.base_model
+ )
+
+ reward_state, reward_backbone, reward_head = create_initial_reward_state_and_models(
+ jax.random.PRNGKey(args.seed), args
+ )
+
+ p_train_step = jax.pmap(
+ functools.partial(
+ train_step,
+ args=args,
+ reward_backbone=reward_backbone,
+ reward_head=reward_head,
+ ),
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+ p_val_step = jax.pmap(
+ functools.partial(
+ val_step,
+ args=args,
+ reward_backbone=reward_backbone,
+ reward_head=reward_head,
+ ),
+ axis_name="batch",
+ )
+
+ normalization_dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=args.seed,
+ start_text=args.task.start_text,
+ end_text=args.task.end_text,
+ )
+ normalization_dataloader = DataLoader(
+ normalization_dataset, batch_size=args.rollout_batch_size
+ )
+ iter_normalization_dataloader = iter(normalization_dataloader)
+
+ generation_config = transformers.GenerationConfig(
+ max_new_tokens=args.task.response_length,
+ min_new_tokens=args.task.response_length,
+ temperature=args.task.temperature,
+ top_k=0.0,
+ top_p=1.0,
+ do_sample=True,
+ pad_token_id=args.pad_token_id,
+ )
+
+ if args.normalize_before:
+ print("===Normalize reward model *before* training===")
+
+ # pylint: disable=E1101:no-member
+ print(
+ "before normalization. "
+ + f"Gain: {reward_state.params.head_params['params']['reward_gain']}"
+ + f" Bias: {reward_state.params.head_params['params']['reward_bias']}"
+ )
+
+ reward_state = normalize(
+ args,
+ tokenizer,
+ untrained_model,
+ reward_state,
+ iter_normalization_dataloader,
+ generation_config,
+ reward_backbone,
+ reward_head,
+ )
+
+ print(
+ "after normalization. "
+ + f"Gain: {reward_state.params.head_params['params']['reward_gain']}"
+ + f" Bias: {reward_state.params.head_params['params']['reward_bias']}"
+ )
+
+ reward_state = jax_utils.replicate(reward_state)
+
+ # `labeled_dataset` has keys
+ # `['sample0', 'query', 'best', 'sample3', 'sample1', 'sample2']`
+ labeled_dataset = load_dataset(
+ "vwxyzjn/lm-human-preferences", data_files=[args.label_dataset],
+ )["train"]
+ print("Num labels found in source:", len(labeled_dataset))
+ print("training on", args.labels.num_train, "in batches of", args.local_batch_size)
+
+ all_queries_responses, all_labels = prepare_left_padded_query_responses_with_labels(
+ labeled_dataset, args
+ )
+
+ assert args.labels.num_train < all_queries_responses.shape[0]
+ train_queries_responses = all_queries_responses[: args.labels.num_train]
+ train_labels = all_labels[: args.labels.num_train]
+
+ val_queries_responses = all_queries_responses[args.labels.num_train :]
+ val_labels = all_labels[args.labels.num_train :]
+
+ train_iter = get_dataloader_iter(
+ jax.random.PRNGKey(args.seed),
+ dataset_tokens=train_queries_responses,
+ dataset_labels=train_labels,
+ args=args,
+ )
+
+ print("===training reward model===")
+
+ for global_step, train_batch in enumerate(train_iter):
+ train_batch = common_utils.shard(train_batch)
+ reward_state, train_metrics = p_train_step(reward_state, train_batch)
+ writer.add_scalar(
+ "train/lr", single_epoch_linear_schedule(global_step, args), global_step
+ )
+
+ # gathering replicated metric data
+ train_metrics = common_utils.get_metrics([train_metrics])
+
+ for key, value in train_metrics.items():
+ writer.add_scalar(f"train/{key}", value, global_step)
+
+ if (
+ args.print_sample_output_freq > 0
+ and global_step % args.print_sample_output_freq == 0
+ ):
+ val_iter = get_dataloader_iter(
+ jax.random.PRNGKey(0),
+ dataset_tokens=val_queries_responses,
+ dataset_labels=val_labels,
+ args=args,
+ )
+
+ val_metrics_list = []
+ for val_batch in val_iter:
+ val_batch = common_utils.shard(val_batch)
+ val_metrics = p_val_step(reward_state, val_batch)
+ val_metrics_list.append(val_metrics)
+
+ val_metrics = common_utils.get_metrics(val_metrics_list)
+ for key, value in val_metrics.items():
+ val_metrics[key] = value.mean()
+ writer.add_scalar(f"test/{key}", val_metrics[key], global_step)
+
+ print(
+ f"gloabl_step: {global_step} | "
+ + f"test/accuracy {val_metrics['accuracy']}"
+ )
+
+ reward_state = jax_utils.unreplicate(reward_state)
+
+ if args.normalize_after:
+ print("===Normalize reward model *after* training===")
+
+ # pylint: disable=E1101:no-member
+ print(
+ "before normalization. "
+ + f"Gain: {reward_state.params.head_params['params']['reward_gain']}"
+ + f" Bias: {reward_state.params.head_params['params']['reward_bias']}"
+ )
+
+ reward_state = normalize(
+ args,
+ tokenizer,
+ untrained_model,
+ reward_state,
+ iter_normalization_dataloader,
+ generation_config,
+ reward_backbone,
+ reward_head,
+ )
+ print(
+ "after normalization. "
+ + f"Gain: {reward_state.params.head_params['params']['reward_gain']}"
+ + f" Bias: {reward_state.params.head_params['params']['reward_bias']}"
+ )
+
+ if args.save_path:
+ ckpt = {"reward_model": reward_state, "args": vars(args)}
+ orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()
+ save_args = orbax_utils.save_args_from_target(ckpt)
+ orbax_checkpointer.save(args.save_path, ckpt, save_args=save_args, force=True)
+
+ if args.track: | if args.track and args.local_rank == 0: |
lm-human-preference-details | github_2023 | python | 13 | vwxyzjn | vwxyzjn | @@ -0,0 +1,765 @@
+""" Train jax-based reward model for LM human preference details."""
+import os
+from dataclasses import asdict, dataclass, field
+from typing import Optional
+import time
+import functools
+import numpy as np
+from torch.utils.data import DataLoader, IterableDataset
+import jax
+import jax.numpy as jnp
+import orbax
+import optax
+from einops import rearrange
+import tyro
+from datasets import load_dataset
+from rich.pretty import pprint
+import transformers
+import flax
+from flax.training import common_utils
+from flax.training import orbax_utils
+from flax.core.frozen_dict import freeze
+from flax import traverse_util, jax_utils
+from flax.training.train_state import TrainState
+import flax.linen as nn
+
+from lm_human_preference_details.data import DATASET
+from torch.utils import tensorboard
+
+
+@dataclass
+class LabelHParams:
+ type: str = None
+ num_train: int = 4992
+ num_labels: int = 4
+ source: str = None
+
+
+@dataclass
+class TaskHParams:
+ # Query params
+ query_length: int = 64
+ query_dataset: str = "books"
+ query_prefix: str = ""
+ query_suffix: str = ""
+ start_text: Optional[str] = None
+ end_text: Optional[str] = None
+
+ # Response params
+ response_length: int = 24
+
+ # LM params
+ temperature: float = 0.7
+
+
+@dataclass
+class Args:
+ # common args
+ exp_name: str = os.path.basename(__file__)[: -len(".py")]
+ """the name of this experiment"""
+ seed: int = 1
+ """seed of the experiment"""
+ track: bool = False
+ """if toggled, this experiment will be tracked with Weights and Biases"""
+ wandb_project_name: str = "lm_human_preference_details"
+ """the wandb's project name"""
+ wandb_entity: Optional[str] = None
+ """the entity (team) of wandb's project"""
+ cuda: bool = True
+ """Whether to use cuda if available."""
+ run_name: tyro.conf.Suppress[str] = None
+ """TO BE FILLED: a unique name of this run"""
+
+ base_model: str = "gpt2"
+ """the name of the pretrained model to use"""
+ label_dataset: str = "sentiment/offline_5k.json"
+ """the name of the dataset to use for labels in
+ `https://huggingface.co/datasets/vwxyzjn/lm-human-preferences`"""
+ local_batch_size: int = 4
+ """per rank batch size"""
+ gradient_accumulation_steps: int = 1
+ """gradient accumulation steps"""
+ local_micro_batch_size: tyro.conf.Suppress[int] = None
+ """per rank micro batch size"""
+ lr: float = 0.00005
+ """the learning rate"""
+ eps: float = 1e-5
+ """the epsilon for AdamW"""
+ rollout_batch_size: int = 512 # decrease this to e.g. 64 if OOM
+ """rollout batch size"""
+ world_size: tyro.conf.Suppress[int] = None
+ """the number of processes to use"""
+ batch_size: tyro.conf.Suppress[int] = None
+ """the batch size across all ranks"""
+ local_normalize_samples: int = 256
+ """Samples used to estimate reward mean and std"""
+ normalize_samples: tyro.conf.Suppress[int] = None
+ """Samples used to estimate reward mean and std across all ranks"""
+ debug_normalize: int = 0
+ """Samples used to check that normalization worked"""
+ normalize_before: bool = True
+ """Whether, before training, to normalize the rewards on the policy to the
+ scales on the training buffer. (For comparisons, just use mean 0, var 1.)"""
+ normalize_after: bool = True
+ """Whether, after training, to normalize the rewards on the ref policy to
+ mean 0, var 1 (so the KL coefficient always has the same meaning)."""
+ print_sample_output_freq: int = 10
+ """How often to print sample output"""
+ save_path: str = "models/"
+ """Where to save the model"""
+ use_tensorflow_adam: bool = False
+ """Whether to use tensorflow-style Adam optimizer instead of PyTorch's"""
+ task: TaskHParams = field(default_factory=TaskHParams)
+ labels: LabelHParams = field(default_factory=LabelHParams)
+
+
+OPENAI_PAD_TOKEN_ID = 50259
+
+
+@flax.struct.dataclass
+class RewardModelParams:
+ """Parameters for the reward model."""
+
+ backbone_params: flax.core.FrozenDict
+ head_params: flax.core.FrozenDict
+
+
+class RewardHead(nn.Module):
+ """Affine transform head for the reward model.
+
+ Attributes:
+ head_input_size: Size of the input to the head. The weights of the linear
+ layer are initialized from mean-zero Gaussians with std
+ 1/sqrt(head_input_size + 1).
+
+ Example:
+ model = RewardHead(head_input_size=768)
+ variables = model.init(jax.random.PRNGKey(0),
+ jnp.ones((1, 6, 768)))
+ """
+
+ head_input_size: int
+
+ def setup(self):
+ self.reward_linear = nn.Dense(
+ 1,
+ kernel_init=nn.initializers.normal(
+ stddev=1 / np.sqrt(self.head_input_size + 1)
+ ),
+ bias_init=nn.initializers.zeros_init(),
+ )
+ self.reward_gain = self.param("reward_gain", nn.initializers.ones_init(), ())
+ self.reward_bias = self.param("reward_bias", nn.initializers.zeros_init(), ())
+
+ def __call__(self, x):
+ assert x.shape[-1] == self.head_input_size
+ x = self.reward_linear(x)
+ x = x * self.reward_gain + self.reward_bias
+ return x
+
+
+class MyDataset(IterableDataset):
+ """A dataset for reward model normalization."""
+
+ def __init__(
+ self, generator, tokenizer, query_length, seed, start_text=None, end_text=None
+ ):
+ self.generator = generator
+ self.tokenizer = tokenizer
+ self.query_length = query_length
+ self.start_text = start_text
+ self.end_text = end_text
+ self.seed = seed
+ token_to_index = tokenizer.get_vocab()
+ self.start_token = token_to_index[start_text] if self.start_text else None
+ self.end_token = token_to_index[end_text] if self.end_text else None
+
+ def __iter__(self):
+ for text in self.generator("train", self.seed, shuffle=True):
+ tokens = self.tokenizer.encode(text)
+ if self.start_token is not None:
+ try:
+ first_index = tokens.index(self.start_token) + 1
+ if first_index < len(tokens):
+ tokens = tokens[first_index:]
+ except:
+ continue
+ tokens = tokens[: self.query_length]
+ if self.end_token is not None:
+ try:
+ last_index = len(tokens) - tokens[::-1].index(self.end_token)
+ tokens = tokens[:last_index]
+ except:
+ continue
+ output = self.tokenizer.pad(
+ {"input_ids": tokens},
+ padding="max_length",
+ max_length=self.query_length,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ yield output
+
+
+def right_padding_to_left_padding(tokens, pad_id):
+ """Convert from right padding to left padding."""
+ assert tokens.ndim == 2
+ return np.array(
+ [
+ [pad_id] * (row == pad_id).sum() + [x for x in row if x != pad_id]
+ for row in tokens
+ ]
+ )
+
+
+def ceil_div(a, b):
+ return (a - 1) // b + 1
+
+
+def exact_div(a, b):
+ q = a // b
+ if a != q * b:
+ raise ValueError(f"Inexact division: {a} / {b} = {a/b}")
+ return q
+
+
+# TODO: pmap `generate` to accelerate reward model normalization?
+def generate(pretrained_model, queries, args, generation_config):
+ """generate in a way that does not affect padding tokens"""
+ context_length = queries.shape[1]
+ attention_mask = queries != args.pad_token_id
+ # set padding tokens to 0
+ input_ids = jnp.where(attention_mask, queries, 0)
+ output = pretrained_model.generate(
+ input_ids=input_ids,
+ attention_mask=attention_mask.astype("int32"),
+ # position_ids=attention_mask.cumsum(1) - attention_mask.long(),
+ # generation collapsed if this was turned on.
+ # TODO: why does generation collapse with this?
+ generation_config=generation_config,
+ return_dict_in_generate=True,
+ )
+ return jnp.concatenate((queries, output.sequences[:, context_length:]), axis=1)
+
+
+def single_epoch_linear_schedule(global_step, args):
+ """ anneal learning rate linearly to reach 0 after one epoch."""
+ frac = 1.0 - global_step * args.batch_size / args.labels.num_train
+ return args.lr * frac
+
+
+def create_initial_reward_state_and_models(init_key, args):
+ # pylint: disable=redefined-outer-name
+ """reate reward model and initial reward state."""
+
+ reward_backbone = transformers.FlaxAutoModelForCausalLM.from_pretrained(
+ args.base_model
+ )
+ reward_head = RewardHead(head_input_size=reward_backbone.config.hidden_size)
+
+ if args.use_tensorflow_adam:
+ raise NotImplementedError("tensorflow adam is not implemented yet.")
+ else:
+ optimizer = optax.adam(
+ learning_rate=functools.partial(single_epoch_linear_schedule, args=args),
+ eps=args.eps,
+ )
+
+ if args.gradient_accumulation_steps > 1:
+ optimizer = optax.MultiSteps(optimizer, args.gradient_accumulation_steps)
+ state = TrainState.create(
+ apply_fn=None,
+ params=RewardModelParams(
+ backbone_params=flax.core.FrozenDict({"params": reward_backbone.params}),
+ head_params=flax.core.FrozenDict(
+ reward_head.init(
+ init_key,
+ jnp.ones(reward_backbone.config.hidden_size)[None, None, :],
+ )
+ ),
+ ),
+ tx=optimizer,
+ )
+ return state, reward_backbone, reward_head
+
+
+def get_reward(
+ params: RewardModelParams,
+ reward_backbone,
+ reward_head,
+ query_responses_ids: jnp.ndarray,
+ args: Args,
+):
+ """Get reward for each queiry--response pair."""
+ assert query_responses_ids.ndim == 2
+ # query_responses_ids: [batch_size, length]
+
+ # mask out padding tokens
+ attention_mask = query_responses_ids != args.pad_token_id
+ query_responses_ids = jnp.where(attention_mask, query_responses_ids, 0)
+
+ # assign position ids
+ position_ids = attention_mask.cumsum(1) - attention_mask
+
+ reward_latents = reward_backbone.module.apply(
+ variables=params.backbone_params,
+ input_ids=query_responses_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_hidden_states=True,
+ ).hidden_states[-1]
+ # shape: [batch_size, length, hidden_size]
+
+ last_reward_latents = reward_latents[:, -1, :]
+ # shape: [batch_size, hidden_size]
+
+ reward = reward_head.apply(variables=params.head_params, x=last_reward_latents)
+ # shape: [batch_size, 1]
+ return reward
+
+
+def set_reward_state_head_params(
+ reward_state: TrainState, gain: float = 1.0, bias: float = 0.0
+):
+ """Set gain and bias of the reward head.
+ Args:
+ reward_state: Reward state.
+ gain: Gain of the reward head.
+ bias: Bias of the reward head.
+
+ Example:
+ reward_state = set_reward_state_head_params(
+ reward_state, gain=0.1, bias=0.2)
+ print(reward_state.params.head_params['params'])
+ """
+ flat_head_params = traverse_util.flatten_dict(
+ reward_state.params.head_params, sep="/"
+ )
+
+ flat_head_params["params/reward_gain"] = jnp.array(gain, dtype=jnp.float32)
+ flat_head_params["params/reward_bias"] = jnp.array(bias, dtype=jnp.float32)
+
+ unflat_head_params = freeze(traverse_util.unflatten_dict(flat_head_params, sep="/"))
+
+ reward_state = reward_state.replace(
+ params=RewardModelParams(
+ backbone_params=reward_state.params.backbone_params,
+ head_params=unflat_head_params,
+ )
+ )
+ return reward_state
+
+
+def normalize(
+ args,
+ tokenizer,
+ pretrained_model,
+ reward_state,
+ iter_dataloader,
+ generation_config,
+ reward_backbone,
+ reward_head,
+):
+ # number of minibatches for computing the normalization statistics
+ n_batches = ceil_div(args.local_normalize_samples, args.rollout_batch_size)
+
+ # reset reward scales
+ reward_state = set_reward_state_head_params(reward_state, gain=1.0, bias=0.0)
+
+ def get_normalization_stats(reward_state):
+ """compute mean and std of rewards"""
+
+ sample_queries_responses = []
+ for _ in range(n_batches):
+ data = next(iter_dataloader)
+ queries = data["input_ids"]
+ queries = right_padding_to_left_padding(
+ data["input_ids"], args.pad_token_id
+ )
+ query_responses = generate(
+ pretrained_model, queries, tokenizer, generation_config
+ )
+ sample_queries_responses.append(query_responses)
+
+ rewards = []
+ for query_responses in sample_queries_responses:
+ rewards.append(
+ get_reward(
+ reward_state.params,
+ reward_backbone,
+ reward_head,
+ query_responses,
+ args,
+ )
+ )
+ # Here, len(rewards) = n_batches
+ # each rewards[i] is a (args.rollout_batch_size, 1) array.
+
+ rewards = np.concatenate(rewards)
+ # rewards shape: [args.local_normalize_samples, 1]
+ mean, std = rewards.mean(), rewards.std()
+ print(f"mean: {mean}, std: {std}")
+ return mean, std
+
+ mean, std = get_normalization_stats(reward_state)
+ target_mean, target_std = 0.0, 1.0
+ gain = target_std / std
+ bias = target_mean - gain * mean
+ print(f"gain: {gain}, bias: {bias}")
+
+ # do normalization
+ reward_state = set_reward_state_head_params(reward_state, gain=gain, bias=bias)
+
+ # validate normalization
+ _, _ = get_normalization_stats(reward_state)
+ return reward_state
+
+
+def prepare_left_padded_query_responses_with_labels(dataset, args):
+ """Prepare left padded, concatenated queries and responses, and add labels.
+ Args:
+ dataset: a dictionary that contains 'query', 'best', and 'sample{i}',
+ where i is from 0 to args.labels.num_labels-1.
+ args: a dataclass that contains 'labels.num_labels' and 'pad_token_id'.
+
+ Returns:
+ queries_responses: array of concatenated queries and responses, with shape
+ [num_queires, num_responses_per_query, max_query_len + max_response_len]
+ labels:
+ array of the best response idx for each label, with shape
+ [num_queires, 1]
+ """
+
+ labels = np.array(dataset["best"])
+ # [num_queires,]
+
+ queries = np.stack(dataset["query"])
+ # [num_queires, max_query_length]
+
+ queries = np.repeat(queries, args.labels.num_labels, axis=0)
+ queries = rearrange(queries, "(q r) l -> q r l", r=args.labels.num_labels)
+ # [num_queires, num_queires, max_query_length]
+
+ responses = np.array(
+ [np.stack(dataset[f"sample{i}"]) for i in range(args.labels.num_labels)]
+ )
+ # [num_response_per_query, num_queires, max_response_len]
+
+ responses = rearrange(responses, "r q l -> q r l")
+ # [num_queires, num_responses_per_query, max_response_len]
+
+ queries_responses = np.concatenate([queries, responses], axis=-1)
+ # [num_queires, num_responses_per_query, max_query_length + max_response_len]
+
+ queries_responses[queries_responses == OPENAI_PAD_TOKEN_ID] = args.pad_token_id
+
+ queries_responses = right_padding_to_left_padding(
+ rearrange(queries_responses, "q r l -> (q r) l"), pad_id=args.pad_token_id,
+ )
+
+ queries_responses = rearrange(
+ queries_responses, "(q r) l -> q r l", r=args.labels.num_labels
+ )
+ # [num_queires, num_responses_per_query, max_query_len + max_response_len]
+ return queries_responses, labels
+
+
+def get_dataloader_iter(rng, dataset_tokens, dataset_labels, args):
+ """Get iteration of dataloader."""
+ assert dataset_tokens.shape[0] == dataset_labels.shape[0]
+ num_samples = dataset_tokens.shape[0]
+
+ steps_per_epoch = num_samples // args.batch_size
+ perms = jax.random.permutation(rng, num_samples)
+ # Skip incomplete batch:
+ perms = perms[: steps_per_epoch * args.batch_size]
+ perms = perms.reshape((steps_per_epoch, args.batch_size))
+
+ for perm in perms:
+ batch = (dataset_tokens[perm], dataset_labels[perm])
+ yield batch
+
+
+def train_step(state, batch, reward_backbone, reward_head, args):
+ """Train reward model for one step."""
+ query_responses, labels = batch
+ query_responses_ids = rearrange(query_responses, "q r l -> (q r) l")
+ # query_responses_ids: [num_queries * num_responses_per_query, length]
+
+ def loss_function(params):
+ logits = get_reward(
+ params, reward_backbone, reward_head, query_responses_ids, args
+ )
+
+ logits_reshaped = rearrange(logits, "(q r) 1 -> q r", r=args.labels.num_labels)
+
+ loss = optax.softmax_cross_entropy_with_integer_labels(
+ logits_reshaped, labels
+ ).mean()
+
+ accuracy = (logits_reshaped.argmax(axis=1) == labels).astype("float32").mean()
+ return loss, accuracy
+
+ loss_grad_fn = jax.value_and_grad(loss_function, has_aux=True)
+ (loss, accuracy), grads = loss_grad_fn(state.params)
+ grads = jax.lax.pmean(grads, "batch")
+ state = state.apply_gradients(grads=grads)
+ loss = jax.lax.pmean(loss, axis_name="batch")
+ accuracy = jax.lax.pmean(accuracy, axis_name="batch")
+ return state, {"loss": loss, "accuracy": accuracy}
+
+
+def val_step(state, batch, reward_backbone, reward_head, args):
+ """Eval reward model for one step."""
+ query_responses, labels = batch
+ query_responses_ids = rearrange(query_responses, "q r l -> (q r) l")
+ # query_responses_ids: [num_queries * num_responses_per_query, length]
+
+ def loss_function(params):
+ logits = get_reward(
+ params, reward_backbone, reward_head, query_responses_ids, args
+ )
+
+ logits_reshaped = rearrange(logits, "(q r) 1 -> q r", r=args.labels.num_labels)
+
+ loss = optax.softmax_cross_entropy_with_integer_labels(
+ logits_reshaped, labels
+ ).mean()
+
+ accuracy = (logits_reshaped.argmax(axis=1) == labels).astype("float32").mean()
+ return loss, accuracy
+
+ loss, accuracy = loss_function(state.params)
+ loss = jax.lax.pmean(loss, axis_name="batch")
+ accuracy = jax.lax.pmean(accuracy, axis_name="batch")
+ return {"loss": loss, "accuracy": accuracy}
+
+
+def train(args: Args):
+ args.world_size = len(jax.devices())
+
+ args.batch_size = int(args.local_batch_size * args.world_size)
+ args.normalize_samples = int(args.local_normalize_samples * args.world_size)
+ args.local_micro_batch_size = exact_div(
+ args.local_batch_size, args.gradient_accumulation_steps
+ )
+
+ run_name = f"{args.exp_name}__{args.seed}__{int(time.time())}"
+
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=asdict(args),
+ name=run_name,
+ save_code=True,
+ )
+ wandb.run.log_code(".")
+
+ writer = tensorboard.SummaryWriter(f"runs/{run_name}")
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s"
+ % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+ pprint(args)
+
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ args.base_model, padding_side="right",
+ )
+ # we use the padding token manually but do not resize the token embedding of the model
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ args.pad_token_id = tokenizer.pad_token_id
+
+ untrained_model = transformers.FlaxAutoModelForCausalLM.from_pretrained(
+ args.base_model
+ )
+
+ reward_state, reward_backbone, reward_head = create_initial_reward_state_and_models(
+ jax.random.PRNGKey(args.seed), args
+ )
+
+ p_train_step = jax.pmap(
+ functools.partial(
+ train_step,
+ args=args,
+ reward_backbone=reward_backbone,
+ reward_head=reward_head,
+ ),
+ axis_name="batch",
+ donate_argnums=(0,),
+ )
+ p_val_step = jax.pmap(
+ functools.partial(
+ val_step,
+ args=args,
+ reward_backbone=reward_backbone,
+ reward_head=reward_head,
+ ),
+ axis_name="batch",
+ )
+
+ normalization_dataset = MyDataset(
+ DATASET[args.task.query_dataset],
+ tokenizer,
+ args.task.query_length,
+ seed=args.seed,
+ start_text=args.task.start_text,
+ end_text=args.task.end_text,
+ )
+ normalization_dataloader = DataLoader(
+ normalization_dataset, batch_size=args.rollout_batch_size
+ )
+ iter_normalization_dataloader = iter(normalization_dataloader)
+
+ generation_config = transformers.GenerationConfig(
+ max_new_tokens=args.task.response_length,
+ min_new_tokens=args.task.response_length,
+ temperature=args.task.temperature,
+ top_k=0.0,
+ top_p=1.0,
+ do_sample=True,
+ pad_token_id=args.pad_token_id,
+ )
+
+ if args.normalize_before:
+ print("===Normalize reward model *before* training===")
+
+ # pylint: disable=E1101:no-member
+ print(
+ "before normalization. "
+ + f"Gain: {reward_state.params.head_params['params']['reward_gain']}"
+ + f" Bias: {reward_state.params.head_params['params']['reward_bias']}"
+ )
+
+ reward_state = normalize(
+ args,
+ tokenizer,
+ untrained_model,
+ reward_state,
+ iter_normalization_dataloader,
+ generation_config,
+ reward_backbone,
+ reward_head,
+ )
+
+ print(
+ "after normalization. "
+ + f"Gain: {reward_state.params.head_params['params']['reward_gain']}"
+ + f" Bias: {reward_state.params.head_params['params']['reward_bias']}"
+ )
+
+ reward_state = jax_utils.replicate(reward_state)
+
+ # `labeled_dataset` has keys
+ # `['sample0', 'query', 'best', 'sample3', 'sample1', 'sample2']`
+ labeled_dataset = load_dataset(
+ "vwxyzjn/lm-human-preferences", data_files=[args.label_dataset],
+ )["train"]
+ print("Num labels found in source:", len(labeled_dataset))
+ print("training on", args.labels.num_train, "in batches of", args.local_batch_size)
+
+ all_queries_responses, all_labels = prepare_left_padded_query_responses_with_labels(
+ labeled_dataset, args
+ )
+
+ assert args.labels.num_train < all_queries_responses.shape[0]
+ train_queries_responses = all_queries_responses[: args.labels.num_train]
+ train_labels = all_labels[: args.labels.num_train]
+
+ val_queries_responses = all_queries_responses[args.labels.num_train :]
+ val_labels = all_labels[args.labels.num_train :]
+
+ train_iter = get_dataloader_iter(
+ jax.random.PRNGKey(args.seed),
+ dataset_tokens=train_queries_responses,
+ dataset_labels=train_labels,
+ args=args,
+ )
+
+ print("===training reward model===")
+
+ for global_step, train_batch in enumerate(train_iter):
+ train_batch = common_utils.shard(train_batch)
+ reward_state, train_metrics = p_train_step(reward_state, train_batch)
+ writer.add_scalar(
+ "train/lr", single_epoch_linear_schedule(global_step, args), global_step
+ )
+
+ # gathering replicated metric data
+ train_metrics = common_utils.get_metrics([train_metrics])
+
+ for key, value in train_metrics.items():
+ writer.add_scalar(f"train/{key}", value, global_step)
+
+ if (
+ args.print_sample_output_freq > 0
+ and global_step % args.print_sample_output_freq == 0
+ ):
+ val_iter = get_dataloader_iter(
+ jax.random.PRNGKey(0),
+ dataset_tokens=val_queries_responses,
+ dataset_labels=val_labels,
+ args=args,
+ )
+
+ val_metrics_list = []
+ for val_batch in val_iter:
+ val_batch = common_utils.shard(val_batch)
+ val_metrics = p_val_step(reward_state, val_batch)
+ val_metrics_list.append(val_metrics)
+
+ val_metrics = common_utils.get_metrics(val_metrics_list)
+ for key, value in val_metrics.items():
+ val_metrics[key] = value.mean()
+ writer.add_scalar(f"test/{key}", val_metrics[key], global_step)
+
+ print(
+ f"gloabl_step: {global_step} | "
+ + f"test/accuracy {val_metrics['accuracy']}"
+ )
+
+ reward_state = jax_utils.unreplicate(reward_state)
+
+ if args.normalize_after:
+ print("===Normalize reward model *after* training===")
+
+ # pylint: disable=E1101:no-member
+ print(
+ "before normalization. "
+ + f"Gain: {reward_state.params.head_params['params']['reward_gain']}"
+ + f" Bias: {reward_state.params.head_params['params']['reward_bias']}"
+ )
+
+ reward_state = normalize(
+ args,
+ tokenizer,
+ untrained_model,
+ reward_state,
+ iter_normalization_dataloader,
+ generation_config,
+ reward_backbone,
+ reward_head,
+ )
+ print(
+ "after normalization. "
+ + f"Gain: {reward_state.params.head_params['params']['reward_gain']}"
+ + f" Bias: {reward_state.params.head_params['params']['reward_bias']}"
+ )
+
+ if args.save_path: | if args.save_path and args.local_rank == 0: |
Queryable | github_2023 | others | 22 | mazzzystar | mazzzystar | @@ -64,7 +64,7 @@ struct SearchResultsView: View {
case .HAS_RESULT:
// Has result
VStack {
- if photoSearcher.totalUnIndexedPhotosNum > 0 {
+ if photoSearcher.totalUnIndexedPhotosNum > 0 || PHPhotoLibrary.authorizationStatus(for: .readWrite) == .limited { | Reasonable. |
nuxt-open-fetch | github_2023 | typescript | 41 | enkot | enkot | @@ -100,6 +101,11 @@ export default defineNuxtModule<ModuleOptions>({
}
}
+ nuxt.options.alias = {
+ ...nuxt.options.alias,
+ '#nuxt-open-fetch-schemas': join(nuxt.options.buildDir, 'types', moduleName, 'schemas'), | What do you think about '#open-fetch-schemas'? |
nuxt-open-fetch | github_2023 | typescript | 22 | enkot | IlyaSemenov | @@ -0,0 +1,12 @@
+export default defineEventHandler(async (event) => {
+ const { $fetchPets } = useNuxtOpenFetchServer()
+ const data = await $fetchPets ("/pet/{petId}", {
+ path: {
+ petId: 1,
+ },
+ });
+
+ return ({
+ data,
+ })
+}) | Please lint this (get rid of 'no new line at the end of file'). The project uses [editorconfig](https://github.com/enkot/nuxt-open-fetch/blob/18c4ef73a106f95378fd4313ebeb1ef0c25de22a/.editorconfig#L9) but its setup was ignored. |
nuxt-open-fetch | github_2023 | typescript | 22 | enkot | IlyaSemenov | @@ -12,3 +12,4 @@ export default defineNuxtPlugin(() => {
}), {})
}
})
+ | why the stray change? |
nuxt-open-fetch | github_2023 | others | 23 | enkot | skf-funzt | @@ -1,6 +1,19 @@
# Changelog
+## v0.5.0
+
+[compare changes](https://github.com/enkot/nuxt-open-fetch/compare/v0.4.5...v0.5.0)
+
+### 🚀 Enhancements
+
+- Nitro support ([d30287f](https://github.com/enkot/nuxt-open-fetch/commit/d30287f))
+
+### ❤️ Contributors
+
+- Enkot ([@enkot](http://github.com/enkot))
+- Stephan Koglin-Fischer ([@skf-funzt](http://github.com/skf-funzt)) | :star_struck: |
nuxt-open-fetch | github_2023 | others | 17 | enkot | IlyaSemenov | @@ -1 +1,2 @@
typescript.includeWorkspace=true
+imports.autoImport=false | The project has `.editorconfig` which explicitly forces final new line:
https://github.com/enkot/nuxt-open-fetch/blob/55a619189adde50a418c64d1d1da1bd93c0bdc9c/.editorconfig#L9
Please lint your code accordingly. |
magic-spreadsheets.github.io | github_2023 | typescript | 43 | magic-spreadsheets | hihumikan | @@ -2,6 +2,6 @@ import type { ContactDetails } from './src/types';
// お問い合わせ先
export const contactDetails: ContactDetails = {
- text: '@keigomichi',
- href: 'https://twitter.com/keigomichi',
+ text: '@hihumikan', | ```suggestion
text: '@mikan_54951',
``` |
magic-spreadsheets.github.io | github_2023 | typescript | 43 | magic-spreadsheets | hihumikan | @@ -1,6 +1,12 @@
import type { Maintainers } from './src/types';
export const maintainers: Maintainers = {
+ 2024: [
+ {
+ text: '@hihumikan', | ```suggestion
text: '@mikan_54951',
``` |
magic-spreadsheets.github.io | github_2023 | others | 43 | magic-spreadsheets | hihumikan | @@ -33,7 +33,7 @@ const hashtags = encodeURIComponent(`魔法のスプレッドシート,インタ
<meta property="og:type" content="website" />
<meta property="og:image" content="https://magic-spreadsheets.pages.dev/thumbnail.png" />
<meta property="twitter:card" content="summary_large_image" />
- <meta property="twitter:site" content="@keigomichi" />
+ <meta property="twitter:site" content="@hihumikan" /> | ```suggestion
<meta property="twitter:site" content="@mikan_54951" />
``` |
magic-spreadsheets.github.io | github_2023 | others | 43 | magic-spreadsheets | hihumikan | @@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2023 keigomichi
+Copyright (c) 2023 hihumikan | ```suggestion
Copyright (c) 2023 magic-spreadsheets
``` |
magic-spreadsheets.github.io | github_2023 | others | 2 | magic-spreadsheets | keigomichi | @@ -1,15 +1,271 @@
---
---
-<html lang="en">
+<html lang="ja">
<head>
<meta charset="utf-8" />
<link rel="icon" type="image/svg+xml" href="/favicon.svg" />
<meta name="viewport" content="width=device-width" />
<meta name="generator" content={Astro.generator} />
- <title>Astro</title>
+ <title>魔法のスプレッドシート</title>
</head>
<body>
- <h1>Astro</h1>
+ <main>
+ <div class="content">
+ <div class="content-title">
+ <div class="top-icon">
+ <img src="https://www.notion.so/icons/table_green.svg?mode=dark" />
+ </div>
+ <h1>魔法のスプレッドシート</h1>
+ <p class="gray">
+ ITエンジニアインターン情報が集まる魔法のスプレッドシート
+ </p>
+ </div>
+ <a href="https://www.notion.so/049ca5329bbc4493bc9f4f5b3727d027">
+ <div class="content-link">
+ <img src="https://www.notion.so/icons/table_green.svg?mode=dark" />
+ <div>魔法のスプレッドシート2023・夏</div>
+ </div>
+ </a>
+ <h2 id="目次">目次</h2>
+ <ul class="contents">
+ <li><a href="#目次">目次</a></li>
+ <li>
+ <a href="#魔法のスプレッドシートとは?"
+ >魔法のスプレッドシートとは?</a
+ >
+ </li>
+ <li><a href="#フィードバック">フィードバック</a></li>
+ <li><a href="#バックナンバー">バックナンバー</a></li>
+ <li><a href="#管理者">管理者</a></li>
+ </ul>
+ <h2 id="魔法のスプレッドシートとは?">魔法のスプレッドシートとは?</h2>
+ <p>
+ ソフトウェアエンジニア志望の学生のためのインターンシップの情報が閲覧できる便利なデータベースです。
+ </p>
+ <p>応募締め切りや開催方法などの情報が一覧で確認できます。</p>
+ <p>毎年、有志によって管理されています。</p>
+ <h2 id="フィードバック">フィードバック</h2>
+ <p>
+ 魔法のスプレッドシートに追加したいインターンシップ等の情報がありましたらURLをお知らせください。
+ </p>
+ <p>またデータベースに誤り等がありましたら改善点をお知らせください。</p>
+ <p class="gray">データベースへの反映には数日かかる場合がございます。</p>
+ <iframe | @cardseditor フォームについて、tally以外で作成することも検討するので、一旦iframeごと削除してもらっていいですか? |
tonlib-rs | github_2023 | others | 143 | ston-fi | ruslanracheev | @@ -85,4 +85,7 @@
* Impl #BE-2088: Wallet v5 message building
### v0.24.2
* Bump tonlib-sys to 2025.2.2
-
+### v0.24.3
+* Impl #ni: tonaddres::from_msg_address | typo `tonaddress` |
tonlib-rs | github_2023 | others | 115 | ston-fi | dbaranovstonfi | @@ -50,7 +50,7 @@ tokio-retry = "0.3"
tokio-test = "0.4"
ton_liteapi = "0.1.0"
adnl = "2.0"
-tonlib-sys = "=2024.9.0"
+tonlib-sys = "=2024.11.0" | The version of tonlib-sys will be 2024.10.1. |
tonlib-rs | github_2023 | others | 39 | ston-fi | dbaranovstonfi | @@ -329,22 +329,22 @@ mod tests {
#[ignore]
#[test]
fn check_code_hash() -> Result<(), TonCellError> {
- let raw = include_str!("../../resources/wallet/wallet_v3_code.hex");
- let boc = BagOfCells::parse(&hex::decode(raw).map_boc_deserialization_error()?)?;
+ let raw = include_str!("../../resources/wallet/wallet_v3r1.code");
+ let boc = BagOfCells::parse(&base64::decode(raw).map_boc_deserialization_error()?)?; | I would suggest to use function `BagOfCells::parse_base64` function here and in all similar places below.
|
tonlib-rs | github_2023 | others | 39 | ston-fi | dbaranovstonfi | @@ -42,19 +108,44 @@ impl WalletVersion {
&self,
workchain: i32,
key_pair: &KeyPair,
+ sub_wallet_id: Option<i32>,
) -> Result<BagOfCells, TonCellError> {
- let mut data_builder = CellBuilder::new();
- data_builder
- .store_u32(32, 0)?
- // seqno
- .store_u32(32, 698983191 + workchain as u32)?
- //wallet_id
- .store_slice(key_pair.public_key.as_slice())?; // public key
- if *self == WalletVersion::V4R2 {
- data_builder.store_bit(false)?;
- // empty plugin dict
- }
- let data_cell = data_builder.build()?;
+ let wallet_id = sub_wallet_id.unwrap_or(698983191 + workchain);
+ let public_key: [u8; 32] = key_pair
+ .public_key
+ .clone()
+ .try_into()
+ .expect("pubkey is always [u8; 32]");
+
+ let data_cell: Cell = match &self {
+ Self::V1R1 | Self::V1R2 | Self::V1R3 | Self::V2R1 | Self::V2R2 => DataV1R1 { | I think that using `WalletVersion::V1R1` instead of `Self::V1R1` would improve readability a lot. |
tonlib-rs | github_2023 | others | 39 | ston-fi | dbaranovstonfi | @@ -42,19 +108,44 @@ impl WalletVersion {
&self,
workchain: i32,
key_pair: &KeyPair,
+ sub_wallet_id: Option<i32>,
) -> Result<BagOfCells, TonCellError> {
- let mut data_builder = CellBuilder::new();
- data_builder
- .store_u32(32, 0)?
- // seqno
- .store_u32(32, 698983191 + workchain as u32)?
- //wallet_id
- .store_slice(key_pair.public_key.as_slice())?; // public key
- if *self == WalletVersion::V4R2 {
- data_builder.store_bit(false)?;
- // empty plugin dict
- }
- let data_cell = data_builder.build()?;
+ let wallet_id = sub_wallet_id.unwrap_or(698983191 + workchain);
+ let public_key: [u8; 32] = key_pair
+ .public_key
+ .clone()
+ .try_into()
+ .expect("pubkey is always [u8; 32]");
+
+ let data_cell: Cell = match &self {
+ Self::V1R1 | Self::V1R2 | Self::V1R3 | Self::V2R1 | Self::V2R2 => DataV1R1 {
+ seqno: 0,
+ public_key,
+ }
+ .try_into()?,
+ Self::V3R1 | Self::V3R2 => DataV3R1 {
+ seqno: 0,
+ wallet_id: wallet_id as u32,
+ public_key,
+ }
+ .try_into()?,
+ Self::V4R1 | Self::V4R2 => DataV4R1 {
+ seqno: 0,
+ wallet_id: wallet_id as u32,
+ public_key,
+ }
+ .try_into()?,
+ Self::HighloadV2R2 => DataHighloadV2R2 {
+ wallet_id: wallet_id as u32,
+ last_cleaned_time: 0,
+ public_key,
+ }
+ .try_into()?,
+ _ => { | It is better to explicitly mention all possible WalletVersion members in this match . |
tonlib-rs | github_2023 | others | 39 | ston-fi | dbaranovstonfi | @@ -42,19 +108,44 @@ impl WalletVersion {
&self,
workchain: i32,
key_pair: &KeyPair,
+ sub_wallet_id: Option<i32>,
) -> Result<BagOfCells, TonCellError> {
- let mut data_builder = CellBuilder::new();
- data_builder
- .store_u32(32, 0)?
- // seqno
- .store_u32(32, 698983191 + workchain as u32)?
- //wallet_id
- .store_slice(key_pair.public_key.as_slice())?; // public key
- if *self == WalletVersion::V4R2 {
- data_builder.store_bit(false)?;
- // empty plugin dict
- }
- let data_cell = data_builder.build()?;
+ let wallet_id = sub_wallet_id.unwrap_or(698983191 + workchain);
+ let public_key: [u8; 32] = key_pair
+ .public_key
+ .clone()
+ .try_into()
+ .expect("pubkey is always [u8; 32]");
+
+ let data_cell: Cell = match &self {
+ Self::V1R1 | Self::V1R2 | Self::V1R3 | Self::V2R1 | Self::V2R2 => DataV1R1 {
+ seqno: 0,
+ public_key,
+ }
+ .try_into()?,
+ Self::V3R1 | Self::V3R2 => DataV3R1 {
+ seqno: 0,
+ wallet_id: wallet_id as u32,
+ public_key,
+ }
+ .try_into()?,
+ Self::V4R1 | Self::V4R2 => DataV4R1 {
+ seqno: 0,
+ wallet_id: wallet_id as u32,
+ public_key,
+ }
+ .try_into()?,
+ Self::HighloadV2R2 => DataHighloadV2R2 {
+ wallet_id: wallet_id as u32,
+ last_cleaned_time: 0,
+ public_key,
+ }
+ .try_into()?,
+ _ => {
+ unimplemented!("no generation for that wallet version") | We prefer to avoid usage of `unimplemented!` and other code that panics. |
tonlib-rs | github_2023 | others | 39 | ston-fi | dbaranovstonfi | @@ -70,6 +161,150 @@ impl WalletVersion {
}
}
+/// WalletVersion::V1R1 | WalletVersion::V1R2 | WalletVersion::V1R3 | WalletVersion::V2R1 | WalletVersion::V2R2
+pub struct DataV1R1 { | I would suggest to move this struct and the traits to wallet/types.rs |
tonlib-rs | github_2023 | others | 39 | ston-fi | dbaranovstonfi | @@ -70,6 +161,150 @@ impl WalletVersion {
}
}
+/// WalletVersion::V1R1 | WalletVersion::V1R2 | WalletVersion::V1R3 | WalletVersion::V2R1 | WalletVersion::V2R2
+pub struct DataV1R1 {
+ pub seqno: u32,
+ pub public_key: [u8; 32],
+}
+
+impl TryFrom<&Cell> for DataV1R1 {
+ type Error = TonCellError;
+
+ fn try_from(value: &Cell) -> Result<Self, Self::Error> {
+ let mut parser = value.parser();
+ let seqno = parser.load_u32(32)?;
+ let mut public_key = [0u8; 32];
+ parser.load_slice(&mut public_key)?;
+ Ok(Self { seqno, public_key })
+ }
+}
+
+impl TryInto<Cell> for DataV1R1 {
+ type Error = TonCellError;
+
+ fn try_into(self) -> Result<Cell, Self::Error> {
+ CellBuilder::new()
+ .store_u32(32, self.seqno)?
+ .store_slice(&self.public_key)?
+ .build()
+ }
+}
+
+/// WalletVersion::V3R1 | WalletVersion::V3R2
+pub struct DataV3R1 {
+ pub seqno: u32,
+ pub wallet_id: u32, | Looks like ` wallet_id` should be i32 to avoid unnecessary typecasts above. However I have noticed that we do not have a corresponding loader function.
Please feel free to add
`pub fn load_i32(&mut self, bit_len: usize) -> Result<i32, TonCellError>` to `pub struct CellParser<'a>` |
tonlib-rs | github_2023 | others | 39 | ston-fi | dbaranovstonfi | @@ -42,19 +108,44 @@ impl WalletVersion {
&self,
workchain: i32,
key_pair: &KeyPair,
+ sub_wallet_id: Option<i32>,
) -> Result<BagOfCells, TonCellError> {
- let mut data_builder = CellBuilder::new();
- data_builder
- .store_u32(32, 0)?
- // seqno
- .store_u32(32, 698983191 + workchain as u32)?
- //wallet_id
- .store_slice(key_pair.public_key.as_slice())?; // public key
- if *self == WalletVersion::V4R2 {
- data_builder.store_bit(false)?;
- // empty plugin dict
- }
- let data_cell = data_builder.build()?;
+ let wallet_id = sub_wallet_id.unwrap_or(698983191 + workchain);
+ let public_key: [u8; 32] = key_pair
+ .public_key
+ .clone()
+ .try_into()
+ .expect("pubkey is always [u8; 32]"); | We avoid using `expect` as it panics. Returning ` TonCellError::InternalError("Invalid public key size".to_string())` looks more suitable. |
tonlib-rs | github_2023 | others | 39 | ston-fi | dbaranovstonfi | @@ -70,6 +161,150 @@ impl WalletVersion {
}
}
+/// WalletVersion::V1R1 | WalletVersion::V1R2 | WalletVersion::V1R3 | WalletVersion::V2R1 | WalletVersion::V2R2
+pub struct DataV1R1 {
+ pub seqno: u32,
+ pub public_key: [u8; 32],
+}
+
+impl TryFrom<&Cell> for DataV1R1 {
+ type Error = TonCellError;
+
+ fn try_from(value: &Cell) -> Result<Self, Self::Error> {
+ let mut parser = value.parser();
+ let seqno = parser.load_u32(32)?;
+ let mut public_key = [0u8; 32];
+ parser.load_slice(&mut public_key)?;
+ Ok(Self { seqno, public_key })
+ }
+}
+
+impl TryInto<Cell> for DataV1R1 { | I would suggest to follow guideline in rust [documentation](https://doc.rust-lang.org/std/convert/trait.TryInto.html) , which recommends to use ` impl From` and `impl TryFrom` instead of `impl Into` and `impl TryInto`respectively.
|
tonlib-rs | github_2023 | others | 40 | ston-fi | ruslanracheev | @@ -1,6 +1,6 @@
[package]
name = "tonlib"
-version = "0.12.1-dev"
+version = "0.12.2" | it is not a minor update. there are incompatible changes. |
tonlib-rs | github_2023 | others | 16 | ston-fi | dbaranovstonfi | @@ -0,0 +1,18 @@
+use tonlib::cell::BagOfCells; | I would recommend to move this test to client_test.rs to preserve consistency |
tonlib-rs | github_2023 | others | 16 | ston-fi | dbaranovstonfi | @@ -65,6 +66,9 @@ pub enum TonResult {
#[serde(rename = "blocks.header")]
BlocksHeader(BlocksHeader),
// tonlib_api.tl, line 228
+ #[serde(rename = "configInfo")]
+ ChainConfigInfo(ChainConfigInfo), | I would suggest preserving the original naming from tonlob_api.tl: ConfigInfo |
tonlib-rs | github_2023 | others | 15 | ston-fi | ruslanracheev | @@ -42,6 +43,8 @@ pub enum TonResult {
// tonlib_api.tl, line 181
#[serde(rename = "smc.runResult")]
SmcRunResult(SmcRunResult),
+ #[serde(rename = "tvm.cell")] | здесь надо указать в комменте на какой строке это в tonlib_api.tl |
tonlib-rs | github_2023 | others | 6 | ston-fi | Vasilisck | @@ -0,0 +1,141 @@
+{ | Видимо гит игнор надо обновить |
tonlib-rs | github_2023 | others | 6 | ston-fi | Vasilisck | @@ -226,6 +226,15 @@ pub trait TonFunctions {
}
}
+ async fn smc_forget(&self, id: i64) -> anyhow::Result<(TonConnection, i64)> {
+ let func = TonFunction::SmcForget { id };
+ let (conn, result) = self.invoke_on_connection(&func).await?; | тут точно надо invoke_on_connection а не invoke ? |
tonlib-rs | github_2023 | others | 6 | ston-fi | Vasilisck | @@ -113,3 +113,33 @@ impl TonContract {
}
}
}
+
+impl Drop for TonContract {
+ fn drop(&mut self) {
+ let runtime = match tokio::runtime::Builder::new_current_thread() | Я думаю стоит переписать на tokio::spawn т.к. в других местах мы его используем. |
tonlib-rs | github_2023 | others | 6 | ston-fi | ruslanracheev | @@ -112,4 +112,23 @@ impl TonContract {
)),
}
}
+
+ async fn forget_state(&self) -> anyhow::Result<()> {
+ if let Ok(state) = self.load_state().await { | зачем мы в методе forget_state сначала его загружаем? |
halo-theme-chirpy | github_2023 | others | 90 | AirboZH | AirboZH | @@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html
+ xmlns:th="https://www.thymeleaf.org"
+ th:replace="~{modules/layout :: html(title = ${error.status} + ': ' + ${#strings.defaultString(error.title, 'Internal server error')} + ' - ' +${site.title},
+ content = ~{modules/page :: page(~{::content}, '404')})}" | 页面layout可以写为error |
halo-theme-chirpy | github_2023 | others | 90 | AirboZH | AirboZH | @@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html
+ xmlns:th="https://www.thymeleaf.org"
+ th:replace="~{modules/layout :: html(title = ${error.status} + ': ' + ${#strings.defaultString(error.title, 'Internal server error')} + ' - ' +${site.title},
+ content = ~{modules/page :: page(~{::content}, '404')})}"
+>
+ <th:block th:fragment="content">
+ <h1 data-toc-skip>
+ [[${error.status} + ': ' + ${#strings.defaultString(error.title, 'Internal
+ server error')}]]
+ </h1>
+
+ <div class="content">
+ <p class="lead">
+ Sorry, we've misplaced that URL or it's pointing to something that
+ doesn't exist.
+ <br />
+ Click <a href="/">here</a> to back home.
+ </p> | 可否根据error.status做出调整,或者也可以做成,`我们遇到一点小错误`之类笼统的说法。
目前这个404的提示可能不适用于其他error |
halo-theme-chirpy | github_2023 | others | 82 | AirboZH | AirboZH | @@ -0,0 +1,46 @@
+<!DOCTYPE html>
+<html lang="en" xmlns:th="http://www.thymeleaf.org">
+
+<th:block th:fragment="relatedPosts">
+ <!-- 阅读建议 -->
+ <aside id="related-posts" aria-labelledby="related-label">
+ <h3 class="mb-4">Further Reading</h3>
+ <div th:if="${not #lists.isEmpty(post.categories)}">
+ <nav class="row row-cols-1 row-cols-md-2 row-cols-xl-3 g-4 mb-4"
+ th:each="recommendPosts : ${post.categories[0]}"
+ th:with="recommendPosts = ${postFinder.listByCategory(1, 10, recommendPosts.metadata.name)}">
+ <article class="col"
+ th:each="recommendPost : ${recommendPosts.items}"
+ th:if="${post.metadata.name != recommendPost.metadata.name}">
+ <a th:href="@{${recommendPost.status.permalink}}" class="post-preview card h-100">
+ <div class="card-body">
+ <time class="small" style="color: #A4A4A4;"
+ th:text="${#dates.format(recommendPost.spec.publishTime,'MMM d, yyyy')}"></time>
+ <h4 class="pt-0 my-2" th:text="${recommendPost.spec.title}"></h4>
+ <div class="text-muted small">
+ <p th:text="${recommendPost.spec.excerpt.raw}"></p> | ```suggestion
<p th:text="${recommendPost.status.excerpt}"></p>
```
|
halo-theme-chirpy | github_2023 | others | 82 | AirboZH | AirboZH | @@ -0,0 +1,44 @@
+<!DOCTYPE html>
+<html lang="en" xmlns:th="http://www.thymeleaf.org">
+
+<th:block th:fragment="relatedPosts">
+ <!-- 阅读建议 -->
+ <aside id="related-posts" aria-labelledby="related-label">
+ <h3 class="mb-4">Further Reading</h3>
+ <div th:if="${not #lists.isEmpty(post.categories)}">
+ <nav class="row row-cols-1 row-cols-md-2 row-cols-xl-3 g-4 mb-4"
+ th:with="recommendPosts = ${postFinder.listByCategory(1, 3, post.categories[0].metadata.name)}">
+ <article class="col"
+ th:each="recommendPost : ${recommendPosts.items}">
+ <a th:href="@{${recommendPost.status.permalink}}" class="post-preview card h-100">
+ <div class="card-body">
+ <time class="small" style="color: #A4A4A4;"
+ th:text="${#dates.format(recommendPost.spec.publishTime,'MMM d, yyyy')}"></time>
+ <h4 class="pt-0 my-2" th:text="${recommendPost.spec.title}"></h4>
+ <div class="text-muted small">
+ <p th:text="${recommendPost.status.excerpt}"></p>
+ </div>
+ </div>
+ </a>
+ </article>
+ </nav>
+ </div>
+ </aside>
+
+ <!-- 文章上下页 -->
+ <nav class="post-navigation d-flex justify-content-between pt-0" aria-label="Post Navigation"
+ th:with="postCursor = ${postFinder.cursor(post.metadata.name)}">
+ <a th:href="@{${postCursor.previous.status.permalink}}" class="btn btn-outline-primary">
+ <span style="color: #A4A4A4; font-size: 8px;">OLDER</span>
+ <p th:text="${postCursor.previous.spec.title}"></p>
+ </a>
+ <a th:href="@{${postCursor.next.status.permalink}}" class="btn btn-outline-primary">
+ <span style="color: #A4A4A4; font-size: 8px;">NEWER</span>
+ <p th:text="${postCursor.next.spec.title}"></p>
+ </a> | 可以判断`postCursor.next`是否为空
为空则为
```html
<div
class="btn btn-outline-primary disabled"
prompt="{{ site.data.locales[include.lang].post.button.previous }}"
>
<p>-</p>
</div>
```
参考https://github.com/AirboZH/halo-theme-chirpy/blob/master/src/_layouts/_includes/post-nav.html |
halo-theme-chirpy | github_2023 | others | 58 | AirboZH | AirboZH | @@ -3,10 +3,18 @@
<footer th:fragment="footer()">
<div class="container px-lg-4">
<div
- class="d-flex justify-content-center align-items-center text-muted mx-md-3"
+ class=" d-flex justify-content-center text-muted flex-lg-row justify-content-lg-between align-items-lg-center pb-lg-3 break-words"
>
<halo:footer />
- <p>
+ <span class="ml-2">
+ <a
+ href="https://beian.miit.gov.cn"
+ target="_blank"
+ rel="noopener"
+ >[[${theme.config.basics.icp_text}]]</a
+ >
+ </span>
+ <span> | 希望在这有一个备案配置value为空的判断。不然如果这个信息为空,页脚文字都是靠右的
当前效果:

删除后效果:

|
awesome-ai-safety | github_2023 | others | 4 | Giskard-AI | alexcombessie | @@ -48,17 +50,23 @@ You can browse papers by Machine Learning task category, and use hashtags like `
* [Anchors: High-Precision Model-Agnostic Explanations](https://homes.cs.washington.edu/~marcotcr/aaai18.pdf) (Ribeiro et al., 2018) `#Explainability`
* [Explanation-Based Human Debugging of NLP Models: A Survey](https://direct.mit.edu/tacl/article/doi/10.1162/tacl_a_00440/108932/Explanation-Based-Human-Debugging-of-NLP-Models-A) (Lertvittayakumjorn, et al., 2021) `#Debugging`
* [SEAL: Interactive Tool for Systematic Error Analysis and Labeling](https://arxiv.org/abs/2210.05839) (Rajani et al., 2022) `#DataSlice` `#Explainability`
+* [Data Statements for Natural Language Processing: Toward Mitigating System Bias and Enabling Better Science](https://direct.mit.edu/tacl/article/doi/10.1162/tacl_a_00041/43452/Data-Statements-for-Natural-Language-Processing) (Bender and Friedman, 2018) `#Bias`
+* [Equalizing Gender Biases in Neural Machine Translation with Word Embeddings Techniques](https://arxiv.org/abs/1901.03116) (Font and Costa-jussà, 2019) `#Bias`
+* [On Measuring Social Biases in Sentence Encoders](https://arxiv.org/abs/1903.10561) (May et al., 2019) `#Bias`
### Large Language Models
* [Holistic Evaluation of Language Models](https://arxiv.org/abs/2211.09110) (Liang et al., 2022) `#General`
* [Learning to summarize from human feedback](https://proceedings.neurips.cc/paper/2020/file/1f89885d556929e98d3ef9b86448f951-Paper.pdf) (Stiennon et al., 2020) `#HumanFeedback`
+* [Identifying and Reducing Gender Bias in Word-Level Language Models](https://arxiv.org/abs/1904.03035) (Bordia and Bowman, 2019) `#Bias`
## Computer Vision
* [DOMINO: Discovering Systematic Errors with Cross-modal Embeddings Domino](https://arxiv.org/pdf/2203.14960.pdf) (Eyuboglu et al., 2022) `#DataSlice`
* [Explaining in Style: Training a GAN to explain a classifier in StyleSpace](https://arxiv.org/pdf/2104.13369.pdf) (Lang et al., 2022) `#Robustness`
* [Model Assertions for Debugging Machine Learning](https://ddkang.github.io/papers/2018/omg-nips-ws.pdf) (Kang et al., 2018) `#Debugging`
+* [Uncovering and Mitigating Algorithmic Bias through Learned Latent Structure](https://dl.acm.org/doi/abs/10.1145/3306618.3314243) (Amini et al.) `#Bias`
+* [Diversity in Faces](https://arxiv.org/abs/1901.10436) (Merler et al.) `#Fairness #Accuracy` | ```suggestion
* [Diversity in Faces](https://arxiv.org/abs/1901.10436) (Merler et al.) `#Fairness` `#Accuracy`
``` |
ngxtension-platform | github_2023 | typescript | 587 | ngxtension | nartc | @@ -0,0 +1,18 @@
+import { HttpContext } from '@angular/common/http';
+
+/**
+ * Merge multiple HttpContext.
+ *
+ * @param contexts Two or more http contexts to be merged.
+ * @returns A merged HttpContext.
+ *
+ */
+
+export function mergeHttpContext(...contexts: HttpContext[]): HttpContext {
+ return contexts.reduce((prev, curr) => {
+ Array.from(curr.keys()).forEach((contextToken) =>
+ prev.set(contextToken, curr.get(contextToken)),
+ );
+ return prev;
+ }, new HttpContext()); | nit/question: I'm not too familiar with `HttpContext` so I might miss something but why can't we use the first context here instead of a new `HttpContext` instance? |
ngxtension-platform | github_2023 | others | 450 | ngxtension | eneajaho | @@ -50,11 +65,52 @@ class TestComponent {
}
```
-Or, if we want to transform the params, we can pass a function to `injectParams`.
+#### Transform
+
+If you want to additional parse the specific param, we can pass a `parse` function. | ```suggestion
If you want to parse the specific param, you can pass a `parse` function.
``` |
ngxtension-platform | github_2023 | others | 450 | ngxtension | eneajaho | @@ -50,11 +65,52 @@ class TestComponent {
}
```
-Or, if we want to transform the params, we can pass a function to `injectParams`.
+#### Transform
+
+If you want to additional parse the specific param, we can pass a `parse` function.
```ts
-@Component()
+@Component({
+ template: `
+ @if (user(); as user) {
+ <div>{{ user.name }}</div>
+ } @else {
+ <div>No user!</div>
+ }
+ `,
+})
class TestComponent {
- paramsKeys = injectParams((params) => Object.keys(params)); // returns a signal with the keys of the params
+ userId = injectParams('id', { parse: numberAttribute }); // returns a signal with the value of the id param parsed to a number
+
+ user = derivedFrom(
+ [this.userId],
+ switchMap((id) => this.userService.getUser(id).pipe(startWith(null))),
+ ); | Should we update this to use rxResource? At leats that's how I'd use it nowdays. What do you think? |
ngxtension-platform | github_2023 | typescript | 450 | ngxtension | eneajaho | @@ -1,31 +1,44 @@
-import { assertInInjectionContext, inject, type Signal } from '@angular/core';
+import { inject, type Signal } from '@angular/core';
import { toSignal } from '@angular/core/rxjs-interop';
import { ActivatedRoute, type Params } from '@angular/router';
+import { assertInjector } from 'ngxtension/assert-injector';
+import {
+ DefaultValueOptions,
+ InjectorOptions,
+ ParseOptions,
+} from 'ngxtension/shared';
import { map } from 'rxjs';
type QueryParamsTransformFn<ReadT> = (params: Params) => ReadT;
/**
- * The `InputOptions` interface defines options for configuring the behavior of the `injectQueryParams` function.
+ * The `QueryParamsOptions` type defines options for configuring the behavior of the `injectQueryParams` function.
*
* @template ReadT - The expected type of the read value.
* @template WriteT - The type of the value to be written.
- * @template InitialValueT - The type of the initial value.
+ * @template DefaultValueT - The type of the default value.
*/
-export interface QueryParamsOptions<ReadT, WriteT, InitialValueT> {
- /**
- * A transformation function to convert the written value to the expected read value.
- *
- * @param v - The value to transform.
- * @returns The transformed value.
- */
- transform?: (v: WriteT) => ReadT;
-
- /**
- * The initial value to use if the query parameter is not present or undefined.
- */
- initialValue?: InitialValueT;
-}
+export type QueryParamsOptions<ReadT, DefaultValueT> = ParseOptions<
+ ReadT,
+ string | null
+> &
+ DefaultValueOptions<DefaultValueT> &
+ InjectorOptions & {
+ /**
+ * The initial value to use if the query parameter is not present or undefined.
+ *
+ * @deprecated Use `defaultValue` as a replacement.
+ */
+ initialValue?: DefaultValueT; | We can write a migration schematic when we remove this one. So it's smooth for everyone when they update. |
ngxtension-platform | github_2023 | others | 528 | ngxtension | eneajaho | @@ -0,0 +1,95 @@
+---
+title: Queries Migration
+description: Schematics for migrating from decorator-based Queries to Signal-based Queries
+entryPoint: plugin/src/generators/convert-queries
+badge: stable
+contributors: ['enea-jahollari']
+--- | This needs to be updated |
ngxtension-platform | github_2023 | others | 528 | ngxtension | eneajaho | @@ -0,0 +1,95 @@
+---
+title: Queries Migration
+description: Schematics for migrating from decorator-based Queries to Signal-based Queries
+entryPoint: plugin/src/generators/convert-queries
+badge: stable
+contributors: ['enea-jahollari']
+---
+
+Recent releases of Angular have deprecated the `@HostBinding` and `@HostListener` decorators, replacing them with `host` defined properties. This migration schematic will help you convert your existing `@HostBinding` and `@HostListener` decorators to the new `host` properties. | The decorators are not yet deprecated |
ngxtension-platform | github_2023 | typescript | 528 | ngxtension | eneajaho | @@ -0,0 +1,108 @@
+import { Tree } from '@nx/devkit';
+import { createTreeWithEmptyWorkspace } from '@nx/devkit/testing';
+
+import { convertHostBindingGenerator } from './generator';
+import { ConvertHostBindingGeneratorSchema } from './schema';
+
+const filesMap = { | I'd like to see some more test cases. And before we merge this one, we will need to make sure that we don't break apps. |
ngxtension-platform | github_2023 | typescript | 543 | ngxtension | michael-small | @@ -0,0 +1,184 @@
+import {
+ computed,
+ Injector,
+ Signal,
+ signal,
+ WritableSignal,
+} from '@angular/core';
+import { explicitEffect } from 'ngxtension/explicit-effect';
+
+interface SignalHistoryRecord<T> {
+ value: T;
+ timestamp: number;
+}
+
+/**
+ * Creates a history record with the current timestamp.
+ * @param value The value to store in the history record.
+ * @returns A SignalHistoryRecord object.
+ */
+function createHistoryRecord<T>(value: T): SignalHistoryRecord<T> {
+ return { value, timestamp: Date.now() };
+}
+
+/**
+ * Enhances a writable signal with undo/redo history functionality.
+ *
+ * @param source The writable signal to track.
+ * @param options Configuration options for the history.
+ * @returns An object with `history`, `undo`, `redo`, `canUndo`, and `canRedo` properties.
+ */
+export function injectSignalHistory<T>(
+ source: WritableSignal<T>,
+ options?: {
+ /**
+ * The maximum number of history records to store.
+ * @default 50 | ```suggestion
* @default 100
``` |
ngxtension-platform | github_2023 | typescript | 543 | ngxtension | JeanMeche | @@ -0,0 +1,184 @@
+import {
+ computed,
+ Injector,
+ Signal,
+ signal,
+ WritableSignal,
+} from '@angular/core';
+import { explicitEffect } from 'ngxtension/explicit-effect';
+
+interface SignalHistoryRecord<T> {
+ value: T;
+ timestamp: number;
+}
+
+/**
+ * Creates a history record with the current timestamp.
+ * @param value The value to store in the history record.
+ * @returns A SignalHistoryRecord object.
+ */
+function createHistoryRecord<T>(value: T): SignalHistoryRecord<T> {
+ return { value, timestamp: Date.now() };
+}
+
+/**
+ * Enhances a writable signal with undo/redo history functionality.
+ *
+ * @param source The writable signal to track.
+ * @param options Configuration options for the history.
+ * @returns An object with `history`, `undo`, `redo`, `canUndo`, and `canRedo` properties.
+ */
+export function injectSignalHistory<T>( | I personnaly don't like the "inject" part of the name.
You don't inject a signal, you create one, hense I would either go with `signalHistory()` or `createSignalHistory()` |
ngxtension-platform | github_2023 | typescript | 543 | ngxtension | JeanMeche | @@ -0,0 +1,184 @@
+import {
+ computed,
+ Injector,
+ Signal,
+ signal,
+ WritableSignal,
+} from '@angular/core';
+import { explicitEffect } from 'ngxtension/explicit-effect';
+
+interface SignalHistoryRecord<T> {
+ value: T;
+ timestamp: number;
+}
+
+/**
+ * Creates a history record with the current timestamp.
+ * @param value The value to store in the history record.
+ * @returns A SignalHistoryRecord object.
+ */
+function createHistoryRecord<T>(value: T): SignalHistoryRecord<T> {
+ return { value, timestamp: Date.now() };
+}
+
+/**
+ * Enhances a writable signal with undo/redo history functionality.
+ *
+ * @param source The writable signal to track.
+ * @param options Configuration options for the history.
+ * @returns An object with `history`, `undo`, `redo`, `canUndo`, and `canRedo` properties.
+ */
+export function injectSignalHistory<T>(
+ source: WritableSignal<T>,
+ options?: {
+ /**
+ * The maximum number of history records to store.
+ * @default 50
+ */
+ capacity?: number;
+
+ /**
+ * The injector to use for the effect.
+ * @default undefined
+ */
+ injector?: Injector;
+ },
+): {
+ /**
+ * The history of changes to the source signal.
+ */
+ history: Signal<SignalHistoryRecord<T>[]>;
+ /**
+ * Undo the last change to the source signal.
+ */
+ undo: () => void;
+ /**
+ * Redo the last undone change to the source signal.
+ */
+ redo: () => void;
+ /**
+ * Reset the history to the current state.
+ */
+ reset: () => void;
+ /**
+ * Clear the history. This will remove all history records.
+ */
+ clear: () => void;
+ /**
+ * A signal indicating if undo is possible.
+ */
+ canUndo: Signal<boolean>;
+ /**
+ * A signal indicating if redo is possible.
+ */
+ canRedo: Signal<boolean>;
+} {
+ const capacity = options?.capacity ?? 100; // Default capacity is 100 records | I'd guard that function with `options?.injector && assertInInjectionContext(...);` |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | JeanMeche | @@ -0,0 +1,134 @@
+import { JsonPipe } from '@angular/common';
+import { Component, effect, inject } from '@angular/core';
+import { FormsModule } from '@angular/forms';
+import { Router } from '@angular/router';
+import { linkedQueryParam } from 'ngxtension/linked-query-param';
+
+@Component({
+ standalone: true,
+ template: `
+ <div>
+ <label>
+ SearchQuery:
+ <input [(ngModel)]="filterState.searchQuery" name="searchQuery" />
+
+ <button type="button" (click)="filterState.searchQuery.set(null)">
+ reset
+ </button>
+ </label>
+ <br />
+
+ <label>
+ Size:
+ <select [(ngModel)]="filterState.pageSize" name="pageSize">
+ <option value="10">10</option>
+ <option value="20">20</option>
+ <option value="30">30</option>
+ <option [ngValue]="null">null</option>
+ </select>
+ </label>
+ <br />
+ <label>
+ Show deleted:
+ <input
+ [(ngModel)]="filterState.showDeleted"
+ name="showDeleted"
+ type="checkbox"
+ />
+ </label>
+ <br />
+
+ <label>
+ Page:
+ <input [(ngModel)]="filterState.pageNumber" name="pageNumber" />
+
+ <button type="button" (click)="filterState.pageNumber.set(1)">1</button>
+ <button type="button" (click)="filterState.pageNumber.set(2)">2</button>
+ <button type="button" (click)="filterState.pageNumber.set(3)">3</button>
+ <button type="button" (click)="filterState.pageNumber.set(null)">
+ null
+ </button>
+ </label>
+ <br />
+
+ <label>
+ SortBy:
+ <select [(ngModel)]="filterState.sortBy" name="sortBy">
+ <option value="name">name</option>
+ <option value="age">age</option>
+ </select>
+ </label>
+ <br />
+
+ <label>
+ Direction:
+ <select [(ngModel)]="filterState.direction" name="direction">
+ <option value="asc">asc</option>
+ <option value="desc">desc</option>
+ </select>
+ </label>
+
+ <br />
+
+ <button type="button" (click)="filterState.pageSize.set(10)">
+ pageSize 10
+ </button>
+ <button type="button" (click)="filterState.pageSize.set(20)">
+ pageSize 20
+ </button>
+ <button type="button" (click)="filterState.pageSize.set(null)">
+ pageSize null
+ </button>
+ <button type="button" (click)="resetAll()">reset all</button>
+ <hr />
+ <br />
+ <br />
+ <hr />
+ <br />
+ </div>
+ `,
+ imports: [FormsModule, JsonPipe],
+ styles: `
+ div {
+ padding: 20px;
+ }
+ `,
+})
+export default class LinkedQueryParamInsideObjectCmp {
+ private router = inject(Router);
+
+ filterState = {
+ searchQuery: linkedQueryParam('searchQuery'),
+ showDeleted: linkedQueryParam('showDeleted', {
+ parse: (x) => x === 'true',
+ }),
+ pageNumber: linkedQueryParam('page', { parse: (x) => (x ? +x : null) }),
+ pageSize: linkedQueryParam('pageSize', { parse: (x) => (x ? +x : null) }),
+ sortBy: linkedQueryParam('sortBy', { defaultValue: 'name' }),
+ direction: linkedQueryParam<'asc' | 'desc'>('direction', {
+ defaultValue: 'asc',
+ }),
+ };
+
+ constructor() {
+ Object.keys(this.filterState).forEach((key) => {
+ effect(() => {
+ // @ts-expect-error TODO: fix this | You have a todo left here |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | JeanMeche | @@ -0,0 +1,414 @@
+import {
+ effect,
+ inject,
+ Injectable,
+ Injector,
+ runInInjectionContext,
+ signal,
+ untracked,
+ WritableSignal,
+} from '@angular/core';
+import { toSignal } from '@angular/core/rxjs-interop';
+import {
+ ActivatedRoute,
+ NavigationExtras,
+ Params,
+ Router,
+} from '@angular/router';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { createNotifier } from 'ngxtension/create-notifier';
+import { distinctUntilKeyChanged, map } from 'rxjs';
+
+/**
+ * The type of the serialized value.
+ * After transforming the value before it is passed to the query param, this type will be used.
+ */
+type SerializeReturnType = string | number | boolean | null | undefined;
+
+/**
+ * These are the options that can be passed to the `linkedQueryParam` function.
+ * They are taken from the `NavigationExtras` type in the `@angular/router` package.
+ */
+type NavigateMethodFields = Pick<
+ NavigationExtras,
+ | 'queryParamsHandling'
+ | 'onSameUrlNavigation'
+ | 'replaceUrl'
+ | 'skipLocationChange'
+>;
+
+/**
+ * Service to coalesce multiple navigation calls into a single navigation event.
+ */
+@Injectable({ providedIn: 'root' })
+export class LinkedQueryParamGlobalHandler {
+ private router = inject(Router); | ```suggestion
private _router = inject(Router);
```
For consistency ? |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | JeanMeche | @@ -0,0 +1,414 @@
+import {
+ effect,
+ inject,
+ Injectable,
+ Injector,
+ runInInjectionContext,
+ signal,
+ untracked,
+ WritableSignal,
+} from '@angular/core';
+import { toSignal } from '@angular/core/rxjs-interop';
+import {
+ ActivatedRoute,
+ NavigationExtras,
+ Params,
+ Router,
+} from '@angular/router';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { createNotifier } from 'ngxtension/create-notifier';
+import { distinctUntilKeyChanged, map } from 'rxjs';
+
+/**
+ * The type of the serialized value.
+ * After transforming the value before it is passed to the query param, this type will be used.
+ */
+type SerializeReturnType = string | number | boolean | null | undefined;
+
+/**
+ * These are the options that can be passed to the `linkedQueryParam` function.
+ * They are taken from the `NavigationExtras` type in the `@angular/router` package.
+ */
+type NavigateMethodFields = Pick<
+ NavigationExtras,
+ | 'queryParamsHandling'
+ | 'onSameUrlNavigation'
+ | 'replaceUrl'
+ | 'skipLocationChange'
+>;
+
+/**
+ * Service to coalesce multiple navigation calls into a single navigation event.
+ */
+@Injectable({ providedIn: 'root' })
+export class LinkedQueryParamGlobalHandler {
+ private router = inject(Router);
+ /**
+ * @internal
+ * The current query params that will be set on the next navigation event.
+ */
+ private _currentKeys: Record<string, SerializeReturnType> = {};
+ /**
+ * @internal
+ * The navigation extras that will be used on the next navigation event.
+ */
+ private _navigationExtras: NavigationExtras = {};
+ /**
+ * @internal
+ * The notifier that will be used to schedule the navigation event.
+ */
+ _schedulerNotifier = createNotifier(); | ```suggestion
private _schedulerNotifier = createNotifier();
``` |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | JeanMeche | @@ -0,0 +1,414 @@
+import {
+ effect,
+ inject,
+ Injectable,
+ Injector,
+ runInInjectionContext,
+ signal,
+ untracked,
+ WritableSignal,
+} from '@angular/core';
+import { toSignal } from '@angular/core/rxjs-interop';
+import {
+ ActivatedRoute,
+ NavigationExtras,
+ Params,
+ Router,
+} from '@angular/router';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { createNotifier } from 'ngxtension/create-notifier';
+import { distinctUntilKeyChanged, map } from 'rxjs';
+
+/**
+ * The type of the serialized value.
+ * After transforming the value before it is passed to the query param, this type will be used.
+ */
+type SerializeReturnType = string | number | boolean | null | undefined;
+
+/**
+ * These are the options that can be passed to the `linkedQueryParam` function.
+ * They are taken from the `NavigationExtras` type in the `@angular/router` package.
+ */
+type NavigateMethodFields = Pick<
+ NavigationExtras,
+ | 'queryParamsHandling'
+ | 'onSameUrlNavigation'
+ | 'replaceUrl'
+ | 'skipLocationChange'
+>;
+
+/**
+ * Service to coalesce multiple navigation calls into a single navigation event.
+ */
+@Injectable({ providedIn: 'root' })
+export class LinkedQueryParamGlobalHandler {
+ private router = inject(Router);
+ /**
+ * @internal
+ * The current query params that will be set on the next navigation event.
+ */
+ private _currentKeys: Record<string, SerializeReturnType> = {};
+ /**
+ * @internal
+ * The navigation extras that will be used on the next navigation event.
+ */
+ private _navigationExtras: NavigationExtras = {};
+ /**
+ * @internal
+ * The notifier that will be used to schedule the navigation event.
+ */
+ _schedulerNotifier = createNotifier();
+
+ constructor() {
+ effect(() => {
+ // listen to the scheduler notifier to schedule the navigation event
+ this._schedulerNotifier.listen();
+
+ // we need to untrack the navigation call in order to not register any other signal as a dependency
+ untracked(() => this.navigate().then());
+ });
+ }
+
+ /**
+ * Sets the value of a query param.
+ * This will be used on the next navigation event.
+ */
+ setParamKeyValue(key: string, value: SerializeReturnType) {
+ this._currentKeys[key] = value;
+ }
+
+ /**
+ * Sets the navigation extras that will be used on the next navigation event.
+ */
+ setCurrentNavigationExtras(config: Partial<NavigateMethodFields>) {
+ const {
+ queryParamsHandling,
+ onSameUrlNavigation,
+ replaceUrl,
+ skipLocationChange,
+ } = config ?? {}; | if `config` is nullable, it should be reflected on the param type. |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | JeanMeche | @@ -0,0 +1,414 @@
+import {
+ effect,
+ inject,
+ Injectable,
+ Injector,
+ runInInjectionContext,
+ signal,
+ untracked,
+ WritableSignal,
+} from '@angular/core';
+import { toSignal } from '@angular/core/rxjs-interop';
+import {
+ ActivatedRoute,
+ NavigationExtras,
+ Params,
+ Router,
+} from '@angular/router';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { createNotifier } from 'ngxtension/create-notifier';
+import { distinctUntilKeyChanged, map } from 'rxjs';
+
+/**
+ * The type of the serialized value.
+ * After transforming the value before it is passed to the query param, this type will be used.
+ */
+type SerializeReturnType = string | number | boolean | null | undefined;
+
+/**
+ * These are the options that can be passed to the `linkedQueryParam` function.
+ * They are taken from the `NavigationExtras` type in the `@angular/router` package.
+ */
+type NavigateMethodFields = Pick<
+ NavigationExtras,
+ | 'queryParamsHandling'
+ | 'onSameUrlNavigation'
+ | 'replaceUrl'
+ | 'skipLocationChange'
+>;
+
+/**
+ * Service to coalesce multiple navigation calls into a single navigation event.
+ */
+@Injectable({ providedIn: 'root' })
+export class LinkedQueryParamGlobalHandler {
+ private router = inject(Router);
+ /**
+ * @internal
+ * The current query params that will be set on the next navigation event.
+ */
+ private _currentKeys: Record<string, SerializeReturnType> = {};
+ /**
+ * @internal
+ * The navigation extras that will be used on the next navigation event.
+ */
+ private _navigationExtras: NavigationExtras = {};
+ /**
+ * @internal
+ * The notifier that will be used to schedule the navigation event.
+ */
+ _schedulerNotifier = createNotifier();
+
+ constructor() {
+ effect(() => {
+ // listen to the scheduler notifier to schedule the navigation event
+ this._schedulerNotifier.listen();
+
+ // we need to untrack the navigation call in order to not register any other signal as a dependency
+ untracked(() => this.navigate().then());
+ });
+ }
+
+ /**
+ * Sets the value of a query param.
+ * This will be used on the next navigation event.
+ */
+ setParamKeyValue(key: string, value: SerializeReturnType) {
+ this._currentKeys[key] = value;
+ }
+
+ /**
+ * Sets the navigation extras that will be used on the next navigation event.
+ */
+ setCurrentNavigationExtras(config: Partial<NavigateMethodFields>) {
+ const {
+ queryParamsHandling,
+ onSameUrlNavigation,
+ replaceUrl,
+ skipLocationChange,
+ } = config ?? {};
+
+ if (queryParamsHandling || queryParamsHandling === '')
+ this._navigationExtras.queryParamsHandling = queryParamsHandling;
+ if (onSameUrlNavigation)
+ this._navigationExtras.onSameUrlNavigation = onSameUrlNavigation;
+ if (replaceUrl) this._navigationExtras.replaceUrl = replaceUrl;
+ if (skipLocationChange)
+ this._navigationExtras.skipLocationChange = skipLocationChange; | nit: I'd drop the curly only for the one liners |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | nartc | @@ -0,0 +1,72 @@
+import { JsonPipe } from '@angular/common';
+import { Component, effect, inject, untracked } from '@angular/core';
+import { FormsModule } from '@angular/forms';
+import { Title } from '@angular/platform-browser';
+import { linkedQueryParam } from 'ngxtension/linked-query-param';
+
+@Component({
+ standalone: true,
+ template: `
+ <div>
+ <pre>SearchQuery: {{ searchQuery() | json }}</pre>
+
+ <div>
+ <label>
+ Different inputs same signal:
+ <input [(ngModel)]="searchQuery" name="a" placeholder="searchQuery" />
+ <input [(ngModel)]="searchQuery" name="b" placeholder="searchQuery" />
+ </label>
+ </div>
+
+ <!-- <div>-->
+ <!-- <label>-->
+ <!-- Different signals same query param:-->
+ <!-- <input-->
+ <!-- [(ngModel)]="differentSignalWithSearchQuery"-->
+ <!-- name="c"-->
+ <!-- placeholder="differentSignalWithSearchQuery"-->
+ <!-- />-->
+ <!-- </label>-->
+ <!-- </div>--> | question: intended comment? |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | nartc | @@ -0,0 +1,72 @@
+import { JsonPipe } from '@angular/common';
+import { Component, effect, inject, untracked } from '@angular/core';
+import { FormsModule } from '@angular/forms';
+import { Title } from '@angular/platform-browser';
+import { linkedQueryParam } from 'ngxtension/linked-query-param';
+
+@Component({
+ standalone: true,
+ template: `
+ <div>
+ <pre>SearchQuery: {{ searchQuery() | json }}</pre>
+
+ <div>
+ <label>
+ Different inputs same signal:
+ <input [(ngModel)]="searchQuery" name="a" placeholder="searchQuery" />
+ <input [(ngModel)]="searchQuery" name="b" placeholder="searchQuery" />
+ </label>
+ </div>
+
+ <!-- <div>-->
+ <!-- <label>-->
+ <!-- Different signals same query param:-->
+ <!-- <input-->
+ <!-- [(ngModel)]="differentSignalWithSearchQuery"-->
+ <!-- name="c"-->
+ <!-- placeholder="differentSignalWithSearchQuery"-->
+ <!-- />-->
+ <!-- </label>-->
+ <!-- </div>-->
+
+ <button type="button" (click)="searchQuery.set('cool')">cool</button>
+ <button type="button" (click)="searchQuery.set('great')">great</button>
+ <button type="button" (click)="searchQuery.set(null)">Reset</button>
+ </div>
+ `,
+ imports: [FormsModule, JsonPipe],
+ styles: `
+ div {
+ padding: 20px;
+ }
+ `,
+})
+export default class LinkedQueryParamStringCmp {
+ private titleService = inject(Title);
+
+ searchQuery = linkedQueryParam('searchQuery', {
+ // NOTE in docs that serialize should come after parse in order for types to work correctly
+ // parse: (x) => x ? parseInt(x, 10) : null,
+ // serialize: (x) => x,
+ });
+
+ // differentSignalWithSearchQuery = linkedQueryParam('searchQuery', {
+ // defaultValue: 'default',
+ // });
+
+ constructor() {
+ effect(() => {
+ console.log('searchQuery Type: ', typeof this.searchQuery());
+ console.log('searchQuery Value: ', this.searchQuery());
+
+ untracked(() => {
+ const searchQuery = this.searchQuery();
+ if (searchQuery) {
+ this.titleService.setTitle(searchQuery);
+ } else {
+ this.titleService.setTitle('No search query!');
+ }
+ });
+ }); | nit: this seems a bit confusing to me. The `effect` seems to rely on `searchQuery` to update the `title` but the actual `searchQuery` value is in `untracked`? I am aware that the track of `searchQuery()` happens in the `console.log` |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | nartc | @@ -0,0 +1,134 @@
+import { JsonPipe } from '@angular/common';
+import { Component, effect, inject } from '@angular/core';
+import { FormsModule } from '@angular/forms';
+import { Router } from '@angular/router';
+import { linkedQueryParam } from 'ngxtension/linked-query-param';
+
+@Component({
+ standalone: true,
+ template: `
+ <div>
+ <label>
+ SearchQuery:
+ <input [(ngModel)]="filterState.searchQuery" name="searchQuery" />
+
+ <button type="button" (click)="filterState.searchQuery.set(null)">
+ reset
+ </button>
+ </label>
+ <br />
+
+ <label>
+ Size:
+ <select [(ngModel)]="filterState.pageSize" name="pageSize">
+ <option value="10">10</option>
+ <option value="20">20</option>
+ <option value="30">30</option>
+ <option [ngValue]="null">null</option>
+ </select>
+ </label>
+ <br />
+ <label>
+ Show deleted:
+ <input
+ [(ngModel)]="filterState.showDeleted"
+ name="showDeleted"
+ type="checkbox"
+ />
+ </label>
+ <br />
+
+ <label>
+ Page:
+ <input [(ngModel)]="filterState.pageNumber" name="pageNumber" />
+
+ <button type="button" (click)="filterState.pageNumber.set(1)">1</button>
+ <button type="button" (click)="filterState.pageNumber.set(2)">2</button>
+ <button type="button" (click)="filterState.pageNumber.set(3)">3</button>
+ <button type="button" (click)="filterState.pageNumber.set(null)">
+ null
+ </button>
+ </label>
+ <br />
+
+ <label>
+ SortBy:
+ <select [(ngModel)]="filterState.sortBy" name="sortBy">
+ <option value="name">name</option>
+ <option value="age">age</option>
+ </select>
+ </label>
+ <br />
+
+ <label>
+ Direction:
+ <select [(ngModel)]="filterState.direction" name="direction">
+ <option value="asc">asc</option>
+ <option value="desc">desc</option>
+ </select>
+ </label>
+
+ <br />
+
+ <button type="button" (click)="filterState.pageSize.set(10)">
+ pageSize 10
+ </button>
+ <button type="button" (click)="filterState.pageSize.set(20)">
+ pageSize 20
+ </button>
+ <button type="button" (click)="filterState.pageSize.set(null)">
+ pageSize null
+ </button>
+ <button type="button" (click)="resetAll()">reset all</button>
+ <hr />
+ <br />
+ <br />
+ <hr />
+ <br />
+ </div>
+ `,
+ imports: [FormsModule, JsonPipe],
+ styles: `
+ div {
+ padding: 20px;
+ }
+ `,
+})
+export default class LinkedQueryParamInsideObjectCmp {
+ private router = inject(Router);
+
+ filterState = {
+ searchQuery: linkedQueryParam('searchQuery'),
+ showDeleted: linkedQueryParam('showDeleted', {
+ parse: (x) => x === 'true',
+ }),
+ pageNumber: linkedQueryParam('page', { parse: (x) => (x ? +x : null) }),
+ pageSize: linkedQueryParam('pageSize', { parse: (x) => (x ? +x : null) }),
+ sortBy: linkedQueryParam('sortBy', { defaultValue: 'name' }),
+ direction: linkedQueryParam<'asc' | 'desc'>('direction', {
+ defaultValue: 'asc',
+ }),
+ };
+
+ constructor() {
+ Object.keys(this.filterState).forEach((key) => {
+ effect(() => { | nit: I wouldn't spawn an `effect` for each `filterState` key |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | nartc | @@ -0,0 +1,414 @@
+import {
+ effect,
+ inject,
+ Injectable,
+ Injector,
+ runInInjectionContext,
+ signal,
+ untracked,
+ WritableSignal,
+} from '@angular/core';
+import { toSignal } from '@angular/core/rxjs-interop';
+import {
+ ActivatedRoute,
+ NavigationExtras,
+ Params,
+ Router,
+} from '@angular/router';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { createNotifier } from 'ngxtension/create-notifier';
+import { distinctUntilKeyChanged, map } from 'rxjs';
+
+/**
+ * The type of the serialized value.
+ * After transforming the value before it is passed to the query param, this type will be used.
+ */
+type SerializeReturnType = string | number | boolean | null | undefined;
+
+/**
+ * These are the options that can be passed to the `linkedQueryParam` function.
+ * They are taken from the `NavigationExtras` type in the `@angular/router` package.
+ */
+type NavigateMethodFields = Pick<
+ NavigationExtras,
+ | 'queryParamsHandling'
+ | 'onSameUrlNavigation'
+ | 'replaceUrl'
+ | 'skipLocationChange'
+>;
+
+/**
+ * Service to coalesce multiple navigation calls into a single navigation event.
+ */
+@Injectable({ providedIn: 'root' })
+export class LinkedQueryParamGlobalHandler {
+ private router = inject(Router);
+ /**
+ * @internal
+ * The current query params that will be set on the next navigation event.
+ */
+ private _currentKeys: Record<string, SerializeReturnType> = {};
+ /**
+ * @internal
+ * The navigation extras that will be used on the next navigation event.
+ */
+ private _navigationExtras: NavigationExtras = {};
+ /**
+ * @internal
+ * The notifier that will be used to schedule the navigation event.
+ */
+ _schedulerNotifier = createNotifier();
+
+ constructor() {
+ effect(() => {
+ // listen to the scheduler notifier to schedule the navigation event
+ this._schedulerNotifier.listen();
+
+ // we need to untrack the navigation call in order to not register any other signal as a dependency
+ untracked(() => this.navigate().then()); | nit: my personal style is that if `then()` call is empty, I'd use `void` to annotate that: I skip the return value of this expression call.
```suggestion
untracked(() => void this.navigate());
``` |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | nartc | @@ -0,0 +1,414 @@
+import {
+ effect,
+ inject,
+ Injectable,
+ Injector,
+ runInInjectionContext,
+ signal,
+ untracked,
+ WritableSignal,
+} from '@angular/core';
+import { toSignal } from '@angular/core/rxjs-interop';
+import {
+ ActivatedRoute,
+ NavigationExtras,
+ Params,
+ Router,
+} from '@angular/router';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { createNotifier } from 'ngxtension/create-notifier';
+import { distinctUntilKeyChanged, map } from 'rxjs';
+
+/**
+ * The type of the serialized value.
+ * After transforming the value before it is passed to the query param, this type will be used.
+ */
+type SerializeReturnType = string | number | boolean | null | undefined;
+
+/**
+ * These are the options that can be passed to the `linkedQueryParam` function.
+ * They are taken from the `NavigationExtras` type in the `@angular/router` package.
+ */
+type NavigateMethodFields = Pick<
+ NavigationExtras,
+ | 'queryParamsHandling'
+ | 'onSameUrlNavigation'
+ | 'replaceUrl'
+ | 'skipLocationChange'
+>;
+
+/**
+ * Service to coalesce multiple navigation calls into a single navigation event.
+ */
+@Injectable({ providedIn: 'root' })
+export class LinkedQueryParamGlobalHandler {
+ private router = inject(Router);
+ /**
+ * @internal
+ * The current query params that will be set on the next navigation event.
+ */
+ private _currentKeys: Record<string, SerializeReturnType> = {};
+ /**
+ * @internal
+ * The navigation extras that will be used on the next navigation event.
+ */
+ private _navigationExtras: NavigationExtras = {};
+ /**
+ * @internal
+ * The notifier that will be used to schedule the navigation event.
+ */
+ _schedulerNotifier = createNotifier();
+
+ constructor() {
+ effect(() => {
+ // listen to the scheduler notifier to schedule the navigation event
+ this._schedulerNotifier.listen();
+
+ // we need to untrack the navigation call in order to not register any other signal as a dependency
+ untracked(() => this.navigate().then());
+ });
+ }
+
+ /**
+ * Sets the value of a query param.
+ * This will be used on the next navigation event.
+ */
+ setParamKeyValue(key: string, value: SerializeReturnType) {
+ this._currentKeys[key] = value;
+ }
+
+ /**
+ * Sets the navigation extras that will be used on the next navigation event.
+ */
+ setCurrentNavigationExtras(config: Partial<NavigateMethodFields>) { | nit: if `config` isn't required as inferred by `config ?? {}` below, provide the default value to the parameter
```suggestion
setCurrentNavigationExtras(config: Partial<NavigateMethodFields> = {}) {
``` |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | nartc | @@ -0,0 +1,414 @@
+import {
+ effect,
+ inject,
+ Injectable,
+ Injector,
+ runInInjectionContext,
+ signal,
+ untracked,
+ WritableSignal,
+} from '@angular/core';
+import { toSignal } from '@angular/core/rxjs-interop';
+import {
+ ActivatedRoute,
+ NavigationExtras,
+ Params,
+ Router,
+} from '@angular/router';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { createNotifier } from 'ngxtension/create-notifier';
+import { distinctUntilKeyChanged, map } from 'rxjs';
+
+/**
+ * The type of the serialized value.
+ * After transforming the value before it is passed to the query param, this type will be used.
+ */
+type SerializeReturnType = string | number | boolean | null | undefined;
+
+/**
+ * These are the options that can be passed to the `linkedQueryParam` function.
+ * They are taken from the `NavigationExtras` type in the `@angular/router` package.
+ */
+type NavigateMethodFields = Pick<
+ NavigationExtras,
+ | 'queryParamsHandling'
+ | 'onSameUrlNavigation'
+ | 'replaceUrl'
+ | 'skipLocationChange'
+>;
+
+/**
+ * Service to coalesce multiple navigation calls into a single navigation event.
+ */
+@Injectable({ providedIn: 'root' })
+export class LinkedQueryParamGlobalHandler {
+ private router = inject(Router);
+ /**
+ * @internal
+ * The current query params that will be set on the next navigation event.
+ */
+ private _currentKeys: Record<string, SerializeReturnType> = {};
+ /**
+ * @internal
+ * The navigation extras that will be used on the next navigation event.
+ */
+ private _navigationExtras: NavigationExtras = {};
+ /**
+ * @internal
+ * The notifier that will be used to schedule the navigation event.
+ */
+ _schedulerNotifier = createNotifier();
+
+ constructor() {
+ effect(() => {
+ // listen to the scheduler notifier to schedule the navigation event
+ this._schedulerNotifier.listen();
+
+ // we need to untrack the navigation call in order to not register any other signal as a dependency
+ untracked(() => this.navigate().then());
+ });
+ }
+
+ /**
+ * Sets the value of a query param.
+ * This will be used on the next navigation event.
+ */
+ setParamKeyValue(key: string, value: SerializeReturnType) {
+ this._currentKeys[key] = value;
+ }
+
+ /**
+ * Sets the navigation extras that will be used on the next navigation event.
+ */
+ setCurrentNavigationExtras(config: Partial<NavigateMethodFields>) {
+ const {
+ queryParamsHandling,
+ onSameUrlNavigation,
+ replaceUrl,
+ skipLocationChange,
+ } = config ?? {};
+
+ if (queryParamsHandling || queryParamsHandling === '')
+ this._navigationExtras.queryParamsHandling = queryParamsHandling;
+ if (onSameUrlNavigation)
+ this._navigationExtras.onSameUrlNavigation = onSameUrlNavigation;
+ if (replaceUrl) this._navigationExtras.replaceUrl = replaceUrl;
+ if (skipLocationChange)
+ this._navigationExtras.skipLocationChange = skipLocationChange;
+ }
+
+ /**
+ * Navigates to the current URL with the accumulated query parameters and navigation extras.
+ * Cleans up the current keys and navigation extras after the navigation.
+ */
+ private navigate(): Promise<boolean> {
+ return this.router
+ .navigate([], {
+ queryParams: this._currentKeys,
+ queryParamsHandling: 'merge', // can be overridden by the `queryParamsHandling` option
+ ...this._navigationExtras, // override the navigation extras
+ })
+ .then((value) => {
+ // we reset the current keys and navigation extras on navigation
+ // in order to avoid leaking to other navigations
+ this._currentKeys = {};
+ this._navigationExtras = {};
+ return value;
+ });
+ }
+}
+
+type LinkedQueryParamOptions = {
+ /**
+ * The injector to use to inject the router and activated route.
+ */
+ injector?: Injector;
+} & Partial<NavigateMethodFields>;
+
+/**
+ * These are the function types that will be used to parse and serialize the query param value.
+ */
+type ParseFn<T> = (value: string | null) => T;
+type SerializeFn<T> = (value: T) => string | number | null;
+
+/**
+ *These types will be used to define the return types of the `set` and `update` methods of the signal.
+ * We need to re-type the WritableSignal, so that the set and update methods can have null in the call signature.
+ * But the WritableSignal itself won't have null in the call signature, so we need to re-type it.
+ * This is needed in order to be able to reset the value to null,
+ * which is not possible with the WritableSignal that doesn't have null in it's type.
+ */
+type SignalSetFn<T> = (value: T) => void;
+type SignalUpdateFn<T> = (fn: (value: T) => T) => void;
+
+/**
+ * Creates a signal that is linked to a query parameter.
+ *
+ * You can parse the query param value before it is passed to the signal, this way you can transform the value from a string to a number or boolean or whatever you need.
+ * You can also serialize the value before it is passed to the query param, this way you can serialize the value from a number or boolean or object to a string or null.
+ *
+ * You can also use the `defaultValue` option to set a default value if the query param is not present in the url (null or undefined).
+ * NOTE: You cannot use both `defaultValue` and `parse` at the same time. You should use `parse` instead to handle the default value.
+ *
+ * You can set the signal to update the query parameter by calling the `set` or `update` method.
+ * Both methods will accept the value + null as a valid value, so you can remove the query parameter by passing null if needed.
+ *
+ * The 'set' and 'update' methods will update the value synchronously, but will schedule the navigation event to
+ * happen on the next tick (using root effect scheduling). This means the query params will be updated asynchronously.
+ * The changes will be coalesced into a single navigation event. This means that if you call `set` or `update` multiple times
+ * in a row (synchronously), only the last value will be updated in the query params.
+ *
+ * If you have multiple signals listening to the same query parameter, they will all be updated when the navigation event happens.
+ *
+ * @param key The name of the query parameter.
+ * @param options Configuration options for the signal.
+ * @returns A signal that is linked to the query parameter.
+ */
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & {
+ parse: ParseFn<T>;
+ serialize: SerializeFn<T>; | issue: I see this as an issue because of the semantic of Serialization in general. Pairing `parse` with `serialize` seems odd to me. I suggest `deserialize` and `serialize` even though `deserialize` is longer than `parse`, the semantic is clear as to what Serialization means in this case for query params. |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | nartc | @@ -0,0 +1,414 @@
+import {
+ effect,
+ inject,
+ Injectable,
+ Injector,
+ runInInjectionContext,
+ signal,
+ untracked,
+ WritableSignal,
+} from '@angular/core';
+import { toSignal } from '@angular/core/rxjs-interop';
+import {
+ ActivatedRoute,
+ NavigationExtras,
+ Params,
+ Router,
+} from '@angular/router';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { createNotifier } from 'ngxtension/create-notifier';
+import { distinctUntilKeyChanged, map } from 'rxjs';
+
+/**
+ * The type of the serialized value.
+ * After transforming the value before it is passed to the query param, this type will be used.
+ */
+type SerializeReturnType = string | number | boolean | null | undefined;
+
+/**
+ * These are the options that can be passed to the `linkedQueryParam` function.
+ * They are taken from the `NavigationExtras` type in the `@angular/router` package.
+ */
+type NavigateMethodFields = Pick<
+ NavigationExtras,
+ | 'queryParamsHandling'
+ | 'onSameUrlNavigation'
+ | 'replaceUrl'
+ | 'skipLocationChange'
+>;
+
+/**
+ * Service to coalesce multiple navigation calls into a single navigation event.
+ */
+@Injectable({ providedIn: 'root' })
+export class LinkedQueryParamGlobalHandler {
+ private router = inject(Router);
+ /**
+ * @internal
+ * The current query params that will be set on the next navigation event.
+ */
+ private _currentKeys: Record<string, SerializeReturnType> = {};
+ /**
+ * @internal
+ * The navigation extras that will be used on the next navigation event.
+ */
+ private _navigationExtras: NavigationExtras = {};
+ /**
+ * @internal
+ * The notifier that will be used to schedule the navigation event.
+ */
+ _schedulerNotifier = createNotifier();
+
+ constructor() {
+ effect(() => {
+ // listen to the scheduler notifier to schedule the navigation event
+ this._schedulerNotifier.listen();
+
+ // we need to untrack the navigation call in order to not register any other signal as a dependency
+ untracked(() => this.navigate().then());
+ });
+ }
+
+ /**
+ * Sets the value of a query param.
+ * This will be used on the next navigation event.
+ */
+ setParamKeyValue(key: string, value: SerializeReturnType) {
+ this._currentKeys[key] = value;
+ }
+
+ /**
+ * Sets the navigation extras that will be used on the next navigation event.
+ */
+ setCurrentNavigationExtras(config: Partial<NavigateMethodFields>) {
+ const {
+ queryParamsHandling,
+ onSameUrlNavigation,
+ replaceUrl,
+ skipLocationChange,
+ } = config ?? {};
+
+ if (queryParamsHandling || queryParamsHandling === '')
+ this._navigationExtras.queryParamsHandling = queryParamsHandling;
+ if (onSameUrlNavigation)
+ this._navigationExtras.onSameUrlNavigation = onSameUrlNavigation;
+ if (replaceUrl) this._navigationExtras.replaceUrl = replaceUrl;
+ if (skipLocationChange)
+ this._navigationExtras.skipLocationChange = skipLocationChange;
+ }
+
+ /**
+ * Navigates to the current URL with the accumulated query parameters and navigation extras.
+ * Cleans up the current keys and navigation extras after the navigation.
+ */
+ private navigate(): Promise<boolean> {
+ return this.router
+ .navigate([], {
+ queryParams: this._currentKeys,
+ queryParamsHandling: 'merge', // can be overridden by the `queryParamsHandling` option
+ ...this._navigationExtras, // override the navigation extras
+ })
+ .then((value) => {
+ // we reset the current keys and navigation extras on navigation
+ // in order to avoid leaking to other navigations
+ this._currentKeys = {};
+ this._navigationExtras = {};
+ return value;
+ });
+ }
+}
+
+type LinkedQueryParamOptions = {
+ /**
+ * The injector to use to inject the router and activated route.
+ */
+ injector?: Injector;
+} & Partial<NavigateMethodFields>;
+
+/**
+ * These are the function types that will be used to parse and serialize the query param value.
+ */
+type ParseFn<T> = (value: string | null) => T;
+type SerializeFn<T> = (value: T) => string | number | null;
+
+/**
+ *These types will be used to define the return types of the `set` and `update` methods of the signal.
+ * We need to re-type the WritableSignal, so that the set and update methods can have null in the call signature.
+ * But the WritableSignal itself won't have null in the call signature, so we need to re-type it.
+ * This is needed in order to be able to reset the value to null,
+ * which is not possible with the WritableSignal that doesn't have null in it's type.
+ */
+type SignalSetFn<T> = (value: T) => void;
+type SignalUpdateFn<T> = (fn: (value: T) => T) => void;
+
+/**
+ * Creates a signal that is linked to a query parameter.
+ *
+ * You can parse the query param value before it is passed to the signal, this way you can transform the value from a string to a number or boolean or whatever you need.
+ * You can also serialize the value before it is passed to the query param, this way you can serialize the value from a number or boolean or object to a string or null.
+ *
+ * You can also use the `defaultValue` option to set a default value if the query param is not present in the url (null or undefined).
+ * NOTE: You cannot use both `defaultValue` and `parse` at the same time. You should use `parse` instead to handle the default value.
+ *
+ * You can set the signal to update the query parameter by calling the `set` or `update` method.
+ * Both methods will accept the value + null as a valid value, so you can remove the query parameter by passing null if needed.
+ *
+ * The 'set' and 'update' methods will update the value synchronously, but will schedule the navigation event to
+ * happen on the next tick (using root effect scheduling). This means the query params will be updated asynchronously.
+ * The changes will be coalesced into a single navigation event. This means that if you call `set` or `update` multiple times
+ * in a row (synchronously), only the last value will be updated in the query params.
+ *
+ * If you have multiple signals listening to the same query parameter, they will all be updated when the navigation event happens.
+ *
+ * @param key The name of the query parameter.
+ * @param options Configuration options for the signal.
+ * @returns A signal that is linked to the query parameter.
+ */
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & {
+ parse: ParseFn<T>;
+ serialize: SerializeFn<T>;
+ },
+): WritableSignal<T> & {
+ set: SignalSetFn<T | null>;
+ update: SignalUpdateFn<T | null>;
+}; | nit/suggestion: I would make it explicitly clear that we'll be overriding `set` and `update` to allow `null`
```suggestion
): Omit<WritableSignal<T>, 'set' | 'update'> & {
set: SignalSetFn<T | null>;
update: SignalUpdateFn<T | null>;
};
``` |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | nartc | @@ -0,0 +1,414 @@
+import {
+ effect,
+ inject,
+ Injectable,
+ Injector,
+ runInInjectionContext,
+ signal,
+ untracked,
+ WritableSignal,
+} from '@angular/core';
+import { toSignal } from '@angular/core/rxjs-interop';
+import {
+ ActivatedRoute,
+ NavigationExtras,
+ Params,
+ Router,
+} from '@angular/router';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { createNotifier } from 'ngxtension/create-notifier';
+import { distinctUntilKeyChanged, map } from 'rxjs';
+
+/**
+ * The type of the serialized value.
+ * After transforming the value before it is passed to the query param, this type will be used.
+ */
+type SerializeReturnType = string | number | boolean | null | undefined;
+
+/**
+ * These are the options that can be passed to the `linkedQueryParam` function.
+ * They are taken from the `NavigationExtras` type in the `@angular/router` package.
+ */
+type NavigateMethodFields = Pick<
+ NavigationExtras,
+ | 'queryParamsHandling'
+ | 'onSameUrlNavigation'
+ | 'replaceUrl'
+ | 'skipLocationChange'
+>;
+
+/**
+ * Service to coalesce multiple navigation calls into a single navigation event.
+ */
+@Injectable({ providedIn: 'root' })
+export class LinkedQueryParamGlobalHandler {
+ private router = inject(Router);
+ /**
+ * @internal
+ * The current query params that will be set on the next navigation event.
+ */
+ private _currentKeys: Record<string, SerializeReturnType> = {};
+ /**
+ * @internal
+ * The navigation extras that will be used on the next navigation event.
+ */
+ private _navigationExtras: NavigationExtras = {};
+ /**
+ * @internal
+ * The notifier that will be used to schedule the navigation event.
+ */
+ _schedulerNotifier = createNotifier();
+
+ constructor() {
+ effect(() => {
+ // listen to the scheduler notifier to schedule the navigation event
+ this._schedulerNotifier.listen();
+
+ // we need to untrack the navigation call in order to not register any other signal as a dependency
+ untracked(() => this.navigate().then());
+ });
+ }
+
+ /**
+ * Sets the value of a query param.
+ * This will be used on the next navigation event.
+ */
+ setParamKeyValue(key: string, value: SerializeReturnType) {
+ this._currentKeys[key] = value;
+ }
+
+ /**
+ * Sets the navigation extras that will be used on the next navigation event.
+ */
+ setCurrentNavigationExtras(config: Partial<NavigateMethodFields>) {
+ const {
+ queryParamsHandling,
+ onSameUrlNavigation,
+ replaceUrl,
+ skipLocationChange,
+ } = config ?? {};
+
+ if (queryParamsHandling || queryParamsHandling === '')
+ this._navigationExtras.queryParamsHandling = queryParamsHandling;
+ if (onSameUrlNavigation)
+ this._navigationExtras.onSameUrlNavigation = onSameUrlNavigation;
+ if (replaceUrl) this._navigationExtras.replaceUrl = replaceUrl;
+ if (skipLocationChange)
+ this._navigationExtras.skipLocationChange = skipLocationChange;
+ }
+
+ /**
+ * Navigates to the current URL with the accumulated query parameters and navigation extras.
+ * Cleans up the current keys and navigation extras after the navigation.
+ */
+ private navigate(): Promise<boolean> {
+ return this.router
+ .navigate([], {
+ queryParams: this._currentKeys,
+ queryParamsHandling: 'merge', // can be overridden by the `queryParamsHandling` option
+ ...this._navigationExtras, // override the navigation extras
+ })
+ .then((value) => {
+ // we reset the current keys and navigation extras on navigation
+ // in order to avoid leaking to other navigations
+ this._currentKeys = {};
+ this._navigationExtras = {};
+ return value;
+ });
+ }
+}
+
+type LinkedQueryParamOptions = {
+ /**
+ * The injector to use to inject the router and activated route.
+ */
+ injector?: Injector;
+} & Partial<NavigateMethodFields>;
+
+/**
+ * These are the function types that will be used to parse and serialize the query param value.
+ */
+type ParseFn<T> = (value: string | null) => T;
+type SerializeFn<T> = (value: T) => string | number | null;
+
+/**
+ *These types will be used to define the return types of the `set` and `update` methods of the signal.
+ * We need to re-type the WritableSignal, so that the set and update methods can have null in the call signature.
+ * But the WritableSignal itself won't have null in the call signature, so we need to re-type it.
+ * This is needed in order to be able to reset the value to null,
+ * which is not possible with the WritableSignal that doesn't have null in it's type.
+ */
+type SignalSetFn<T> = (value: T) => void;
+type SignalUpdateFn<T> = (fn: (value: T) => T) => void;
+
+/**
+ * Creates a signal that is linked to a query parameter.
+ *
+ * You can parse the query param value before it is passed to the signal, this way you can transform the value from a string to a number or boolean or whatever you need.
+ * You can also serialize the value before it is passed to the query param, this way you can serialize the value from a number or boolean or object to a string or null.
+ *
+ * You can also use the `defaultValue` option to set a default value if the query param is not present in the url (null or undefined).
+ * NOTE: You cannot use both `defaultValue` and `parse` at the same time. You should use `parse` instead to handle the default value.
+ *
+ * You can set the signal to update the query parameter by calling the `set` or `update` method.
+ * Both methods will accept the value + null as a valid value, so you can remove the query parameter by passing null if needed.
+ *
+ * The 'set' and 'update' methods will update the value synchronously, but will schedule the navigation event to
+ * happen on the next tick (using root effect scheduling). This means the query params will be updated asynchronously.
+ * The changes will be coalesced into a single navigation event. This means that if you call `set` or `update` multiple times
+ * in a row (synchronously), only the last value will be updated in the query params.
+ *
+ * If you have multiple signals listening to the same query parameter, they will all be updated when the navigation event happens.
+ *
+ * @param key The name of the query parameter.
+ * @param options Configuration options for the signal.
+ * @returns A signal that is linked to the query parameter.
+ */
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & {
+ parse: ParseFn<T>;
+ serialize: SerializeFn<T>;
+ },
+): WritableSignal<T> & {
+ set: SignalSetFn<T | null>;
+ update: SignalUpdateFn<T | null>;
+};
+
+/**
+ * You cannot use both `defaultValue` and `parse` at the same time.
+ * You should use `parse` instead to handle the default value.
+ *
+ * For example, you cannot do this:
+ *
+ * ```ts
+ * linkedQueryParam('param', { defaultValue: 1, parse: (x) => x ? parseInt(x, 10) : x });
+ * ```
+ *
+ * Instead, you should do this:
+ *
+ * ```ts
+ * linkedQueryParam('param', { parse: (x) => x ? parseInt(x, 10) : 1 });
+ * ```
+ */
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & {
+ defaultValue: Exclude<T, undefined>;
+ parse: ParseFn<T>;
+ serialize?: SerializeFn<T>;
+ },
+): never;
+
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & {
+ defaultValue: T;
+ serialize: SerializeFn<T>;
+ },
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T>(
+ key: string,
+ options: LinkedQueryParamOptions & { defaultValue: T },
+): WritableSignal<T> & {
+ set: SignalSetFn<T | null>;
+ update: SignalUpdateFn<T | null>;
+};
+
+export function linkedQueryParam<T>(
+ key: string,
+ options: LinkedQueryParamOptions & { defaultValue: T | null },
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T>(
+ key: string,
+ options: LinkedQueryParamOptions & { defaultValue: T | undefined },
+): WritableSignal<T | undefined>;
+
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & { defaultValue: undefined },
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T>(
+ key: string,
+ options: LinkedQueryParamOptions & { parse: ParseFn<T> },
+): WritableSignal<T> & {
+ set: SignalSetFn<T | null>;
+ update: SignalUpdateFn<T | null>;
+};
+
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & { serialize: SerializeFn<T> },
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions,
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T = string>(
+ key: string,
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T>(
+ key: string,
+ options?: LinkedQueryParamOptions & {
+ defaultValue?: T;
+ parse?: ParseFn<T>;
+ serialize?: SerializeFn<T>;
+ },
+): WritableSignal<T> {
+ const injector = assertInjector(linkedQueryParam, options?.injector);
+
+ if (options?.defaultValue !== undefined && options?.parse) {
+ throw new Error(
+ 'linkedQueryParam: You cannot have both defaultValue and parse at the same time!',
+ );
+ } | nit: I would move this error case to the top of the function even before asserting the Injector |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | nartc | @@ -0,0 +1,414 @@
+import {
+ effect,
+ inject,
+ Injectable,
+ Injector,
+ runInInjectionContext,
+ signal,
+ untracked,
+ WritableSignal,
+} from '@angular/core';
+import { toSignal } from '@angular/core/rxjs-interop';
+import {
+ ActivatedRoute,
+ NavigationExtras,
+ Params,
+ Router,
+} from '@angular/router';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { createNotifier } from 'ngxtension/create-notifier';
+import { distinctUntilKeyChanged, map } from 'rxjs';
+
+/**
+ * The type of the serialized value.
+ * After transforming the value before it is passed to the query param, this type will be used.
+ */
+type SerializeReturnType = string | number | boolean | null | undefined;
+
+/**
+ * These are the options that can be passed to the `linkedQueryParam` function.
+ * They are taken from the `NavigationExtras` type in the `@angular/router` package.
+ */
+type NavigateMethodFields = Pick<
+ NavigationExtras,
+ | 'queryParamsHandling'
+ | 'onSameUrlNavigation'
+ | 'replaceUrl'
+ | 'skipLocationChange'
+>;
+
+/**
+ * Service to coalesce multiple navigation calls into a single navigation event.
+ */
+@Injectable({ providedIn: 'root' })
+export class LinkedQueryParamGlobalHandler {
+ private router = inject(Router);
+ /**
+ * @internal
+ * The current query params that will be set on the next navigation event.
+ */
+ private _currentKeys: Record<string, SerializeReturnType> = {};
+ /**
+ * @internal
+ * The navigation extras that will be used on the next navigation event.
+ */
+ private _navigationExtras: NavigationExtras = {};
+ /**
+ * @internal
+ * The notifier that will be used to schedule the navigation event.
+ */
+ _schedulerNotifier = createNotifier();
+
+ constructor() {
+ effect(() => {
+ // listen to the scheduler notifier to schedule the navigation event
+ this._schedulerNotifier.listen();
+
+ // we need to untrack the navigation call in order to not register any other signal as a dependency
+ untracked(() => this.navigate().then());
+ });
+ }
+
+ /**
+ * Sets the value of a query param.
+ * This will be used on the next navigation event.
+ */
+ setParamKeyValue(key: string, value: SerializeReturnType) {
+ this._currentKeys[key] = value;
+ }
+
+ /**
+ * Sets the navigation extras that will be used on the next navigation event.
+ */
+ setCurrentNavigationExtras(config: Partial<NavigateMethodFields>) {
+ const {
+ queryParamsHandling,
+ onSameUrlNavigation,
+ replaceUrl,
+ skipLocationChange,
+ } = config ?? {};
+
+ if (queryParamsHandling || queryParamsHandling === '')
+ this._navigationExtras.queryParamsHandling = queryParamsHandling;
+ if (onSameUrlNavigation)
+ this._navigationExtras.onSameUrlNavigation = onSameUrlNavigation;
+ if (replaceUrl) this._navigationExtras.replaceUrl = replaceUrl;
+ if (skipLocationChange)
+ this._navigationExtras.skipLocationChange = skipLocationChange;
+ }
+
+ /**
+ * Navigates to the current URL with the accumulated query parameters and navigation extras.
+ * Cleans up the current keys and navigation extras after the navigation.
+ */
+ private navigate(): Promise<boolean> {
+ return this.router
+ .navigate([], {
+ queryParams: this._currentKeys,
+ queryParamsHandling: 'merge', // can be overridden by the `queryParamsHandling` option
+ ...this._navigationExtras, // override the navigation extras
+ })
+ .then((value) => {
+ // we reset the current keys and navigation extras on navigation
+ // in order to avoid leaking to other navigations
+ this._currentKeys = {};
+ this._navigationExtras = {};
+ return value;
+ });
+ }
+}
+
+type LinkedQueryParamOptions = {
+ /**
+ * The injector to use to inject the router and activated route.
+ */
+ injector?: Injector;
+} & Partial<NavigateMethodFields>;
+
+/**
+ * These are the function types that will be used to parse and serialize the query param value.
+ */
+type ParseFn<T> = (value: string | null) => T;
+type SerializeFn<T> = (value: T) => string | number | null;
+
+/**
+ *These types will be used to define the return types of the `set` and `update` methods of the signal.
+ * We need to re-type the WritableSignal, so that the set and update methods can have null in the call signature.
+ * But the WritableSignal itself won't have null in the call signature, so we need to re-type it.
+ * This is needed in order to be able to reset the value to null,
+ * which is not possible with the WritableSignal that doesn't have null in it's type.
+ */
+type SignalSetFn<T> = (value: T) => void;
+type SignalUpdateFn<T> = (fn: (value: T) => T) => void;
+
+/**
+ * Creates a signal that is linked to a query parameter.
+ *
+ * You can parse the query param value before it is passed to the signal, this way you can transform the value from a string to a number or boolean or whatever you need.
+ * You can also serialize the value before it is passed to the query param, this way you can serialize the value from a number or boolean or object to a string or null.
+ *
+ * You can also use the `defaultValue` option to set a default value if the query param is not present in the url (null or undefined).
+ * NOTE: You cannot use both `defaultValue` and `parse` at the same time. You should use `parse` instead to handle the default value.
+ *
+ * You can set the signal to update the query parameter by calling the `set` or `update` method.
+ * Both methods will accept the value + null as a valid value, so you can remove the query parameter by passing null if needed.
+ *
+ * The 'set' and 'update' methods will update the value synchronously, but will schedule the navigation event to
+ * happen on the next tick (using root effect scheduling). This means the query params will be updated asynchronously.
+ * The changes will be coalesced into a single navigation event. This means that if you call `set` or `update` multiple times
+ * in a row (synchronously), only the last value will be updated in the query params.
+ *
+ * If you have multiple signals listening to the same query parameter, they will all be updated when the navigation event happens.
+ *
+ * @param key The name of the query parameter.
+ * @param options Configuration options for the signal.
+ * @returns A signal that is linked to the query parameter.
+ */
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & {
+ parse: ParseFn<T>;
+ serialize: SerializeFn<T>;
+ },
+): WritableSignal<T> & {
+ set: SignalSetFn<T | null>;
+ update: SignalUpdateFn<T | null>;
+};
+
+/**
+ * You cannot use both `defaultValue` and `parse` at the same time.
+ * You should use `parse` instead to handle the default value.
+ *
+ * For example, you cannot do this:
+ *
+ * ```ts
+ * linkedQueryParam('param', { defaultValue: 1, parse: (x) => x ? parseInt(x, 10) : x });
+ * ```
+ *
+ * Instead, you should do this:
+ *
+ * ```ts
+ * linkedQueryParam('param', { parse: (x) => x ? parseInt(x, 10) : 1 });
+ * ```
+ */
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & {
+ defaultValue: Exclude<T, undefined>;
+ parse: ParseFn<T>;
+ serialize?: SerializeFn<T>;
+ },
+): never;
+
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & {
+ defaultValue: T;
+ serialize: SerializeFn<T>;
+ },
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T>(
+ key: string,
+ options: LinkedQueryParamOptions & { defaultValue: T },
+): WritableSignal<T> & {
+ set: SignalSetFn<T | null>;
+ update: SignalUpdateFn<T | null>;
+};
+
+export function linkedQueryParam<T>(
+ key: string,
+ options: LinkedQueryParamOptions & { defaultValue: T | null },
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T>(
+ key: string,
+ options: LinkedQueryParamOptions & { defaultValue: T | undefined },
+): WritableSignal<T | undefined>;
+
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & { defaultValue: undefined },
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T>(
+ key: string,
+ options: LinkedQueryParamOptions & { parse: ParseFn<T> },
+): WritableSignal<T> & {
+ set: SignalSetFn<T | null>;
+ update: SignalUpdateFn<T | null>;
+};
+
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & { serialize: SerializeFn<T> },
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions,
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T = string>(
+ key: string,
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T>(
+ key: string,
+ options?: LinkedQueryParamOptions & {
+ defaultValue?: T;
+ parse?: ParseFn<T>;
+ serialize?: SerializeFn<T>;
+ },
+): WritableSignal<T> {
+ const injector = assertInjector(linkedQueryParam, options?.injector);
+
+ if (options?.defaultValue !== undefined && options?.parse) {
+ throw new Error(
+ 'linkedQueryParam: You cannot have both defaultValue and parse at the same time!',
+ );
+ }
+
+ return runInInjectionContext(injector, () => {
+ const route = inject(ActivatedRoute);
+ const globalHandler = inject(LinkedQueryParamGlobalHandler);
+
+ /**
+ * Parses a parameter value based on provided configuration.
+ * @param params - An object containing parameters.
+ * @returns The parsed parameter value.
+ */
+ const parseParamValue = (params: Params) => {
+ // Get the value from the params object.
+ const value: string | null = params[key] ?? null;
+ // If a parsing function is provided in the config, use it to parse the value.
+ if (options?.parse) {
+ return options.parse(value);
+ }
+ // If the value is undefined or null and a default value is provided, return the default value.
+ if (
+ (value === undefined || value === null) &&
+ options?.defaultValue !== undefined
+ ) {
+ return options.defaultValue;
+ }
+ // Otherwise, return the original value or the parsed value (if it was parsed).
+ return value;
+ };
+
+ // create a signal that is updated whenever the query param changes
+ const queryParamValue = toSignal(
+ route.queryParams.pipe(
+ distinctUntilKeyChanged(key), // skip if no changes on same key
+ map((x) => parseParamValue(x)),
+ ),
+ { initialValue: parseParamValue(route.snapshot.queryParams) },
+ );
+
+ const source = signal<T>(queryParamValue() as T);
+
+ const originalSet = source.set;
+
+ effect(() => {
+ const x = queryParamValue();
+ // update the source signal whenever the query param changes
+ untracked(() => originalSet(x as T));
+ });
+
+ const set = (value: T) => {
+ // we first set the initial value so it synchronous (same as a normal signal)
+ originalSet(value);
+
+ // when the source signal changes, update the query param
+ // store the new value in the current keys so that we can coalesce the navigation
+ let valueToBeSet: any = value;
+ if (options?.serialize) {
+ valueToBeSet = options.serialize(value);
+ } else if (value === undefined || value === null) {
+ valueToBeSet = null;
+ } else {
+ valueToBeSet = typeof value === 'string' ? value : String(value);
+ }
+
+ globalHandler.setParamKeyValue(key, valueToBeSet);
+ globalHandler.setCurrentNavigationExtras(options ?? {});
+
+ // schedule the navigation event (multiple synchronous navigations will be coalesced)
+ // this will also reset the current keys and navigation extras after the navigation
+ globalHandler._schedulerNotifier.notify();
+ };
+
+ const update = (fn: (value: T) => T) => set(fn(source()));
+
+ return Object.assign(source, { set, update });
+ });
+}
+
+/**
+ * Can be used to parse a query param value to a number.
+ * You can also use the `defaultValue` option to set a default value if the query param is not present in the url (null or undefined).
+ *
+ * Example:
+ * ```ts
+ * linkedQueryParam('page', { parse: paramToNumber() });
+ * ```
+ * Will return null if the query param is not present in the url.
+ *
+ * Or with a default value:
+ * ```ts
+ * linkedQueryParam('page', { parse: paramToNumber({defaultValue: 1}) });
+ * ```
+ *
+ * Will return 1 if the query param is not present in the url.
+ */
+export function paramToNumber(config: {
+ defaultValue: number;
+}): (x: string | null) => number;
+export function paramToNumber(config?: {
+ defaultValue?: number | null | undefined;
+}): (x: string | null) => number | null; | question: would `export function paramToNumber(): number | null` work? It looks like as soon as I provide a `config` object, the first overload takes over and I can't provide `null | undefined` to `defaultValue` anyway so why would the second overload needs to declare the optional `config?` object at all? |
ngxtension-platform | github_2023 | typescript | 526 | ngxtension | nartc | @@ -0,0 +1,414 @@
+import {
+ effect,
+ inject,
+ Injectable,
+ Injector,
+ runInInjectionContext,
+ signal,
+ untracked,
+ WritableSignal,
+} from '@angular/core';
+import { toSignal } from '@angular/core/rxjs-interop';
+import {
+ ActivatedRoute,
+ NavigationExtras,
+ Params,
+ Router,
+} from '@angular/router';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { createNotifier } from 'ngxtension/create-notifier';
+import { distinctUntilKeyChanged, map } from 'rxjs';
+
+/**
+ * The type of the serialized value.
+ * After transforming the value before it is passed to the query param, this type will be used.
+ */
+type SerializeReturnType = string | number | boolean | null | undefined;
+
+/**
+ * These are the options that can be passed to the `linkedQueryParam` function.
+ * They are taken from the `NavigationExtras` type in the `@angular/router` package.
+ */
+type NavigateMethodFields = Pick<
+ NavigationExtras,
+ | 'queryParamsHandling'
+ | 'onSameUrlNavigation'
+ | 'replaceUrl'
+ | 'skipLocationChange'
+>;
+
+/**
+ * Service to coalesce multiple navigation calls into a single navigation event.
+ */
+@Injectable({ providedIn: 'root' })
+export class LinkedQueryParamGlobalHandler {
+ private router = inject(Router);
+ /**
+ * @internal
+ * The current query params that will be set on the next navigation event.
+ */
+ private _currentKeys: Record<string, SerializeReturnType> = {};
+ /**
+ * @internal
+ * The navigation extras that will be used on the next navigation event.
+ */
+ private _navigationExtras: NavigationExtras = {};
+ /**
+ * @internal
+ * The notifier that will be used to schedule the navigation event.
+ */
+ _schedulerNotifier = createNotifier();
+
+ constructor() {
+ effect(() => {
+ // listen to the scheduler notifier to schedule the navigation event
+ this._schedulerNotifier.listen();
+
+ // we need to untrack the navigation call in order to not register any other signal as a dependency
+ untracked(() => this.navigate().then());
+ });
+ }
+
+ /**
+ * Sets the value of a query param.
+ * This will be used on the next navigation event.
+ */
+ setParamKeyValue(key: string, value: SerializeReturnType) {
+ this._currentKeys[key] = value;
+ }
+
+ /**
+ * Sets the navigation extras that will be used on the next navigation event.
+ */
+ setCurrentNavigationExtras(config: Partial<NavigateMethodFields>) {
+ const {
+ queryParamsHandling,
+ onSameUrlNavigation,
+ replaceUrl,
+ skipLocationChange,
+ } = config ?? {};
+
+ if (queryParamsHandling || queryParamsHandling === '')
+ this._navigationExtras.queryParamsHandling = queryParamsHandling;
+ if (onSameUrlNavigation)
+ this._navigationExtras.onSameUrlNavigation = onSameUrlNavigation;
+ if (replaceUrl) this._navigationExtras.replaceUrl = replaceUrl;
+ if (skipLocationChange)
+ this._navigationExtras.skipLocationChange = skipLocationChange;
+ }
+
+ /**
+ * Navigates to the current URL with the accumulated query parameters and navigation extras.
+ * Cleans up the current keys and navigation extras after the navigation.
+ */
+ private navigate(): Promise<boolean> {
+ return this.router
+ .navigate([], {
+ queryParams: this._currentKeys,
+ queryParamsHandling: 'merge', // can be overridden by the `queryParamsHandling` option
+ ...this._navigationExtras, // override the navigation extras
+ })
+ .then((value) => {
+ // we reset the current keys and navigation extras on navigation
+ // in order to avoid leaking to other navigations
+ this._currentKeys = {};
+ this._navigationExtras = {};
+ return value;
+ });
+ }
+}
+
+type LinkedQueryParamOptions = {
+ /**
+ * The injector to use to inject the router and activated route.
+ */
+ injector?: Injector;
+} & Partial<NavigateMethodFields>;
+
+/**
+ * These are the function types that will be used to parse and serialize the query param value.
+ */
+type ParseFn<T> = (value: string | null) => T;
+type SerializeFn<T> = (value: T) => string | number | null;
+
+/**
+ *These types will be used to define the return types of the `set` and `update` methods of the signal.
+ * We need to re-type the WritableSignal, so that the set and update methods can have null in the call signature.
+ * But the WritableSignal itself won't have null in the call signature, so we need to re-type it.
+ * This is needed in order to be able to reset the value to null,
+ * which is not possible with the WritableSignal that doesn't have null in it's type.
+ */
+type SignalSetFn<T> = (value: T) => void;
+type SignalUpdateFn<T> = (fn: (value: T) => T) => void;
+
+/**
+ * Creates a signal that is linked to a query parameter.
+ *
+ * You can parse the query param value before it is passed to the signal, this way you can transform the value from a string to a number or boolean or whatever you need.
+ * You can also serialize the value before it is passed to the query param, this way you can serialize the value from a number or boolean or object to a string or null.
+ *
+ * You can also use the `defaultValue` option to set a default value if the query param is not present in the url (null or undefined).
+ * NOTE: You cannot use both `defaultValue` and `parse` at the same time. You should use `parse` instead to handle the default value.
+ *
+ * You can set the signal to update the query parameter by calling the `set` or `update` method.
+ * Both methods will accept the value + null as a valid value, so you can remove the query parameter by passing null if needed.
+ *
+ * The 'set' and 'update' methods will update the value synchronously, but will schedule the navigation event to
+ * happen on the next tick (using root effect scheduling). This means the query params will be updated asynchronously.
+ * The changes will be coalesced into a single navigation event. This means that if you call `set` or `update` multiple times
+ * in a row (synchronously), only the last value will be updated in the query params.
+ *
+ * If you have multiple signals listening to the same query parameter, they will all be updated when the navigation event happens.
+ *
+ * @param key The name of the query parameter.
+ * @param options Configuration options for the signal.
+ * @returns A signal that is linked to the query parameter.
+ */
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & {
+ parse: ParseFn<T>;
+ serialize: SerializeFn<T>;
+ },
+): WritableSignal<T> & {
+ set: SignalSetFn<T | null>;
+ update: SignalUpdateFn<T | null>;
+};
+
+/**
+ * You cannot use both `defaultValue` and `parse` at the same time.
+ * You should use `parse` instead to handle the default value.
+ *
+ * For example, you cannot do this:
+ *
+ * ```ts
+ * linkedQueryParam('param', { defaultValue: 1, parse: (x) => x ? parseInt(x, 10) : x });
+ * ```
+ *
+ * Instead, you should do this:
+ *
+ * ```ts
+ * linkedQueryParam('param', { parse: (x) => x ? parseInt(x, 10) : 1 });
+ * ```
+ */
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & {
+ defaultValue: Exclude<T, undefined>;
+ parse: ParseFn<T>;
+ serialize?: SerializeFn<T>;
+ },
+): never;
+
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & {
+ defaultValue: T;
+ serialize: SerializeFn<T>;
+ },
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T>(
+ key: string,
+ options: LinkedQueryParamOptions & { defaultValue: T },
+): WritableSignal<T> & {
+ set: SignalSetFn<T | null>;
+ update: SignalUpdateFn<T | null>;
+};
+
+export function linkedQueryParam<T>(
+ key: string,
+ options: LinkedQueryParamOptions & { defaultValue: T | null },
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T>(
+ key: string,
+ options: LinkedQueryParamOptions & { defaultValue: T | undefined },
+): WritableSignal<T | undefined>;
+
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & { defaultValue: undefined },
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T>(
+ key: string,
+ options: LinkedQueryParamOptions & { parse: ParseFn<T> },
+): WritableSignal<T> & {
+ set: SignalSetFn<T | null>;
+ update: SignalUpdateFn<T | null>;
+};
+
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions & { serialize: SerializeFn<T> },
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T = string>(
+ key: string,
+ options: LinkedQueryParamOptions,
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T = string>(
+ key: string,
+): WritableSignal<T | null>;
+
+export function linkedQueryParam<T>(
+ key: string,
+ options?: LinkedQueryParamOptions & {
+ defaultValue?: T;
+ parse?: ParseFn<T>;
+ serialize?: SerializeFn<T>;
+ },
+): WritableSignal<T> {
+ const injector = assertInjector(linkedQueryParam, options?.injector);
+
+ if (options?.defaultValue !== undefined && options?.parse) {
+ throw new Error(
+ 'linkedQueryParam: You cannot have both defaultValue and parse at the same time!',
+ );
+ }
+
+ return runInInjectionContext(injector, () => {
+ const route = inject(ActivatedRoute);
+ const globalHandler = inject(LinkedQueryParamGlobalHandler);
+
+ /**
+ * Parses a parameter value based on provided configuration.
+ * @param params - An object containing parameters.
+ * @returns The parsed parameter value.
+ */
+ const parseParamValue = (params: Params) => {
+ // Get the value from the params object.
+ const value: string | null = params[key] ?? null;
+ // If a parsing function is provided in the config, use it to parse the value.
+ if (options?.parse) {
+ return options.parse(value);
+ }
+ // If the value is undefined or null and a default value is provided, return the default value.
+ if (
+ (value === undefined || value === null) &&
+ options?.defaultValue !== undefined
+ ) {
+ return options.defaultValue;
+ }
+ // Otherwise, return the original value or the parsed value (if it was parsed).
+ return value;
+ };
+
+ // create a signal that is updated whenever the query param changes
+ const queryParamValue = toSignal(
+ route.queryParams.pipe(
+ distinctUntilKeyChanged(key), // skip if no changes on same key
+ map((x) => parseParamValue(x)),
+ ),
+ { initialValue: parseParamValue(route.snapshot.queryParams) },
+ );
+
+ const source = signal<T>(queryParamValue() as T);
+
+ const originalSet = source.set;
+
+ effect(() => {
+ const x = queryParamValue();
+ // update the source signal whenever the query param changes
+ untracked(() => originalSet(x as T));
+ });
+
+ const set = (value: T) => {
+ // we first set the initial value so it synchronous (same as a normal signal)
+ originalSet(value);
+
+ // when the source signal changes, update the query param
+ // store the new value in the current keys so that we can coalesce the navigation
+ let valueToBeSet: any = value;
+ if (options?.serialize) {
+ valueToBeSet = options.serialize(value);
+ } else if (value === undefined || value === null) {
+ valueToBeSet = null;
+ } else {
+ valueToBeSet = typeof value === 'string' ? value : String(value);
+ }
+
+ globalHandler.setParamKeyValue(key, valueToBeSet);
+ globalHandler.setCurrentNavigationExtras(options ?? {});
+
+ // schedule the navigation event (multiple synchronous navigations will be coalesced)
+ // this will also reset the current keys and navigation extras after the navigation
+ globalHandler._schedulerNotifier.notify();
+ };
+
+ const update = (fn: (value: T) => T) => set(fn(source()));
+
+ return Object.assign(source, { set, update });
+ });
+}
+
+/**
+ * Can be used to parse a query param value to a number.
+ * You can also use the `defaultValue` option to set a default value if the query param is not present in the url (null or undefined).
+ *
+ * Example:
+ * ```ts
+ * linkedQueryParam('page', { parse: paramToNumber() });
+ * ```
+ * Will return null if the query param is not present in the url.
+ *
+ * Or with a default value:
+ * ```ts
+ * linkedQueryParam('page', { parse: paramToNumber({defaultValue: 1}) });
+ * ```
+ *
+ * Will return 1 if the query param is not present in the url.
+ */
+export function paramToNumber(config: {
+ defaultValue: number;
+}): (x: string | null) => number;
+export function paramToNumber(config?: {
+ defaultValue?: number | null | undefined;
+}): (x: string | null) => number | null;
+
+export function paramToNumber(
+ config: { defaultValue?: number | null | undefined } = { defaultValue: null },
+) {
+ return (x: string | null) => {
+ if (x === undefined || x === null) return config.defaultValue;
+ const parsed = parseInt(x, 10);
+ if (Number.isNaN(parsed)) return config.defaultValue;
+ return parsed;
+ };
+}
+
+/**
+ * Can be used to parse a query param value to a boolean.
+ * You can also use the `defaultValue` option to set a default value if the query param is not present in the url (null or undefined).
+ *
+ * Example:
+ * ```ts
+ * linkedQueryParam('showHidden', { parse: paramToBoolean() });
+ * ```
+ * Will return null if the query param is not present in the url or true/false if the query param is present.
+ *
+ * Or with a default value:
+ * ```ts
+ * linkedQueryParam('showHidden', { parse: paramToBoolean({defaultValue: true}) });
+ * ```
+ *
+ * Will return true if the query param is not present in the url.
+ * Otherwise, it will return whatever the query param value is.
+ */
+export function paramToBoolean(config: {
+ defaultValue: boolean;
+}): (x: string | null) => boolean;
+export function paramToBoolean(config?: {
+ defaultValue?: boolean | null | undefined;
+}): (x: string | null) => boolean | null; | question: same as above about `paramToNumber` overload |
ngxtension-platform | github_2023 | typescript | 525 | ngxtension | eneajaho | @@ -124,29 +124,69 @@ const internalInjectLocalStorage = <R>(
const localStorage = inject(NGXTENSION_LOCAL_STORAGE);
const destroyRef = inject(DestroyRef);
- const initialStoredValue = goodTry(() => localStorage.getItem(key));
- const initialValue = initialStoredValue
- ? (goodTry(() => parse(initialStoredValue) as R) ?? defaultValue)
- : defaultValue;
- const internalSignal = signal(initialValue);
-
- effect(() => {
- const value = internalSignal();
- if (value === undefined) {
- goodTry(() => localStorage.removeItem(key));
- } else {
- goodTry(() => localStorage.setItem(key, stringify(value)));
- }
- });
+ const initialStoredValue = goodTry(() => localStorage.getItem(key), null);
+ const rawValue = signal(initialStoredValue);
+ const internalSignal = signal<R>(
+ initialStoredValue
+ ? goodTry(() => parse(initialStoredValue) as R, defaultValue)
+ : defaultValue,
+ );
+
+ effect(
+ () => {
+ rawValue.set(stringify(internalSignal()));
+ },
+ {
+ allowSignalWrites: true,
+ },
+ );
+
+ effect(
+ () => {
+ const storedValue = rawValue();
+
+ internalSignal.set(
+ storedValue
+ ? goodTry(() => parse(storedValue) as R, defaultValue)
+ : defaultValue,
+ );
+ },
+ {
+ allowSignalWrites: true,
+ },
+ );
+
+ effect(
+ () => {
+ const value = rawValue();
+ try {
+ if (value === null) {
+ localStorage.removeItem(key);
+ } else {
+ localStorage.setItem(key, value);
+ }
+
+ // We notify other consumers in this tab about changing the value in the store for synchronization
+ window.dispatchEvent(
+ new StorageEvent(`storage`, {
+ key,
+ newValue: value,
+ storageArea: localStorage,
+ }),
+ );
+ } catch {
+ // ignore errors
+ }
+ },
+ {
+ allowSignalWrites: true,
+ },
+ );
if (storageSync) {
const onStorage = (event: StorageEvent) => {
if (event.storageArea === localStorage && event.key === key) {
- const newValue =
- event.newValue !== null
- ? (parse(event.newValue) as R)
- : defaultValue;
- internalSignal.set(newValue);
+ rawValue.set(event.newValue); | This looks like a job for observables to be honest. I'd like to see this part converted to a subject that emits and this way we can handle things easily. We have too many effects at this point inside the utility. We need only one I guess, the others can be replaced by the subject subscription. What do you think? |
ngxtension-platform | github_2023 | typescript | 525 | ngxtension | nartc | @@ -124,27 +123,74 @@ const internalInjectLocalStorage = <R>(
const localStorage = inject(NGXTENSION_LOCAL_STORAGE);
const destroyRef = inject(DestroyRef);
- const initialStoredValue = goodTry(() => localStorage.getItem(key));
- const initialValue = initialStoredValue
- ? (goodTry(() => parse(initialStoredValue) as R) ?? defaultValue)
- : defaultValue;
- const internalSignal = signal(initialValue);
-
- effect(() => {
- const value = internalSignal();
- if (value === undefined) {
- goodTry(() => localStorage.removeItem(key));
- } else {
- goodTry(() => localStorage.setItem(key, stringify(value)));
+ const initialStoredValue = goodTry(() => localStorage.getItem(key), null);
+ const internalSignal = signal<R>(
+ initialStoredValue
+ ? goodTry(() => parse(initialStoredValue) as R, defaultValue)
+ : defaultValue,
+ );
+
+ function syncValueWithLocalStorage(value: R): void {
+ const newValue = goodTry(
+ () => (value === undefined ? null : stringify(value)),
+ null,
+ );
+
+ try {
+ if (newValue === localStorage.getItem(key)) {
+ return;
+ }
+
+ if (newValue === null) {
+ localStorage.removeItem(key);
+ } else {
+ localStorage.setItem(key, newValue);
+ }
+
+ // We notify other consumers in this tab about changing the value in the store for synchronization
+ window.dispatchEvent( | nit: use DI for `window` |
ngxtension-platform | github_2023 | typescript | 487 | ngxtension | eneajaho | @@ -271,9 +272,7 @@ export function derivedAsync<T>(
untracked(() => sourceEvent$.next(newSource));
} else {
// if the new source is not an observable or a promise, we set the value immediately
- untracked(() =>
- sourceValue.set({ kind: StateKind.Value, value: newSource as T }),
- );
+ untracked(() => sourceEvent$.next(of(newSource as T)));
} | Hi @MillerSvt
We can refactor it a bit more now to just be:
```ts
// we untrack the source$.next() so that we don't register other signals as dependencies
untracked(() => {
sourceEvent$.next(
isObservable(newSource) || isPromise(newSource) ? newSource : of(newSource as T)
);
});
``` |
ngxtension-platform | github_2023 | typescript | 508 | ngxtension | eneajaho | @@ -29,6 +29,16 @@ describe('injectLocalStorage', () => {
});
}));
+ it('should return data of defaultValue', () => {
+ TestBed.runInInjectionContext(() => {
+ const defaultValue = 'default';
+ const localStorageSignal = injectLocalStorage<string>(key, { | Can we include a test that only checks for the type and not just the value? |
ngxtension-platform | github_2023 | others | 508 | ngxtension | eneajaho | @@ -55,9 +55,8 @@ Options to configure the behavior of the local storage signal.
Here's a basic example of using `injectLocalStorage`:
```typescript
-const username = injectLocalStorage<string>('username', {
- defaultValue: 'Anonymous',
- storageSync: true,
+const username = injectLocalStorage<string | undefined>('username', {
+ storageSync: true
}); | I would move this below to showcase the storageSync method, instead of showcasing it as a basic example.
A basic example would just be : `const username = injectLocalStorage<string>('username');` and tell the developer that it will also contain the null or undefined (whatever we use as default) value in the type |
ngxtension-platform | github_2023 | typescript | 282 | ngxtension | JeanMeche | @@ -0,0 +1,285 @@
+import {
+ Injector,
+ computed,
+ effect,
+ isSignal,
+ signal,
+ untracked,
+ type Signal,
+} from '@angular/core';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { computedPrevious } from 'ngxtension/computed-previous';
+
+interface CreateResourceOptions {
+ injector?: Injector;
+}
+
+export type CreateResourceSource<TValue> = Signal<TValue | undefined>;
+
+export type CreateResourceStatus =
+ | 'unresolved'
+ | 'pending'
+ | 'ready'
+ | 'refreshing'
+ | 'errored';
+
+interface Resource<TValue, TError = string> {
+ data: Signal<TValue>;
+ status: Signal<CreateResourceStatus>;
+ error: Signal<TError | undefined>;
+ loading: Signal<boolean>;
+ latest: Signal<TValue>;
+ refetch: () => void;
+ mutate: (value: TValue) => void;
+ destroy: () => void;
+}
+
+// TODO: FIX types as they don't play well
+// Add better naming for the types ex: sourceOrFetcher, fetcherOrOptions
+
+// fetcher with options
+export function createResource<TValue, TError = string>(
+ fetcher: () => Promise<TValue>,
+ options?: CreateResourceOptions & { initialValue?: undefined },
+): Resource<TValue | undefined, TError>;
+
+// fetcher with options + initial value
+export function createResource<TValue, TError = string>(
+ fetcher: () => Promise<TValue>,
+ options: CreateResourceOptions & { initialValue: TValue },
+): Resource<TValue, TError>;
+
+// source + fetcher + options
+export function createResource<TValue, TError = string>(
+ source: Signal<TValue | undefined | null>,
+ fetcher: (source: TValue | undefined) => Promise<TValue>,
+ options?: CreateResourceOptions & { initialValue?: TValue },
+): Resource<TValue | undefined, TError>;
+
+// source + fetcher + options + initial value
+export function createResource<TValue, TError = string>(
+ source: Signal<TValue | undefined>,
+ fetcher: (source: TValue | undefined) => Promise<TValue>,
+ options: CreateResourceOptions & { initialValue: TValue },
+): Resource<TValue, TError>;
+
+export function createResource<TValue, TError = string>(
+ source:
+ | Signal<TValue | undefined>
+ | ((source?: TValue | undefined) => Promise<TValue>),
+ fetcher: (
+ source: TValue | undefined,
+ ) => Promise<TValue> | (CreateResourceOptions & { initialValue: TValue }),
+ options?: CreateResourceOptions & { initialValue?: undefined },
+): Resource<TValue, TError>;
+
+export function createResource<TValue, TError = string>(
+ source: Signal<TValue> | ((source: TValue) => Promise<TValue>),
+ fetcher: (
+ source: TValue,
+ ) => Promise<TValue> | (CreateResourceOptions & { initialValue: TValue }),
+ options: CreateResourceOptions & { initialValue: TValue },
+): Resource<TValue, TError>;
+
+/**
+ *
+ * This function creates a resource that can be used to fetch data from an API or any other source.
+ * It returns an object with the following properties:
+ * - data: a signal that contains the data
+ * - status: a signal that contains the status of the resource
+ * - error: a signal that contains the error if the resource is in an errored state
+ * - loading: a signal that contains a boolean indicating if the resource is loading
+ * - latest: a function that returns the latest value of the resource
+ * - refetch: a function that refetches the resource
+ * - mutate: a function that updates the value of the resource
+ * - destroy: a function that destroys the resource
+ *
+ * @example
+ *
+ * getUser(id: string): Promise<User> {
+ * return fetch(`/api/users/${id}`).then(res => res.json());
+ * }
+ *
+ * userId = injectQueryParam('userId');
+ *
+ * res = createResource(() => this.getUser(this.userId()));
+ * or
+ * res = createResource(this.userId, this.getUser);
+ *
+ * res.data() // returns undefined
+ * res.loading() // returns true
+ * res.error() // returns undefined
+ * res.latest() // returns undefined
+ *
+ * // After the promise resolves
+ *
+ * res.data() // returns User
+ * res.loading() // returns false
+ * res.error() // returns undefined
+ * res.latest() // returns User
+ *
+ * // After the promise rejects
+ *
+ * res.data() // returns undefined
+ * res.loading() // returns false
+ * res.error() // returns Error
+ * res.latest() // returns undefined
+ *
+ * // After calling refetch
+ *
+ * res.data() // returns undefined
+ * res.loading() // returns true
+ * res.error() // returns undefined
+ * res.latest() // returns User
+ *
+ * // After calling mutate
+ *
+ * res.data() // returns User
+ * res.loading() // returns false
+ * res.error() // returns undefined
+ * res.latest() // returns User
+ *
+ *
+ */
+export function createResource<TValue, TError = string>(...args: unknown[]) {
+ const { source, fetcher, options } = parseArgs<TValue>(args);
+
+ if (fetcher === undefined) {
+ throw new Error('fetcher is required');
+ }
+
+ const value = signal<TValue | undefined>(options.initialValue);
+ const error = signal<TError | undefined>(undefined);
+ const trigger = signal(0);
+ const state = signal<CreateResourceStatus>(
+ 'initialValue' in options ? 'ready' : 'unresolved',
+ );
+
+ const latest = signal<TValue | undefined>(value());
+
+ const previousTrigger = computedPrevious(trigger);
+
+ return assertInjector(createResource, options.injector, () => {
+ const effectRef = effect(() => {
+ trigger(); // used to trigger the effect and for refetching
+
+ const promise = source ? fetcher(source()) : fetcher();
+
+ // we don't want to track anything else except the source and the fetcher
+ untracked(() => {
+ // TODO: do we want to cancel the current promise if it's still pending? it's easy with observables 😅
+ load(promise!);
+ });
+ });
+
+ function load(p: Promise<TValue> | undefined) {
+ if (p && isPromise(p)) { | ```suggestion
if (isPromise(p)) {
``` |
ngxtension-platform | github_2023 | typescript | 282 | ngxtension | JeanMeche | @@ -0,0 +1,285 @@
+import {
+ Injector,
+ computed,
+ effect,
+ isSignal,
+ signal,
+ untracked,
+ type Signal,
+} from '@angular/core';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { computedPrevious } from 'ngxtension/computed-previous';
+
+interface CreateResourceOptions {
+ injector?: Injector;
+}
+
+export type CreateResourceSource<TValue> = Signal<TValue | undefined>;
+
+export type CreateResourceStatus =
+ | 'unresolved'
+ | 'pending'
+ | 'ready'
+ | 'refreshing'
+ | 'errored';
+
+interface Resource<TValue, TError = string> {
+ data: Signal<TValue>;
+ status: Signal<CreateResourceStatus>;
+ error: Signal<TError | undefined>;
+ loading: Signal<boolean>;
+ latest: Signal<TValue>;
+ refetch: () => void;
+ mutate: (value: TValue) => void;
+ destroy: () => void;
+}
+
+// TODO: FIX types as they don't play well
+// Add better naming for the types ex: sourceOrFetcher, fetcherOrOptions
+
+// fetcher with options
+export function createResource<TValue, TError = string>(
+ fetcher: () => Promise<TValue>,
+ options?: CreateResourceOptions & { initialValue?: undefined },
+): Resource<TValue | undefined, TError>;
+
+// fetcher with options + initial value
+export function createResource<TValue, TError = string>(
+ fetcher: () => Promise<TValue>,
+ options: CreateResourceOptions & { initialValue: TValue },
+): Resource<TValue, TError>;
+
+// source + fetcher + options
+export function createResource<TValue, TError = string>(
+ source: Signal<TValue | undefined | null>,
+ fetcher: (source: TValue | undefined) => Promise<TValue>,
+ options?: CreateResourceOptions & { initialValue?: TValue },
+): Resource<TValue | undefined, TError>;
+
+// source + fetcher + options + initial value
+export function createResource<TValue, TError = string>(
+ source: Signal<TValue | undefined>,
+ fetcher: (source: TValue | undefined) => Promise<TValue>,
+ options: CreateResourceOptions & { initialValue: TValue },
+): Resource<TValue, TError>;
+
+export function createResource<TValue, TError = string>(
+ source:
+ | Signal<TValue | undefined>
+ | ((source?: TValue | undefined) => Promise<TValue>),
+ fetcher: (
+ source: TValue | undefined,
+ ) => Promise<TValue> | (CreateResourceOptions & { initialValue: TValue }),
+ options?: CreateResourceOptions & { initialValue?: undefined },
+): Resource<TValue, TError>;
+
+export function createResource<TValue, TError = string>(
+ source: Signal<TValue> | ((source: TValue) => Promise<TValue>),
+ fetcher: (
+ source: TValue,
+ ) => Promise<TValue> | (CreateResourceOptions & { initialValue: TValue }),
+ options: CreateResourceOptions & { initialValue: TValue },
+): Resource<TValue, TError>;
+
+/**
+ *
+ * This function creates a resource that can be used to fetch data from an API or any other source.
+ * It returns an object with the following properties:
+ * - data: a signal that contains the data
+ * - status: a signal that contains the status of the resource
+ * - error: a signal that contains the error if the resource is in an errored state
+ * - loading: a signal that contains a boolean indicating if the resource is loading
+ * - latest: a function that returns the latest value of the resource
+ * - refetch: a function that refetches the resource
+ * - mutate: a function that updates the value of the resource
+ * - destroy: a function that destroys the resource
+ *
+ * @example
+ *
+ * getUser(id: string): Promise<User> {
+ * return fetch(`/api/users/${id}`).then(res => res.json());
+ * }
+ *
+ * userId = injectQueryParam('userId');
+ *
+ * res = createResource(() => this.getUser(this.userId()));
+ * or
+ * res = createResource(this.userId, this.getUser);
+ *
+ * res.data() // returns undefined
+ * res.loading() // returns true
+ * res.error() // returns undefined
+ * res.latest() // returns undefined
+ *
+ * // After the promise resolves
+ *
+ * res.data() // returns User
+ * res.loading() // returns false
+ * res.error() // returns undefined
+ * res.latest() // returns User
+ *
+ * // After the promise rejects
+ *
+ * res.data() // returns undefined
+ * res.loading() // returns false
+ * res.error() // returns Error
+ * res.latest() // returns undefined
+ *
+ * // After calling refetch
+ *
+ * res.data() // returns undefined
+ * res.loading() // returns true
+ * res.error() // returns undefined
+ * res.latest() // returns User
+ *
+ * // After calling mutate
+ *
+ * res.data() // returns User
+ * res.loading() // returns false
+ * res.error() // returns undefined
+ * res.latest() // returns User
+ *
+ *
+ */
+export function createResource<TValue, TError = string>(...args: unknown[]) {
+ const { source, fetcher, options } = parseArgs<TValue>(args);
+
+ if (fetcher === undefined) {
+ throw new Error('fetcher is required');
+ }
+
+ const value = signal<TValue | undefined>(options.initialValue);
+ const error = signal<TError | undefined>(undefined);
+ const trigger = signal(0);
+ const state = signal<CreateResourceStatus>(
+ 'initialValue' in options ? 'ready' : 'unresolved',
+ );
+
+ const latest = signal<TValue | undefined>(value());
+
+ const previousTrigger = computedPrevious(trigger);
+
+ return assertInjector(createResource, options.injector, () => {
+ const effectRef = effect(() => {
+ trigger(); // used to trigger the effect and for refetching
+
+ const promise = source ? fetcher(source()) : fetcher();
+
+ // we don't want to track anything else except the source and the fetcher
+ untracked(() => {
+ // TODO: do we want to cancel the current promise if it's still pending? it's easy with observables 😅
+ load(promise!);
+ });
+ });
+
+ function load(p: Promise<TValue> | undefined) {
+ if (p && isPromise(p)) {
+ if (state() === 'pending' || state() === 'refreshing') return;
+
+ // if the trigger has changed, we want to refetch and set the state to refreshing
+ if (trigger() !== previousTrigger()) {
+ state.set('refreshing');
+ } else {
+ state.set('pending');
+ } | ```suggestion
state.set(trigger() !== previousTrigger() ? 'refreshing' : 'pending');
``` |
ngxtension-platform | github_2023 | typescript | 282 | ngxtension | ilirbeqirii | @@ -0,0 +1,285 @@
+import {
+ Injector,
+ computed,
+ effect,
+ isSignal,
+ signal,
+ untracked,
+ type Signal,
+} from '@angular/core';
+import { assertInjector } from 'ngxtension/assert-injector';
+import { computedPrevious } from 'ngxtension/computed-previous';
+
+interface CreateResourceOptions {
+ injector?: Injector;
+}
+
+export type CreateResourceSource<TValue> = Signal<TValue | undefined>;
+
+export type CreateResourceStatus =
+ | 'unresolved'
+ | 'pending'
+ | 'ready'
+ | 'refreshing'
+ | 'errored';
+
+interface Resource<TValue, TError = string> {
+ data: Signal<TValue>;
+ status: Signal<CreateResourceStatus>;
+ error: Signal<TError | undefined>;
+ loading: Signal<boolean>;
+ latest: Signal<TValue>;
+ refetch: () => void;
+ mutate: (value: TValue) => void;
+ destroy: () => void;
+}
+
+// TODO: FIX types as they don't play well
+// Add better naming for the types ex: sourceOrFetcher, fetcherOrOptions
+
+// fetcher with options
+export function createResource<TValue, TError = string>(
+ fetcher: () => Promise<TValue>,
+ options?: CreateResourceOptions & { initialValue?: undefined },
+): Resource<TValue | undefined, TError>;
+
+// fetcher with options + initial value
+export function createResource<TValue, TError = string>(
+ fetcher: () => Promise<TValue>,
+ options: CreateResourceOptions & { initialValue: TValue },
+): Resource<TValue, TError>;
+
+// source + fetcher + options
+export function createResource<TValue, TError = string>(
+ source: Signal<TValue | undefined | null>,
+ fetcher: (source: TValue | undefined) => Promise<TValue>,
+ options?: CreateResourceOptions & { initialValue?: TValue },
+): Resource<TValue | undefined, TError>;
+
+// source + fetcher + options + initial value
+export function createResource<TValue, TError = string>(
+ source: Signal<TValue | undefined>,
+ fetcher: (source: TValue | undefined) => Promise<TValue>,
+ options: CreateResourceOptions & { initialValue: TValue },
+): Resource<TValue, TError>;
+
+export function createResource<TValue, TError = string>(
+ source:
+ | Signal<TValue | undefined>
+ | ((source?: TValue | undefined) => Promise<TValue>),
+ fetcher: (
+ source: TValue | undefined,
+ ) => Promise<TValue> | (CreateResourceOptions & { initialValue: TValue }),
+ options?: CreateResourceOptions & { initialValue?: undefined },
+): Resource<TValue, TError>;
+
+export function createResource<TValue, TError = string>(
+ source: Signal<TValue> | ((source: TValue) => Promise<TValue>),
+ fetcher: (
+ source: TValue,
+ ) => Promise<TValue> | (CreateResourceOptions & { initialValue: TValue }),
+ options: CreateResourceOptions & { initialValue: TValue },
+): Resource<TValue, TError>;
+
+/**
+ *
+ * This function creates a resource that can be used to fetch data from an API or any other source.
+ * It returns an object with the following properties:
+ * - data: a signal that contains the data
+ * - status: a signal that contains the status of the resource
+ * - error: a signal that contains the error if the resource is in an errored state
+ * - loading: a signal that contains a boolean indicating if the resource is loading
+ * - latest: a function that returns the latest value of the resource
+ * - refetch: a function that refetches the resource
+ * - mutate: a function that updates the value of the resource
+ * - destroy: a function that destroys the resource
+ *
+ * @example
+ *
+ * getUser(id: string): Promise<User> {
+ * return fetch(`/api/users/${id}`).then(res => res.json());
+ * }
+ *
+ * userId = injectQueryParam('userId');
+ *
+ * res = createResource(() => this.getUser(this.userId()));
+ * or
+ * res = createResource(this.userId, this.getUser);
+ *
+ * res.data() // returns undefined
+ * res.loading() // returns true
+ * res.error() // returns undefined
+ * res.latest() // returns undefined
+ *
+ * // After the promise resolves
+ *
+ * res.data() // returns User
+ * res.loading() // returns false
+ * res.error() // returns undefined
+ * res.latest() // returns User
+ *
+ * // After the promise rejects
+ *
+ * res.data() // returns undefined
+ * res.loading() // returns false
+ * res.error() // returns Error
+ * res.latest() // returns undefined
+ *
+ * // After calling refetch
+ *
+ * res.data() // returns undefined
+ * res.loading() // returns true
+ * res.error() // returns undefined
+ * res.latest() // returns User
+ *
+ * // After calling mutate
+ *
+ * res.data() // returns User
+ * res.loading() // returns false
+ * res.error() // returns undefined
+ * res.latest() // returns User
+ *
+ *
+ */
+export function createResource<TValue, TError = string>(...args: unknown[]) {
+ const { source, fetcher, options } = parseArgs<TValue>(args);
+
+ if (fetcher === undefined) {
+ throw new Error('fetcher is required');
+ }
+
+ const value = signal<TValue | undefined>(options.initialValue);
+ const error = signal<TError | undefined>(undefined);
+ const trigger = signal(0);
+ const state = signal<CreateResourceStatus>( | maybe this can be renamed to 'status' thus matching the Type Parameter itself too? |
ngxtension-platform | github_2023 | others | 448 | ngxtension | nartc | @@ -5,8 +5,8 @@ import { Icon } from '@astrojs/starlight/components';
const badge = Astro.props.entry.data.badge;
const entryPoint = Astro.props.entry.data.entryPoint;
-const bundleJsUrl = `https://deno.bundlejs.com?q=ngxtension/${entryPoint}&treeshake=[*]&config={%22esbuild%22:{%22external%22:[%22rxjs%22,%22@angular/core%22,%22@angular/common%22,%22@angular/forms%22,%22@angular/router%22]}}`;
-const sourceCodeUrl = `https://github.com/nartc/ngxtension-platform/tree/main/libs/ngxtension/${entryPoint}`;
+const bundleJsUrl = `https://deno.bundlejs.com?q=${entryPoint}&treeshake=[*]&config={%22esbuild%22:{%22external%22:[%22rxjs%22,%22@angular/core%22,%22@angular/common%22,%22@angular/forms%22,%22@angular/router%22]}}`;
+const sourceCodeUrl = `https://github.com/nartc/ngxtension-platform/tree/main/libs/${entryPoint}`; | ```suggestion
const sourceCodeUrl = `https://github.com/ngxtension/ngxtension-platform/tree/main/libs/${entryPoint}`;
``` |
ngxtension-platform | github_2023 | typescript | 467 | ngxtension | ajitzero | @@ -138,11 +138,6 @@ describe(signalSlice.name, () => {
state.age();
expect(testFn).toHaveBeenCalled();
});
-
- it('should connect lazy source after signal value is accessed', () => {
- state().age; | What is this a duplicate of?
The previous one is `state.age()` while this one is `state().age` |
ngxtension-platform | github_2023 | others | 420 | ngxtension | michael-small | @@ -0,0 +1,57 @@
+---
+title: Convert to SFC components migration
+description: Schematics for converting Angular components to SFC components
+entryPoint: convert-to-sfc
+badge: stable
+contributors: ['enea-jahollari']
+---
+
+Angular components can have inline templates or have a separate template file. The inline templates are called SFC (Single File Components) and are a common practice in modern Angular applications.
+This schematic helps you convert your Angular components to SFC components.
+
+### How it works?
+
+The moment you run the schematics, it will look for all the components in your project and will convert them to SFC components.
+
+- It will move the template from the `templateUrl` to the `template` property.
+- The maximum lines length for the template is set to 200 lines. If the template has more than 200 lines, it will be skipped.
+
+In order to change the maximum line length, you can pass the `--max-inline-template-lines` param to the schematics.
+
+````bash | extra ```bash that throws off formatting |
ngxtension-platform | github_2023 | typescript | 420 | ngxtension | ilirbeqirii | @@ -0,0 +1,171 @@
+import {
+ formatFiles,
+ getProjects,
+ joinPathFragments,
+ logger,
+ readJson,
+ readProjectConfiguration,
+ Tree,
+ visitNotIgnoredFiles,
+} from '@nx/devkit';
+import { readFileSync } from 'node:fs';
+import { dirname } from 'node:path';
+import { exit } from 'node:process';
+import { Node, SyntaxKind } from 'ts-morph';
+import { ContentsStore } from '../shared-utils/contents-store';
+import { ConvertToSFCGeneratorSchema } from './schema';
+
+function trackContents(
+ tree: Tree,
+ contentsStore: ContentsStore,
+ fullPath: string,
+) {
+ if (fullPath.endsWith('.ts')) {
+ const fileContent =
+ tree.read(fullPath, 'utf8') || readFileSync(fullPath, 'utf8');
+ if (!fileContent.includes('@Component')) return;
+ if (fileContent.includes('templateUrl')) {
+ contentsStore.track(fullPath, fileContent);
+ }
+ }
+}
+
+export async function convertToSFCGenerator(
+ tree: Tree,
+ options: ConvertToSFCGeneratorSchema,
+) {
+ const contentsStore = new ContentsStore();
+ const packageJson = readJson(tree, 'package.json');
+ const angularCorePackage =
+ packageJson['dependencies']['@angular/core'] ||
+ packageJson['devDependencies']['@angular/core'];
+
+ if (!angularCorePackage) {
+ logger.error(`[ngxtension] No @angular/core detected`);
+ return exit(1);
+ }
+
+ const { path, project, moveStyles, maxInlineTemplateLines } = options;
+
+ if (path && project) {
+ logger.error(
+ `[ngxtension] Cannot pass both "path" and "project" to convertToSFCGenerator`,
+ );
+ return exit(1);
+ }
+
+ if (path) {
+ if (!tree.exists(path)) {
+ logger.error(`[ngxtension] "${path}" does not exist`);
+ return exit(1);
+ }
+
+ trackContents(tree, contentsStore, path);
+ } else if (project) {
+ try {
+ const projectConfiguration = readProjectConfiguration(tree, project);
+
+ if (!projectConfiguration) {
+ throw `"${project}" project not found`;
+ }
+
+ visitNotIgnoredFiles(tree, projectConfiguration.root, (path) => {
+ trackContents(tree, contentsStore, path);
+ });
+ } catch (err) {
+ logger.error(`[ngxtension] ${err}`);
+ return;
+ }
+ } else {
+ const projects = getProjects(tree);
+ for (const project of projects.values()) {
+ visitNotIgnoredFiles(tree, project.root, (path) => {
+ trackContents(tree, contentsStore, path);
+ });
+ }
+ }
+
+ for (const { path: sourcePath } of contentsStore.collection) {
+ if (!sourcePath.endsWith('.ts')) continue;
+
+ const sourceFile = contentsStore.project.getSourceFile(sourcePath)!;
+
+ const classes = sourceFile.getClasses();
+
+ for (const targetClass of classes) {
+ const applicableDecorator = targetClass.getDecorator((decoratorDecl) => {
+ return ['Component'].includes(decoratorDecl.getName());
+ });
+ if (!applicableDecorator) continue;
+
+ const decoratorArg = applicableDecorator.getArguments()[0];
+ if (Node.isObjectLiteralExpression(decoratorArg)) {
+ decoratorArg
+ .getChildrenOfKind(SyntaxKind.PropertyAssignment)
+ .forEach((property) => {
+ const decoratorPropertyName = property.getName();
+ if (decoratorPropertyName === 'templateUrl') {
+ const dir = dirname(sourcePath);
+ const templatePath = joinPathFragments( | Is it possible to create a reusable function for preparing "templatePath", and then reading its content? I also noticed a similar code in the "self-closing tags" schematic. |
ngxtension-platform | github_2023 | typescript | 361 | ngxtension | nartc | @@ -157,16 +131,7 @@ export function signalSlice<
effects?: ( | nit: we can use `/** @deprecated */` JSDoc here to make the intention of deprecating `effects` clearer |
ngxtension-platform | github_2023 | others | 404 | ngxtension | ajitzero | @@ -0,0 +1,6 @@
+{
+ "name": "Fabien Dehopré",
+ "twitter": "https://twitter.com/FabienDehopre",
+ "github": "https://github.com/FabienDehopre",
+ "linkedin": "https://www.linkedin.com/in/fabien1979/?lipi=urn%3Ali%3Apage%3Ad_flagship3_feed%3BbRilI4BMRLygBScKCu5i%2Fw%3D%3D" | minor: 😅
```suggestion
"linkedin": "https://www.linkedin.com/in/fabien1979/"
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.