import asyncio
import atexit
import copy
import inspect
import math
import textwrap
import time
from collections import defaultdict, deque
from collections.abc import Callable
from contextlib import nullcontext
from pathlib import Path
from typing import Any

import numpy as np
import pandas as pd
import torch
import torch.utils.data
import transformers
from accelerate.logging import get_logger
from accelerate.utils import gather, gather_object, is_peft_model, set_seed
from datasets import Dataset, IterableDataset
from packaging.version import Version
from torch import nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.utils.data import Sampler
from transformers import (
    AutoModelForSequenceClassification,
    AutoProcessor,
    AutoTokenizer,
    GenerationConfig,
    PreTrainedModel,
    PreTrainedTokenizerBase,
    ProcessorMixin,
    TrainerCallback,
    is_trackio_available,
    is_wandb_available,
)
from transformers.utils import is_peft_available, is_rich_available

from ..data_utils import apply_chat_template, is_conversational, prepare_multimodal_messages
from ..extras.profiling import profiling_context, profiling_decorator
from ..generation.vllm_generation import VLLMGeneration
from ..models import prepare_deepspeed, prepare_fsdp, unwrap_model_for_generation
from ..models.utils import disable_gradient_checkpointing
from .base_trainer import _BaseTrainer
from .callbacks import SyncRefModelCallback
from .rloo_config import RLOOConfig
from .utils import (
    RepeatSampler,
    create_model_from_path,
    disable_dropout_in_model,
    entropy_from_logits,
    get_config_model_id,
    identity,
    nanmax,
    nanmin,
    nanstd,
    pad,
    print_prompt_completions_sample,
    selective_log_softmax,
    shuffle_sequence_dict,
    shutdown_event_loop_in_daemon,
    split_pixel_values_by_grid,
    split_tensor_dict,
    start_event_loop_in_daemon,
    unsplit_pixel_values_by_grid,
    use_adapter,
)


if is_peft_available():
    from peft import PeftConfig, PeftModel, get_peft_model


if is_wandb_available():
    import wandb


if is_trackio_available():
    import trackio


logger = get_logger(__name__)


# A reward function computes a reward for each (prompt, completion) pair. A string is treated as the model ID of a
# sequence-classification model; a callable receives the prompts, completions, and any extra dataset columns, and
# returns a list of floats (or `None` for samples it does not apply to).
RewardFunc = str | PreTrainedModel | Callable[..., list[float | None]]


class RLOOTrainer(_BaseTrainer):
| """ |
| Trainer for the Reinforce Leave One Out (RLOO) method. This algorithm was initially proposed in the paper [Back to |
| Basics: Revisiting REINFORCE Style Optimization for Learning from Human Feedback in |
| LLMs](https://huggingface.co/papers/2402.14740). |
| |
| Example: |
| |
| ```python |
| from trl import RLOOTrainer |
| from trl.rewards import accuracy_reward |
| from datasets import load_dataset |
| |
| dataset = load_dataset("trl-lib/DeepMath-103K", split="train") |
| |
| trainer = RLOOTrainer( |
| model="Qwen/Qwen2.5-0.5B-Instruct", |
| reward_funcs=accuracy_reward, |
| train_dataset=dataset, |
| ) |
| trainer.train() |
| ``` |
| |
| Args: |
| model (`str` or [`~transformers.PreTrainedModel`] or [`~peft.PeftModel`]): |
| Model to be trained. Can be either: |
| |
| - A string, being the *model id* of a pretrained model hosted inside a model repo on huggingface.co, or a |
| path to a *directory* containing model weights saved using |
| [`~transformers.PreTrainedModel.save_pretrained`], e.g., `'./my_model_directory/'`. The model is loaded |
| using `<ModelArchitecture>.from_pretrained` (where `<ModelArchitecture>` is derived from the model |
| config) with the keyword arguments in `args.model_init_kwargs`. |
| - A [`~transformers.PreTrainedModel`] object. Only causal language models are supported. |
| - A [`~peft.PeftModel`] object. Only causal language models are supported. |
| reward_funcs (`RewardFunc | list[RewardFunc]`): |
| Reward functions to be used for computing the rewards. To compute the rewards, we call all the reward |
| functions with the prompts and completions and sum the rewards. Can be either: |
| |
| - A single reward function, such as: |
| - A string: The *model ID* of a pretrained model hosted inside a model repo on huggingface.co, or a |
| path to a *directory* containing model weights saved using |
| [`~transformers.PreTrainedModel.save_pretrained`], e.g., `'./my_model_directory/'`. The model is loaded |
| using [`~transformers.AutoModelForSequenceClassification.from_pretrained`] with `num_labels=1` and the |
| keyword arguments in `args.model_init_kwargs`. |
| - A [`~transformers.PreTrainedModel`] object: Only sequence classification models are supported. |
| - A custom reward function: The function is provided with the prompts and the generated completions, |
| plus any additional columns in the dataset. It should return a list of rewards. Custom reward |
| functions can be either synchronous or asynchronous and can also return `None` when the reward is |
| not applicable to those samples. This is useful for multi-task training where different reward |
| functions apply to different types of samples. When a reward function returns `None` for a sample, |
| that reward function is excluded from the reward calculation for that sample. For more details, see |
| [Using a custom reward |
| function](#using-a-custom-reward-function). |
| |
| The trainer's state is also passed to the reward function. The trainer's state is an instance of |
| [`~transformers.TrainerState`] and can be accessed by accessing the `trainer_state` argument to the |
| reward function's signature. |
| - A list of reward functions, where each item can independently be any of the above types. Mixing different |
| types within the list (e.g., a string model ID and a custom reward function) is allowed. |
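
            A minimal sketch of a custom reward function (illustrative; it assumes a standard-format dataset with an
            extra `answer` column, and returns `None` for samples the reward does not apply to):

            ```python
            def answer_match(prompts, completions, answer, **kwargs):
                return [1.0 if a in c else None for c, a in zip(completions, answer)]
            ```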
        args ([`RLOOConfig`], *optional*):
            Configuration for this trainer. If `None`, a default configuration is used.
        train_dataset ([`~datasets.Dataset`] or [`~datasets.IterableDataset`]):
            Dataset to use for training. It must include a column `"prompt"`. Any additional columns in the dataset
            are ignored. The format of the samples can be either:

            - [Standard](dataset_formats#standard): Each sample contains plain text.
            - [Conversational](dataset_formats#conversational): Each sample contains structured messages (e.g., role
              and content).
        eval_dataset ([`~datasets.Dataset`], [`~datasets.IterableDataset`] or `dict[str, Dataset | IterableDataset]`):
            Dataset to use for evaluation. It must meet the same requirements as `train_dataset`.
        processing_class ([`~transformers.PreTrainedTokenizerBase`], [`~transformers.ProcessorMixin`], *optional*):
            Processing class used to process the data. The padding side must be set to `"left"`. If `None`, the
            processing class is loaded from the model's name with [`~transformers.AutoProcessor.from_pretrained`]. A
            padding token, `tokenizer.pad_token`, must be set. If the processing class has not set a padding token,
            `tokenizer.eos_token` will be used as the default.
        reward_processing_classes ([`~transformers.PreTrainedTokenizerBase`] or `list[PreTrainedTokenizerBase]`, *optional*):
            Processing classes corresponding to the reward functions specified in `reward_funcs`. Can be either:

            - A single processing class: Used when `reward_funcs` contains only one reward function.
            - A list of processing classes: Must match the order and length of the reward functions in `reward_funcs`.

            If set to `None`, or if an element of the list corresponding to a [`~transformers.PreTrainedModel`] is
            `None`, the tokenizer for the model is automatically loaded using
            [`~transformers.AutoTokenizer.from_pretrained`]. For elements in `reward_funcs` that are custom reward
            functions (not [`~transformers.PreTrainedModel`]), the corresponding entries in
            `reward_processing_classes` are ignored.
        callbacks (list of [`~transformers.TrainerCallback`], *optional*):
            List of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed [here](https://huggingface.co/docs/transformers/main_classes/callback).

            If you want to remove one of the default callbacks used, use the
            [`~transformers.Trainer.remove_callback`] method.
        optimizers (`tuple[torch.optim.Optimizer | None, torch.optim.lr_scheduler.LambdaLR | None]`, *optional*, defaults to `(None, None)`):
            A tuple containing the optimizer and the scheduler to use. Will default to an instance of `AdamW` on your
            model and a scheduler given by [`~transformers.get_linear_schedule_with_warmup`] controlled by `args`.
        peft_config ([`~peft.PeftConfig`], *optional*):
            PEFT configuration used to wrap the model. If `None`, the model is not wrapped.
    """

    _tag_names = ["trl", "rloo"]
    _name = "RLOO"
    _paper = {
        "title": "Back to Basics: Revisiting REINFORCE-Style Optimization for Learning from Human Feedback in LLMs",
        "id": "2402.14740",
        "citation": textwrap.dedent("""\
            @inproceedings{ahmadian2024back,
                title = {{Back to Basics: Revisiting REINFORCE-Style Optimization for Learning from Human Feedback in LLMs}},
                author = {Arash Ahmadian and Chris Cremer and Matthias Gall{\'{e}} and Marzieh Fadaee and Julia Kreutzer and Olivier Pietquin and Ahmet {\"{U}}st{\"{u}}n and Sara Hooker},
                year = 2024,
                booktitle = {Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), {ACL} 2024, Bangkok, Thailand, August 11-16, 2024},
                pages = {12248--12267},
                publisher = {Association for Computational Linguistics},
                editor = {Lun{-}Wei Ku and Andre Martins and Vivek Srikumar},
            }"""),
    }

    def __init__(
        self,
        model: "str | PreTrainedModel | PeftModel",
        reward_funcs: RewardFunc | list[RewardFunc],
        args: RLOOConfig | None = None,
        train_dataset: Dataset | IterableDataset | None = None,
        eval_dataset: Dataset | IterableDataset | dict[str, Dataset | IterableDataset] | None = None,
        processing_class: PreTrainedTokenizerBase | ProcessorMixin | None = None,
        reward_processing_classes: PreTrainedTokenizerBase | list[PreTrainedTokenizerBase] | None = None,
        callbacks: list[TrainerCallback] | None = None,
        optimizers: tuple[torch.optim.Optimizer | None, torch.optim.lr_scheduler.LambdaLR | None] = (None, None),
        peft_config: "PeftConfig | None" = None,
    ):
        # Args
        if args is None:
            model_name = model if isinstance(model, str) else get_config_model_id(model.config)
            model_name = model_name.split("/")[-1]
            args = RLOOConfig(f"{model_name}-RLOO")

        # Model
        if isinstance(model, str):
            model_init_kwargs = args.model_init_kwargs or {}
            # `device_map` must be None when the model is later sharded by the distributed backend
            if args.distributed_state.distributed_type in ["MULTI_GPU", "DEEPSPEED"]:
                model_init_kwargs["device_map"] = None
            model = create_model_from_path(model, **model_init_kwargs)
        else:
            if args.model_init_kwargs is not None:
                logger.warning(
                    "You passed `model_init_kwargs` to the `RLOOConfig`, but your model is already instantiated. "
                    "The `model_init_kwargs` will be ignored."
                )

        # Keyword arguments accepted by the model's forward, used later to filter model inputs
        self.model_kwarg_keys = (
            inspect.signature(model.forward).parameters.keys()
            if not hasattr(model, "get_base_model")
            else inspect.signature(model.get_base_model().forward).parameters.keys()
        )

        # Processing class
        if processing_class is None:
            processing_class = AutoProcessor.from_pretrained(
                get_config_model_id(model.config), truncation_side="left", padding_side="left"
            )

        # Handle pad token for processors or tokenizers
        if isinstance(processing_class, ProcessorMixin):
            tokenizer = processing_class.tokenizer
        elif isinstance(processing_class, PreTrainedTokenizerBase):
            tokenizer = processing_class
        else:
            raise TypeError("The `processing_class` must be either a `PreTrainedTokenizerBase` or a `ProcessorMixin`")

        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        self.pad_token = tokenizer.pad_token
        self.pad_token_id = tokenizer.pad_token_id
        self.eos_token_id = tokenizer.eos_token_id

        if is_peft_available() and is_peft_model(model) and peft_config is not None:
            raise ValueError(
                "You passed a `PeftModel` instance together with a `peft_config` to the trainer. Please first merge "
                "and unload the existing adapter, save the resulting base model, and then pass that base model along "
                "with the new `peft_config` to the trainer."
            )
        if is_peft_available() and is_peft_model(model):
            # Keep a frozen copy of the initial adapter weights under the name "ref"; it is used later to recover
            # the reference policy.
            model.add_adapter("ref", model.peft_config["default"])
            for name, param in model.named_parameters():
                if ".default." in name:
                    ref_name = name.replace(".default.", ".ref.")
                    ref_param = model.get_parameter(ref_name)
                    ref_param.data.copy_(param.data)

        if peft_config is not None:
            model = get_peft_model(model, peft_config)

        # With PEFT and gradient checkpointing, inputs must require grads for the backward hooks to fire
        if is_peft_available() and is_peft_model(model) and args.gradient_checkpointing:
            model.enable_input_require_grads()

        # Cast the trainable parameters of quantized models to bf16, as they are usually kept in fp32
        if getattr(model, "is_loaded_in_4bit", False) or getattr(model, "is_loaded_in_8bit", False):
            for param in model.parameters():
                if param.requires_grad:
                    param.data = param.data.to(torch.bfloat16)

        # Reward functions
        if not isinstance(reward_funcs, list):
            reward_funcs = [reward_funcs]
        self.reward_func_names = []
        for i, reward_func in enumerate(reward_funcs):
            if isinstance(reward_func, str):
                model_init_kwargs = args.model_init_kwargs or {}
                # `device_map` must be None when the model is later sharded by the distributed backend
                if args.distributed_state.distributed_type in ["MULTI_GPU", "DEEPSPEED"]:
                    model_init_kwargs["device_map"] = None
                reward_funcs[i] = AutoModelForSequenceClassification.from_pretrained(
                    reward_func, num_labels=1, **model_init_kwargs
                )
            if isinstance(reward_funcs[i], nn.Module):
                self.reward_func_names.append(get_config_model_id(reward_funcs[i].config).split("/")[-1])
            else:
                self.reward_func_names.append(reward_funcs[i].__name__)
        self.reward_funcs = reward_funcs

        # Async reward functions run concurrently on a dedicated background event loop
        self._has_async_reward_funcs = any(inspect.iscoroutinefunction(func) for func in self.reward_funcs)
        if self._has_async_reward_funcs:
            self.async_reward_loop_thread, self.async_reward_loop, self.async_reward_loop_ready_event = (
                start_event_loop_in_daemon(name="RLOOTrainer-AsyncRewardLoop")
            )
            # Wait until the loop is running before scheduling coroutines on it
            self.async_reward_loop_ready_event.wait()
            atexit.register(shutdown_event_loop_in_daemon, self.async_reward_loop_thread, self.async_reward_loop)

        # Reward weights
        if args.reward_weights is not None:
            if len(args.reward_weights) != len(reward_funcs):
                raise ValueError(
                    f"Number of reward weights ({len(args.reward_weights)}) must match number of reward "
                    f"functions ({len(reward_funcs)})"
                )
            self.reward_weights = torch.tensor(args.reward_weights, dtype=torch.float32)
        else:
            self.reward_weights = torch.ones(len(reward_funcs), dtype=torch.float32)

        # Reward processing classes
        if reward_processing_classes is None:
            reward_processing_classes = [None] * len(reward_funcs)
        elif not isinstance(reward_processing_classes, list):
            reward_processing_classes = [reward_processing_classes]
        if len(reward_processing_classes) != len(reward_funcs):
            raise ValueError(
                f"The number of reward processing classes ({len(reward_processing_classes)}) must match the number of "
                f"reward functions ({len(reward_funcs)})."
            )

        for i, (reward_processing_class, reward_func) in enumerate(
            zip(reward_processing_classes, reward_funcs, strict=True)
        ):
            if isinstance(reward_func, PreTrainedModel):
                if reward_processing_class is None:
                    reward_processing_class = AutoTokenizer.from_pretrained(get_config_model_id(reward_func.config))
                if reward_processing_class.pad_token_id is None:
                    reward_processing_class.pad_token = reward_processing_class.eos_token
                # The reward model computes the reward for the latest non-padded token in the input sequence, so the
                # model config's pad token ID must match the processing class's.
                reward_func.config.pad_token_id = reward_processing_class.pad_token_id
                reward_processing_classes[i] = reward_processing_class

        self.reward_processing_classes = reward_processing_classes

        # Training arguments
        self.max_completion_length = args.max_completion_length
        self.num_generations = args.num_generations
        self.num_generations_eval = args.num_generations_eval or self.num_generations
        self.chat_template_kwargs = args.chat_template_kwargs or {}
        self.temperature = args.temperature
        self.top_p = args.top_p
        self.top_k = args.top_k
        self.min_p = args.min_p
        self.repetition_penalty = args.repetition_penalty
        self.use_transformers_paged = args.use_transformers_paged
        self.pad_to_multiple_of = args.pad_to_multiple_of
        self.use_vllm = args.use_vllm
        self.vllm_mode = args.vllm_mode
        self.vllm_gpu_memory_utilization = args.vllm_gpu_memory_utilization
        self.vllm_tensor_parallel_size = args.vllm_tensor_parallel_size
        self.normalize_advantages = args.normalize_advantages
        self.mask_truncated_completions = args.mask_truncated_completions
        self.reward_clip_range = args.reward_clip_range

        # Dataset
        self.shuffle_dataset = args.shuffle_dataset

        if train_dataset is None:
            raise ValueError("`train_dataset` is required")
        elif (
            isinstance(train_dataset, IterableDataset)
            or isinstance(eval_dataset, IterableDataset)
            or (
                isinstance(eval_dataset, dict) and any(isinstance(ds, IterableDataset) for ds in eval_dataset.values())
            )
        ):
            raise NotImplementedError(
                "Iterable datasets are not yet supported in RLOOTrainer. Please use a standard dataset instead."
            )

        # Multi-step optimization
        self.num_iterations = args.num_iterations
        self.epsilon_low = args.epsilon
        self.epsilon_high = args.epsilon_high if args.epsilon_high is not None else args.epsilon
        # Tracks the number of iterations (forward + backward passes), including those within a grad accum cycle
        self._step = 0
        # Buffer the batch to reuse generated outputs across multiple updates. For more details, see
        # `_get_train_sampler` and `_prepare_inputs`.
        self._buffered_inputs = None

        # For transformers < 5.0.0, default gradient checkpointing to the non-reentrant implementation
        if args.gradient_checkpointing and Version(transformers.__version__) < Version("5.0.0"):
            args.gradient_checkpointing_kwargs = args.gradient_checkpointing_kwargs or {}
            args.gradient_checkpointing_kwargs.setdefault("use_reentrant", False)

        super().__init__(
            model=model,
            args=args,
            data_collator=identity,  # no data collation is needed in RLOO
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            processing_class=processing_class,
            callbacks=callbacks,
            optimizers=optimizers,
        )

        # Reference model
        self.beta = args.beta
        if self.beta == 0.0:
            # If beta is 0.0, the reference model is not needed
            self.ref_model = None
        elif is_peft_model(model):
            # If PEFT is used, the reference model is not needed since the adapter can be disabled to revert to the
            # initial model.
            self.ref_model = None
        else:
            # Otherwise, create a reference model from the same checkpoint
            model_init_kwargs = args.model_init_kwargs or {}
            # `device_map` must be None when the model is later sharded by the distributed backend
            if self.args.distributed_state.distributed_type in ["MULTI_GPU", "DEEPSPEED"]:
                model_init_kwargs["device_map"] = None
            self.ref_model = create_model_from_path(get_config_model_id(self.model.config), **model_init_kwargs)

        # Disable dropout in the models
        if args.disable_dropout:
            disable_dropout_in_model(model)
            if self.ref_model is not None:
                disable_dropout_in_model(self.ref_model)

        # Initialize the metrics
        self._metrics = {"train": defaultdict(list), "eval": defaultdict(list)}
        self._total_train_tokens = 0
        self._current_train_step_time = 0.0
        self.log_completions = args.log_completions
        self.log_unique_prompts = args.log_unique_prompts
        self.num_completions_to_print = args.num_completions_to_print
        # Size the log buffers to the generation batch so only the latest generation round is kept
        self._logs = {
            "images": deque(maxlen=args.generation_batch_size),
            "prompt": deque(maxlen=args.generation_batch_size),
            "completion": deque(maxlen=args.generation_batch_size),
            "rewards": defaultdict(lambda: deque(maxlen=args.generation_batch_size)),
            "advantages": deque(maxlen=args.generation_batch_size),
            "extra": defaultdict(lambda: deque(maxlen=args.generation_batch_size)),
        }
        # Extra columns and scalar metrics reported by reward functions, flushed on each log step
        self._pending_extra_logs = defaultdict(list)
        self._pending_metrics = defaultdict(list)

        # Ensure each process receives a different seed to prevent identical completions when generating with
        # transformers if the seeds would otherwise be the same
        set_seed(args.seed, device_specific=True)

        if self.use_vllm:
            self.vllm_generation = VLLMGeneration(
                model=self.model,
                accelerator=self.accelerator,
                is_fsdp_enabled=self.is_fsdp_enabled,
                processing_class=self.processing_class,
                # Generation backend
                mode=args.vllm_mode,
                structured_outputs_regex=args.vllm_structured_outputs_regex,
                # Server mode
                server_base_url=args.vllm_server_base_url,
                server_host=args.vllm_server_host,
                server_port=args.vllm_server_port,
                group_port=args.vllm_group_port,
                server_timeout=args.vllm_server_timeout,
                # Colocate mode
                tensor_parallel_size=args.vllm_tensor_parallel_size,
                gpu_memory_utilization=args.vllm_gpu_memory_utilization,
                max_model_length=args.vllm_max_model_length,
                max_num_seqs=args.per_device_train_batch_size
                * args.vllm_tensor_parallel_size
                * args.steps_per_generation,
                enable_sleep_mode=args.vllm_enable_sleep_mode,
                model_impl=args.vllm_model_impl,
                # Sampling parameters
                repetition_penalty=self.repetition_penalty,
                temperature=self.temperature,
                top_p=self.top_p,
                top_k=self.top_k,
                min_p=self.min_p,
                max_completion_length=self.max_completion_length,
                logprobs=None,
                generation_kwargs=args.generation_kwargs,
            )
            self._last_loaded_step = -1  # tag to avoid useless loading during grad accumulation
        else:
            generation_kwargs = {
                "max_new_tokens": self.max_completion_length,
                "do_sample": True,
                "pad_token_id": tokenizer.pad_token_id,
                "bos_token_id": tokenizer.bos_token_id,
                "eos_token_id": tokenizer.eos_token_id,
                "temperature": self.temperature,
                "top_p": self.top_p,
                "top_k": self.top_k,
                "min_p": self.min_p,
                "repetition_penalty": self.repetition_penalty,
                "cache_implementation": args.cache_implementation,
            }
            if args.generation_kwargs is not None:
                generation_kwargs.update(args.generation_kwargs)
            self.generation_config = GenerationConfig(**generation_kwargs, disable_compile=True)
            # Keep the raw kwargs around; they are also passed to `unwrap_model_for_generation`
            self.generation_kwargs = generation_kwargs

        # Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether
        # the model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set
        # `self.model_accepts_loss_kwargs` to `False` to enable scaling.
        self.model_accepts_loss_kwargs = False

        # Add tags to the model
        self.model.add_model_tags(self._tag_names)

        if self.ref_model is not None:
            if self.is_deepspeed_enabled:
                self.ref_model = prepare_deepspeed(self.ref_model, self.accelerator)
            elif self.is_fsdp_enabled:
                self.ref_model = prepare_fsdp(self.ref_model, self.accelerator)
            else:
                self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)

        if args.sync_ref_model:
            if self.beta == 0.0:
                raise ValueError(
                    "You passed `sync_ref_model=True` while `beta=0.0`, which means the reference model is not used "
                    "during training. Consequently, RLOOTrainer does not create a `ref_model` instance, and there is "
                    "nothing to synchronize. Please set `sync_ref_model=False`, or set `beta` to a non-zero value."
                )
            if is_peft_model(model):
                raise NotImplementedError(
                    "You passed `sync_ref_model=True` while using a PEFT model, which is currently not supported. "
                    "With PEFT, RLOOTrainer does not keep a separate reference model in memory; instead, it recovers "
                    "reference behavior by temporarily disabling the adapter. As a result, there is no standalone "
                    "`ref_model` instance to synchronize. Use `sync_ref_model=False`, or opt for full fine-tuning if "
                    "you need a synced reference model. If you need `sync_ref_model` to work with PEFT, please open a "
                    "feature request at https://github.com/huggingface/trl/issues."
                )
            self.add_callback(SyncRefModelCallback(ref_model=self.ref_model, accelerator=self.accelerator))

        for i, reward_func in enumerate(self.reward_funcs):
            if isinstance(reward_func, PreTrainedModel):
                if self.is_deepspeed_enabled:
                    self.reward_funcs[i] = prepare_deepspeed(reward_func, self.accelerator)
                else:
                    # Set device placement to True to make `prepare_model` move `reward_func` to device when using FSDP
                    self.reward_funcs[i] = self.accelerator.prepare_model(
                        reward_func, evaluation_mode=True, device_placement=True
                    )

    def _set_signature_columns_if_needed(self):
        # If `self.args.remove_unused_columns` is True, non-signature columns are removed. By default, this method
        # sets `self._signature_columns` to the model's expected inputs. In RLOOTrainer, we preprocess the data
        # ourselves, so we instead set the signature columns to those expected by `training_step`.
        if self._signature_columns is None:
            self._signature_columns = ["prompt", "image", "images"]

    def get_train_dataloader(self):
        # Each dataloader batch covers `steps_per_generation` optimization steps' worth of samples, so that one
        # generation round can be split across several training steps (see `_prepare_inputs`).
        return self._get_dataloader(
            dataset=self.train_dataset,
            description="Training",
            batch_size=self._train_batch_size * self.args.steps_per_generation,
            sampler_fn=self._get_train_sampler,
            is_training=True,
        )

    def _get_train_sampler(self, dataset: Dataset | None = None) -> Sampler:
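        # Returns a sampler that (a) repeats each prompt `num_generations` times within a generation batch and
        # (b) replays each generation batch `num_iterations * steps_per_generation` times, so completions generated
        # once can be reused across several optimization steps.
        #
        # Illustrative sketch (assuming num_generations=3, two unique prompts per generation batch, and
        # num_iterations * steps_per_generation = 2), the sampled dataset indices look like:
        #
        #     [0, 0, 0, 1, 1, 1,   0, 0, 0, 1, 1, 1,   2, 2, 2, 3, 3, 3,   2, 2, 2, 3, 3, 3, ...]
        #      |---- batch 1 ---|  |- batch 1 again -|  ...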
        if dataset is None:
            dataset = self.train_dataset
        return RepeatSampler(
            data_source=dataset,
            mini_repeat_count=self.num_generations,
            batch_size=self.args.generation_batch_size // self.num_generations,
            repeat_count=self.num_iterations * self.args.steps_per_generation,
            shuffle=self.shuffle_dataset,
            seed=self.args.seed,
        )

    def _get_eval_sampler(self, eval_dataset) -> Sampler:
        # Repeat each eval prompt `num_generations_eval` times
        return RepeatSampler(
            data_source=eval_dataset,
            mini_repeat_count=self.num_generations_eval,
            seed=self.args.seed,
        )

    @profiling_decorator
    def _get_per_token_logps_and_entropies(
        self,
        model,
        input_ids,
        attention_mask,
        logits_to_keep,
        batch_size=None,
        compute_entropy=False,
        pixel_values=None,
        image_grid_thw=None,
        num_images=None,
        pixel_attention_mask=None,
        image_sizes=None,
        token_type_ids=None,
        mm_token_type_ids=None,
        pixel_position_ids=None,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Compute log-probs and (optionally) entropies for each token."""
        batch_size = batch_size or input_ids.size(0)  # chunk inputs to reduce the memory peak
        all_logps = []
        all_entropies = []
        for start in range(0, input_ids.size(0), batch_size):
            input_ids_batch = input_ids[start : start + batch_size]
            attention_mask_batch = attention_mask[start : start + batch_size]

            # Build model inputs, slicing multimodal fields to match the current chunk
            model_inputs = {"input_ids": input_ids_batch, "attention_mask": attention_mask_batch}
            if image_grid_thw is not None and pixel_values is not None:
                rows_per_image = image_grid_thw.prod(dim=-1)
                rows_per_sample = torch.split(rows_per_image, num_images)
                rows_per_sample = torch.stack([s.sum() for s in rows_per_sample])
                cum_rows = torch.cat([torch.tensor([0], device=rows_per_sample.device), rows_per_sample.cumsum(0)])
                row_start, row_end = cum_rows[start].item(), cum_rows[start + batch_size].item()
                model_inputs["pixel_values"] = pixel_values[row_start:row_end]
                cum_imgs = torch.tensor([0] + num_images).cumsum(0)
                img_start, img_end = cum_imgs[start], cum_imgs[start + batch_size]
                model_inputs["image_grid_thw"] = image_grid_thw[img_start:img_end]
            elif pixel_values is not None:
                model_inputs["pixel_values"] = pixel_values[start : start + batch_size]
            if pixel_attention_mask is not None:
                model_inputs["pixel_attention_mask"] = pixel_attention_mask[start : start + batch_size]
            if image_sizes is not None:
                model_inputs["image_sizes"] = image_sizes[start : start + batch_size]
            if token_type_ids is not None:
                model_inputs["token_type_ids"] = token_type_ids[start : start + batch_size]
            if mm_token_type_ids is not None:
                model_inputs["mm_token_type_ids"] = mm_token_type_ids[start : start + batch_size]
            if pixel_position_ids is not None:
                model_inputs["pixel_position_ids"] = pixel_position_ids[start : start + batch_size]

            # Only compute the logits we need, when the model supports it
            if "logits_to_keep" in self.model_kwarg_keys:
                # We add 1 to `logits_to_keep` because the last logit of the sequence is later excluded
                model_inputs["logits_to_keep"] = logits_to_keep + 1

            model_inputs["use_cache"] = False  # the KV cache is only useful for generation

            logits = model(**model_inputs).logits
            # Exclude the final logit: it predicts the token after the sequence
            logits = logits[:, :-1, :]
            # Keep only the logits for the completion tokens
            logits = logits[:, -logits_to_keep:, :]
            # Divide logits by the sampling temperature so the log-probs match the sampling distribution
            logits.div_(self.temperature)
            completion_ids = input_ids_batch[:, -logits_to_keep:]
            logps = selective_log_softmax(logits, completion_ids)  # compute the per-token log-probs
            all_logps.append(logps)

            if compute_entropy:
                with torch.no_grad():
                    entropies = entropy_from_logits(logits)
                all_entropies.append(entropies)

        logps = torch.cat(all_logps, dim=0)
        entropies = torch.cat(all_entropies, dim=0) if compute_entropy else None
        return logps, entropies

    def training_step(self, model, inputs, num_items_in_batch):
        time_before = time.perf_counter()
        output = super().training_step(model, inputs, num_items_in_batch)
        self._step += 1
        time_after = time.perf_counter()
        self._current_train_step_time += time_after - time_before
        if self._step % self.current_gradient_accumulation_steps == 0:
            self._metrics["train"]["step_time"].append(self._current_train_step_time)
            self._current_train_step_time = 0.0
        return output

    @profiling_decorator
    def _prepare_inputs(self, generation_batch: dict[str, torch.Tensor | Any]) -> dict[str, torch.Tensor | Any]:
        # Prepares inputs by managing completion generation and batch handling. During training, completions are
        # generated once every `steps_per_generation * num_iterations` steps, buffered, split into
        # `steps_per_generation` mini-batches, and reused for the intermediate steps. During evaluation, completions
        # are generated fresh for every batch.
        mode = "train" if self.model.training else "eval"
        if mode == "train":
            generate_every = self.args.steps_per_generation * self.num_iterations
            if self._step % generate_every == 0 or self._buffered_inputs is None:
                # `self._buffered_inputs` being None can occur when resuming from a checkpoint
                generation_batch = self._generate_and_score_completions(generation_batch)
                generation_batch = split_pixel_values_by_grid(generation_batch)
                generation_batch = shuffle_sequence_dict(generation_batch)
                generation_batches = split_tensor_dict(generation_batch, self.args.steps_per_generation)
                self._buffered_inputs = [unsplit_pixel_values_by_grid(batch) for batch in generation_batches]
            inputs = self._buffered_inputs[self._step % self.args.steps_per_generation]
        else:
            # In evaluation, there is neither batch grouping for generation nor multiple iterations, hence the
            # local generation batch equals the local eval batch
            inputs = self._generate_and_score_completions(generation_batch)
        return inputs

    def _log_completion_extra(self, column: str, values: list):
        """
        Log extra columns to the completions table. Called from reward functions via the `log_extra` kwarg.

        Args:
            column (`str`):
                Name of the column to add.
            values (`list`):
                Values for the column, one per sample in the batch.
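
        Example (illustrative sketch of a custom reward function using this hook, which is passed to reward
        functions as the `log_extra` keyword argument):

        ```python
        def my_reward(prompts, completions, log_extra, **kwargs):
            log_extra("completion_chars", [len(str(c)) for c in completions])
            return [0.0] * len(completions)
        ```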
| """ |
| self._pending_extra_logs[column].extend(values) |
|
|

    def _log_metric(self, name: str, value: float):
        """
        Log a scalar metric from a reward function. Called via the `log_metric` kwarg. Values are averaged over each
        logging step and reported alongside built-in metrics like `kl` and `entropy`.

        Args:
            name (`str`):
                Name of the metric.
            value (`float`):
                Scalar value for this batch.
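
        Example (illustrative sketch; the hook is passed to reward functions as the `log_metric` keyword argument):

        ```python
        def my_reward(prompts, completions, log_metric, **kwargs):
            rewards = [float(len(str(c)) < 200) for c in completions]
            log_metric("frac_short_completions", sum(rewards) / len(rewards))
            return rewards
        ```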
| """ |
| self._pending_metrics[name].append(value) |
|

    @profiling_decorator
    def _calculate_rewards(self, inputs, prompts, completions, completion_ids_list):
        device = self.accelerator.device
        rewards_per_func = torch.zeros(len(prompts), len(self.reward_funcs), device=device)

        # Repeat all input columns (but "prompt", "completion", and "completion_ids") to generate the reward
        keys = [key for key in inputs[0] if key not in ["prompt", "completion", "completion_ids"]]
        reward_kwargs = {key: [example[key] for example in inputs] for key in keys}

        # Pass the trainer state, which allows for dynamic reward shaping based on training progress
        reward_kwargs["trainer_state"] = self.state

        # Hook that lets reward functions add extra columns to the logged completions table
        reward_kwargs["log_extra"] = self._log_completion_extra

        # Hook that lets reward functions report scalar metrics
        reward_kwargs["log_metric"] = self._log_metric

        async_funcs_info = []

        for i, (reward_func, reward_processing_class, reward_func_name) in enumerate(
            zip(self.reward_funcs, self.reward_processing_classes, self.reward_func_names, strict=True)
        ):
            if isinstance(reward_func, nn.Module):  # Module (not PreTrainedModel) for compat with compiled models
                with profiling_context(self, reward_func_name):
                    if is_conversational(inputs[0]):
                        messages = [{"messages": p + c} for p, c in zip(prompts, completions, strict=True)]
                        texts = [
                            apply_chat_template(x, reward_processing_class, **self.chat_template_kwargs)["text"]
                            for x in messages
                        ]
                    else:
                        texts = [p + c for p, c in zip(prompts, completions, strict=True)]
                    reward_inputs = reward_processing_class(
                        text=texts, return_tensors="pt", padding=True, padding_side="right", add_special_tokens=False
                    )
                    reward_inputs = super()._prepare_inputs(reward_inputs)
                    with torch.inference_mode():
                        rewards_per_func[:, i] = reward_func(**reward_inputs).logits[:, 0]
            elif inspect.iscoroutinefunction(reward_func):
                # Defer async reward functions so they can run concurrently afterwards
                async_funcs_info.append((i, reward_func, reward_func_name))
            else:
                # Synchronous custom reward function
                with profiling_context(self, reward_func_name):
                    output_reward_func = reward_func(
                        prompts=prompts, completions=completions, completion_ids=completion_ids_list, **reward_kwargs
                    )
                    # Convert None values to NaN
                    output_reward_func = [reward if reward is not None else torch.nan for reward in output_reward_func]
                    rewards_per_func[:, i] = torch.tensor(output_reward_func, dtype=torch.float32, device=device)

        # Run all async reward functions concurrently on the background event loop
        if async_funcs_info:

            async def _invoke_async_reward(index, func, func_name):
                with profiling_context(self, func_name):
                    output = await func(
                        prompts=prompts, completions=completions, completion_ids=completion_ids_list, **reward_kwargs
                    )
                # Convert None values to NaN
                output = [r if r is not None else torch.nan for r in output]
                return index, output

            async def _run_async_funcs():
                coros = [_invoke_async_reward(i, func, func_name) for (i, func, func_name) in async_funcs_info]
                return await asyncio.gather(*coros)

            async_results = asyncio.run_coroutine_threadsafe(_run_async_funcs(), self.async_reward_loop).result()
            for idx, output_reward_func in async_results:
                rewards_per_func[:, idx] = torch.tensor(output_reward_func, dtype=torch.float32, device=device)

        # If all reward functions return None for a given row, issue a detailed warning
        if torch.isnan(rewards_per_func).all(dim=1).any():
            nan_row_idx = torch.isnan(rewards_per_func).all(dim=1).nonzero(as_tuple=True)[0][0]
            row_reward_kwargs = {
                key: value[nan_row_idx]
                for key, value in reward_kwargs.items()
                if key not in ("trainer_state", "log_extra", "log_metric")
            }
            row_reward_kwargs["prompt"] = prompts[nan_row_idx]
            row_reward_kwargs["completion"] = completions[nan_row_idx]
            logger.warning(
                f"All reward functions returned None for the following kwargs:\n{row_reward_kwargs}\n"
                "Please ensure that at least one reward function returns a valid reward."
            )

        # Gather the rewards across processes: this is crucial because rewards are normalized per group while the
        # completions may be distributed across processes
        rewards_per_func = gather(rewards_per_func)
        return rewards_per_func

    def _tokenize_prompts(self, prompts: list):
        """Tokenize prompts and extract images/multimodal fields for generation."""
        if is_conversational({"prompt": prompts[0]}):
            # Collect images from conversational prompts, if any
            images = []
            has_images = False
            for prompt in prompts:
                prompt_images = []
                for message in prompt:
                    if isinstance(message["content"], list):
                        for part in message["content"]:
                            if part["type"] == "image":
                                prompt_images.append(part["image"])
                                has_images = True
                images.append(prompt_images if prompt_images else None)
            images = images if has_images else None

            # Apply the chat template and tokenize in one pass, so that any multimodal fields (e.g. pixel_values)
            # are returned alongside the input IDs
            tokenized = self.processing_class.apply_chat_template(
                conversation=prompts,
                add_generation_prompt=True,
                tokenize=True,
                return_dict=True,
                padding=True,
                **self.chat_template_kwargs,
            )
            # Strip padding to recover the per-prompt token lists
            prompt_ids = [
                [tok for tok, m in zip(ids, mask, strict=True) if m]
                for ids, mask in zip(tokenized["input_ids"], tokenized["attention_mask"], strict=True)
            ]
            # Everything beyond input_ids/attention_mask is a multimodal field (e.g. pixel_values)
            multimodal_fields = {k: v for k, v in tokenized.items() if k not in ("input_ids", "attention_mask")}
        else:
            prompt_ids = self.processing_class(text=prompts)["input_ids"]
            images = None
            multimodal_fields = {}
        return prompt_ids, images, multimodal_fields

    def _generate_single_turn(self, prompt_ids, images, multimodal_fields):
        device = self.accelerator.device
        mode = "train" if self.model.training else "eval"

        if self.use_vllm:
            # First, update the vLLM weights if needed
            if self.state.global_step != self._last_loaded_step:
                with profiling_context(self, "sync_weights"):
                    self.vllm_generation.sync_weights()
                self._last_loaded_step = self.state.global_step

            # Generate completions with vLLM
            num_generations = self.num_generations if mode == "train" else self.num_generations_eval
            _, completion_ids, _, _ = self.vllm_generation.generate(
                prompts=prompt_ids,
                images=images,
                num_generations=num_generations,
                profiler=profiling_context(self, "vLLM.generate"),
            )

        elif self.use_transformers_paged:
            with (
                profiling_context(self, "transformers.generate_batch"),
                unwrap_model_for_generation(
                    self.model_wrapped, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation
                ) as unwrapped_model,
                torch.no_grad(),
                FSDP.summon_full_params(self.model_wrapped, recurse=False) if self.is_fsdp_enabled else nullcontext(),
            ):
                # Cast to the appropriate dtype based on the training configuration
                if self.args.bf16:
                    unwrapped_model.to(torch.bfloat16)
                elif self.args.fp16:
                    unwrapped_model.to(torch.float16)
                with torch.inference_mode():
                    all_outputs = unwrapped_model.generate_batch(
                        prompt_ids, generation_config=self.generation_config, progress_bar=False
                    )
                unwrapped_model.train()
            completion_ids = [output.generated_tokens for output in all_outputs.values()]

        else:
            # Regular transformers generation: left-pad the prompts into a batch
            prompt_tensors = [torch.tensor(ids) for ids in prompt_ids]
            padded_ids = pad(prompt_tensors, padding_value=self.pad_token_id, padding_side="left")
            attention_mask = pad([torch.ones_like(t) for t in prompt_tensors], padding_value=0, padding_side="left")
            generate_inputs = {"input_ids": padded_ids, "attention_mask": attention_mask}
            # Attach multimodal fields, padding list-of-lists fields per sample
            for k, v in multimodal_fields.items():
                if isinstance(v, torch.Tensor):
                    generate_inputs[k] = v
                elif isinstance(v, list) and v and isinstance(v[0], list):
                    generate_inputs[k] = pad([torch.tensor(x) for x in v], padding_value=0, padding_side="left")
                else:
                    generate_inputs[k] = torch.tensor(np.array(v))
            generate_inputs = super()._prepare_inputs(generate_inputs)

            with (
                profiling_context(self, "transformers.generate"),
                unwrap_model_for_generation(
                    self.model_wrapped,
                    self.accelerator,
                    gather_deepspeed3_params=self.args.ds3_gather_for_generation,
                    generation_kwargs=self.generation_kwargs,
                ) as unwrapped_model,
                torch.no_grad(),
                FSDP.summon_full_params(self.model_wrapped, recurse=False) if self.is_fsdp_enabled else nullcontext(),
            ):
                prompt_completion_ids = unwrapped_model.generate(
                    **generate_inputs, generation_config=self.generation_config
                )
            # Compute the prompt length and extract the completion IDs
            prompt_length = generate_inputs["input_ids"].size(1)
            completion_ids = prompt_completion_ids[:, prompt_length:]

            # Mask everything after the first EOS token and convert to lists of token IDs
            is_eos = completion_ids == self.eos_token_id
            eos_idx = torch.full((is_eos.size(0),), is_eos.size(1), dtype=torch.long, device=device)
            eos_idx[is_eos.any(dim=1)] = is_eos.int().argmax(dim=1)[is_eos.any(dim=1)]
            sequence_indices = torch.arange(is_eos.size(1), device=device).expand(is_eos.size(0), -1)
            completion_mask = (sequence_indices <= eos_idx.unsqueeze(1)).int()
            completion_ids = [
                c[m].tolist() for c, m in zip(completion_ids.cpu(), completion_mask.bool().cpu(), strict=True)
            ]

        return completion_ids

    def _generate(self, prompts: list):
        device = self.accelerator.device
        mode = "train" if self.model.training else "eval"

        # Copy the prompts, because `_tokenize_prompts` may modify them in place (e.g. for multimodal messages)
        prompts = copy.deepcopy(prompts)

        prompt_ids, images, multimodal_fields = self._tokenize_prompts(prompts)
        completion_ids = self._generate_single_turn(prompt_ids, images, multimodal_fields)

        # Decode the completions; for conversational data, wrap them as assistant messages
        if is_conversational({"prompt": prompts[0]}):
            contents = self.processing_class.batch_decode(completion_ids, skip_special_tokens=True)
            completions = [[{"role": "assistant", "content": content}] for content in contents]
        else:
            completions = self.processing_class.batch_decode(completion_ids, skip_special_tokens=True)

        # Aggregate prompt/completion lengths across processes for logging
        prompt_lengths = torch.tensor([len(ids) for ids in prompt_ids], device=device)
        completion_lengths = torch.tensor([len(ids) for ids in completion_ids], device=device)
        agg_prompt_lengths = self.accelerator.gather(prompt_lengths)
        agg_completion_lengths = self.accelerator.gather(completion_lengths)
        total_prompt_tokens = agg_prompt_lengths.sum()
        total_completion_tokens = agg_completion_lengths.sum()

        # Update the number of input tokens seen (training only)
        if mode == "train":
            self.state.num_input_tokens_seen += (total_prompt_tokens + total_completion_tokens).item()
            self._metrics[mode]["num_tokens"] = [self.state.num_input_tokens_seen]

        # Log completion lengths: mean, min, max
        self._metrics[mode]["completions/mean_length"].append(agg_completion_lengths.float().mean().item())
        self._metrics[mode]["completions/min_length"].append(agg_completion_lengths.float().min().item())
        self._metrics[mode]["completions/max_length"].append(agg_completion_lengths.float().max().item())

        # Identify truncated sequences (those that do not end with EOS or padding) and log terminated lengths
        eos_and_pad = [self.eos_token_id, self.pad_token_id]
        is_truncated = torch.tensor([ids[-1] not in eos_and_pad for ids in completion_ids], device=device)
        agg_is_truncated = self.accelerator.gather(is_truncated)
        self._metrics[mode]["completions/clipped_ratio"].append(agg_is_truncated.float().mean().item())
        term_completion_lengths = agg_completion_lengths[~agg_is_truncated]
        if len(term_completion_lengths) == 0:  # edge case where no terminated sequences are found
            term_completion_lengths = torch.zeros(1, device=device)
        self._metrics[mode]["completions/mean_terminated_length"].append(term_completion_lengths.float().mean().item())
        self._metrics[mode]["completions/min_terminated_length"].append(term_completion_lengths.float().min().item())
        self._metrics[mode]["completions/max_terminated_length"].append(term_completion_lengths.float().max().item())

        return prompt_ids, completion_ids, completions

    def _generate_and_score_completions(
        self, inputs: list[dict[str, torch.Tensor | Any]]
    ) -> dict[str, torch.Tensor | Any]:
        device = self.accelerator.device
        mode = "train" if self.model.training else "eval"

        prompts = [x["prompt"] for x in inputs]

        if "images" in inputs[0]:
            images = [example.get("images") for example in inputs]
        elif "image" in inputs[0]:
            images = [[example.get("image")] if example.get("image") is not None else None for example in inputs]
        else:
            images = None
        # If every sample has an empty image list, treat the batch as text-only
        if images is not None and all(img_list == [] for img_list in images):
            images = None

        # If the prompts are conversational and the inputs contain images, convert the prompts from
        # [{"role": "user", "content": "What color is the sky?"}] to
        # [{"role": "user", "content": [{"type": "image", "image": <Image>}, {"type": "text", "text": ...}]}]
        if images is not None:
            if not is_conversational(inputs[0]):
                raise ValueError(
                    "Multimodal training requires conversational prompts. It looks like the dataset contains "
                    "non-conversational inputs, likely because a chat template was applied before passing the dataset "
                    "to the trainer. Please provide the raw conversational prompts and let the trainer apply the chat "
                    "template internally."
                )
            prompts = [
                prepare_multimodal_messages(prompt, image_list)
                for prompt, image_list in zip(prompts, images, strict=True)
            ]

        prompt_ids_list, completion_ids_list, completions = self._generate(prompts)

        # Convert lists of token IDs to padded tensors
        prompt_ids = [torch.tensor(ids) for ids in prompt_ids_list]
        prompt_mask = [torch.ones_like(ids, dtype=torch.long) for ids in prompt_ids]
        prompt_ids = pad(
            prompt_ids,
            padding_value=self.pad_token_id,
            padding_side="left",
            pad_to_multiple_of=self.pad_to_multiple_of,
        ).to(device=device)
        prompt_mask = pad(
            prompt_mask, padding_value=0, padding_side="left", pad_to_multiple_of=self.pad_to_multiple_of
        ).to(device=device)
        completion_ids = [torch.tensor(ids) for ids in completion_ids_list]
        completion_mask = [torch.ones_like(ids, dtype=torch.long) for ids in completion_ids]
        completion_ids = pad(
            completion_ids,
            padding_value=self.pad_token_id,
            padding_side="right",
            pad_to_multiple_of=self.pad_to_multiple_of,
        ).to(device=device)
        completion_mask = pad(
            completion_mask, padding_value=0, padding_side="right", pad_to_multiple_of=self.pad_to_multiple_of
        ).to(device=device)

        # If mask_truncated_completions is enabled, zero out truncated completions in completion_mask
        if self.mask_truncated_completions:
            eos_and_pad = [self.eos_token_id, self.pad_token_id]
            is_truncated = torch.tensor([ids[-1] not in eos_and_pad for ids in completion_ids_list], device=device)
            completion_mask = completion_mask * (~is_truncated).unsqueeze(1).int()

        # Concatenate prompt and completion for the logit computation
        prompt_completion_ids = torch.cat([prompt_ids, completion_ids], dim=1)  # (B, P+C)
        attention_mask = torch.cat([prompt_mask, completion_mask], dim=1)  # (B, P+C)

        logits_to_keep = completion_ids.size(1)  # we only need to compute the logits for the completion tokens
        batch_size = self.args.per_device_train_batch_size if mode == "train" else self.args.per_device_eval_batch_size

        num_images = [len(img_list) for img_list in images] if images is not None else None

        # Re-process the prompts through the processor to recover the multimodal forward kwargs (e.g. pixel_values)
        if images is not None:
            prompts_text = [
                apply_chat_template({"prompt": prompt}, self.processing_class, **self.chat_template_kwargs)["prompt"]
                for prompt in prompts
            ]
            prompt_inputs = self.processing_class(images=images, text=prompts_text, padding=True, return_tensors="pt")
            prompt_inputs = super()._prepare_inputs(prompt_inputs)
            forward_kwargs = {k: v for k, v in prompt_inputs.items() if k not in ["input_ids", "attention_mask"]}
        else:
            forward_kwargs = {}

        # If token_type_ids are used, extend them with zeros for the completion part
        if "token_type_ids" in forward_kwargs:
            token_type_ids = forward_kwargs["token_type_ids"]
            if self.pad_to_multiple_of is not None:
                # pad_to_multiple_of may have added extra left padding to the prompts; prepend zeros to match
                padding_size = prompt_ids.size(1) - token_type_ids.size(1)
                if padding_size > 0:
                    token_type_ids = torch.cat(
                        [token_type_ids.new_zeros((token_type_ids.size(0), padding_size)), token_type_ids], dim=1
                    )
            forward_kwargs["token_type_ids"] = torch.cat(
                [token_type_ids, token_type_ids.new_zeros(completion_ids.shape)], dim=1
            )
        # Same for mm_token_type_ids
        if "mm_token_type_ids" in forward_kwargs:
            mm_token_type_ids = forward_kwargs["mm_token_type_ids"]
            if self.pad_to_multiple_of is not None:
                padding_size = prompt_ids.size(1) - mm_token_type_ids.size(1)
                if padding_size > 0:
                    mm_token_type_ids = torch.cat(
                        [mm_token_type_ids.new_zeros((mm_token_type_ids.size(0), padding_size)), mm_token_type_ids],
                        dim=1,
                    )
            forward_kwargs["mm_token_type_ids"] = torch.cat(
                [mm_token_type_ids, mm_token_type_ids.new_zeros(completion_ids.shape)], dim=1
            )

        # Compute the log-probs of the completions under the policy that generated them ("old" log-probs). These are
        # needed for the importance-sampling ratio in the loss, since the weights may change between generation and
        # each optimization step. No gradients are needed here, so gradient checkpointing is disabled for speed.
        with torch.no_grad(), disable_gradient_checkpointing(self.model, self.args.gradient_checkpointing_kwargs):
            old_per_token_logps, _ = self._get_per_token_logps_and_entropies(
                self.model,
                prompt_completion_ids,
                attention_mask,
                logits_to_keep,
                batch_size,
                num_images=num_images,
                **forward_kwargs,
            )
            old_logps = (old_per_token_logps * completion_mask).sum(1)  # mask out padding and tokens after EOS

            # Compute the reference log-probs when KL regularization is enabled
            if self.beta != 0.0:
                if self.ref_model is not None:
                    ref_per_token_logps, _ = self._get_per_token_logps_and_entropies(
                        self.ref_model,
                        prompt_completion_ids,
                        attention_mask,
                        logits_to_keep,
                        batch_size=batch_size,
                        num_images=num_images,
                        **forward_kwargs,
                    )
                else:
                    # With a PEFT model, the reference policy is recovered by switching to the frozen "ref" adapter
                    # (or by disabling adapters entirely when no "ref" adapter exists)
                    model = self.accelerator.unwrap_model(self.model)
                    with use_adapter(model, adapter_name="ref" if "ref" in model.peft_config else None):
                        ref_per_token_logps, _ = self._get_per_token_logps_and_entropies(
                            self.model,
                            prompt_completion_ids,
                            attention_mask,
                            logits_to_keep,
                            batch_size=batch_size,
                            num_images=num_images,
                            **forward_kwargs,
                        )
            else:
                ref_per_token_logps = None

        # Decode prompts and completions for logging
        prompts_text = self.processing_class.batch_decode(prompt_ids, skip_special_tokens=True)
        completions_text = self.processing_class.batch_decode(completion_ids, skip_special_tokens=True)

        # Calculate rewards for each reward function. `rewards_per_func` aggregates rewards across all processes.
        # This is important because rewards are normalized per group, while the completions are distributed across
        # processes.
        rewards_per_func = self._calculate_rewards(inputs, prompts, completions, completion_ids_list)
        num_generations = self.num_generations if mode == "train" else self.num_generations_eval

        # Apply weights to each reward function's output and sum (NaN rewards from `None` are skipped by nansum)
        rewards = (rewards_per_func * self.reward_weights.to(device).unsqueeze(0)).nansum(dim=1)

        # Optionally clip the rewards to a fixed range
        if self.reward_clip_range:
            rewards = rewards.clamp(min=self.reward_clip_range[0], max=self.reward_clip_range[1])

        # RLOO incorporates the KL penalty directly into the rewards rather than into the loss
        if self.beta != 0.0:
            per_token_kl = old_per_token_logps - ref_per_token_logps
            # Sum the per-token KL over each completion and gather across processes, as the rewards are gathered
            kl = (per_token_kl * completion_mask).sum(-1)
            kl = gather(kl)
            rewards = rewards - self.beta * kl

        grouped_rewards = rewards.view(-1, num_generations)
        mean_grouped_rewards = grouped_rewards.mean(dim=1)
        if num_generations > 1:
            std_rewards = grouped_rewards.std(dim=1)
        else:
            std_rewards = torch.zeros_like(mean_grouped_rewards)

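        # Leave-one-out (RLOO) baseline: each completion is compared against the mean reward of the other
        # num_generations - 1 completions for the same prompt,
        #     b_i = (sum_j r_j - r_i) / (k - 1),   advantage_i = r_i - b_i.
        # Worked example (illustrative): with rewards [1, 0, 0] and k = 3, the baselines are [0, 0.5, 0.5] and the
        # advantages [1, -0.5, -0.5].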
        grouped_sum = grouped_rewards.sum(dim=1, keepdim=True)
        if num_generations > 1:
            baselines = (grouped_sum - grouped_rewards) / (num_generations - 1)
            baselines = baselines.view(-1)
            advantages = rewards - baselines
        else:
            advantages = torch.zeros_like(rewards)

        # Optionally normalize the advantages to zero mean and unit variance across the full batch
        if self.normalize_advantages:
            advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-4)

        is_std_zero = torch.isclose(std_rewards, torch.zeros_like(std_rewards))

        # Slice to keep only the local part of the data
        process_slice = slice(
            self.accelerator.process_index * len(prompts),
            (self.accelerator.process_index + 1) * len(prompts),
        )
        all_process_advantages = advantages.clone()  # keep the aggregated advantages for logging
        advantages = advantages[process_slice]

        # Log the metrics
        if self.beta != 0.0:
            mean_kl = (per_token_kl * completion_mask).sum() / completion_mask.sum().clamp(min=1.0)
            self._metrics[mode]["kl"].append(self.accelerator.gather(mean_kl).nanmean().item())

        # Calculate mean reward per function, but only for samples where the function was applied (non-NaN values)
        for i, reward_func_name in enumerate(self.reward_func_names):
            mean_rewards = torch.nanmean(rewards_per_func[:, i]).item()
            self._metrics[mode][f"rewards/{reward_func_name}/mean"].append(mean_rewards)
            std_func_rewards = nanstd(rewards_per_func[:, i]).item()
            self._metrics[mode][f"rewards/{reward_func_name}/std"].append(std_func_rewards)
        rewards = (rewards_per_func * self.reward_weights.to(rewards_per_func.device).unsqueeze(0)).nansum(dim=1)
        self._metrics[mode]["reward"].append(rewards.mean().item())
        self._metrics[mode]["reward_std"].append(rewards.std().item())
        self._metrics[mode]["frac_reward_zero_std"].append(is_std_zero.float().mean().item())

        # Log prompt and completion texts
        self._logs["prompt"].extend(gather_object(prompts_text))
        self._logs["completion"].extend(gather_object(completions_text))
        for i, name in enumerate(self.reward_func_names):
            self._logs["rewards"][name].extend(rewards_per_func[:, i].tolist())
        self._logs["advantages"].extend(all_process_advantages.tolist())

        # Flush extra columns reported by reward functions via `log_extra`. Iterate in sorted order so all
        # processes gather the columns in the same order.
        for column in sorted(self._pending_extra_logs):
            self._logs["extra"][column].extend(gather_object(self._pending_extra_logs[column]))
        self._pending_extra_logs.clear()

        # Flush scalar metrics reported by reward functions via `log_metric`: average the local values, then
        # average the per-process means.
        for name in sorted(self._pending_metrics):
            values = self._pending_metrics[name]
            local_mean = sum(values) / len(values)
            global_mean = self.accelerator.gather(torch.tensor(local_mean, device=device)).mean().item()
            self._metrics[mode][name].append(global_mean)
        self._pending_metrics.clear()

        if images is not None:
            self._logs["images"].extend(gather_object(images))

        output = {
            "prompt_ids": prompt_ids,
            "prompt_mask": prompt_mask,
            "completion_ids": completion_ids,
            "completion_mask": completion_mask,
            "old_logps": old_logps,
            "advantages": advantages,
        }
        if "pixel_values" in forward_kwargs:
            output["pixel_values"] = forward_kwargs["pixel_values"]
        if "image_grid_thw" in forward_kwargs:
            output["image_grid_thw"] = forward_kwargs["image_grid_thw"]
        if "pixel_attention_mask" in forward_kwargs:
            output["pixel_attention_mask"] = forward_kwargs["pixel_attention_mask"]
        if "image_sizes" in forward_kwargs:
            output["image_sizes"] = forward_kwargs["image_sizes"]
        if "token_type_ids" in forward_kwargs:
            output["token_type_ids"] = forward_kwargs["token_type_ids"]
        if "mm_token_type_ids" in forward_kwargs:
            output["mm_token_type_ids"] = forward_kwargs["mm_token_type_ids"]
        if "pixel_position_ids" in forward_kwargs:
            output["pixel_position_ids"] = forward_kwargs["pixel_position_ids"]
        if images is not None:
            output["num_images"] = num_images
        return output

    @profiling_decorator
    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        if return_outputs:
            raise ValueError("The RLOOTrainer does not support returning outputs")
        return self._compute_loss(model, inputs)

    def _compute_loss(self, model, inputs):
        # Compute the per-token log probabilities for the model
        prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"]
        completion_ids, completion_mask = inputs["completion_ids"], inputs["completion_mask"]
        input_ids = torch.cat([prompt_ids, completion_ids], dim=1)
        attention_mask = torch.cat([prompt_mask, completion_mask], dim=1)
        logits_to_keep = completion_ids.size(1)  # we only need to compute the logits for the completion tokens

        per_token_logps, entropies = self._get_per_token_logps_and_entropies(
            model,
            input_ids,
            attention_mask,
            logits_to_keep,
            compute_entropy=True,
            pixel_values=inputs.get("pixel_values"),
            image_grid_thw=inputs.get("image_grid_thw"),
            num_images=inputs.get("num_images"),
            pixel_attention_mask=inputs.get("pixel_attention_mask"),
            image_sizes=inputs.get("image_sizes"),
            token_type_ids=inputs.get("token_type_ids"),
            mm_token_type_ids=inputs.get("mm_token_type_ids"),
            pixel_position_ids=inputs.get("pixel_position_ids"),
        )

        logps = (per_token_logps * completion_mask).sum(1)  # mask out padding and tokens after EOS
        old_logps = inputs["old_logps"]
        log_ratio = logps - old_logps

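        # PPO-style clipped surrogate objective at the sequence level: the sequence importance ratio
        # exp(logps - old_logps) is clamped to [1 - epsilon_low, 1 + epsilon_high], and taking the element-wise
        # minimum of the clipped and unclipped terms keeps the policy from moving too far from the one that
        # generated the completions.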
        advantages = inputs["advantages"]
        coef_1 = torch.exp(log_ratio)
        coef_2 = torch.clamp(coef_1, 1 - self.epsilon_low, 1 + self.epsilon_high)
        per_sequence_loss1 = coef_1 * advantages
        per_sequence_loss2 = coef_2 * advantages
        per_sequence_loss = -torch.min(per_sequence_loss1, per_sequence_loss2)
        loss = per_sequence_loss.mean()

        # Log the metrics
        mode = "train" if self.model.training else "eval"

        # Entropy over completion tokens
        mean_entropy = (entropies * completion_mask).sum() / completion_mask.sum().clamp(min=1.0)
        self._metrics[mode]["entropy"].append(self.accelerator.gather(mean_entropy).nanmean().item())

        # Clip-ratio diagnostics: how often the ratio was clipped on the low/high side
        is_low_clipped = (coef_1 < 1 - self.epsilon_low) & (advantages < 0)
        is_high_clipped = (coef_1 > 1 + self.epsilon_high) & (advantages > 0)
        is_region_clipped = is_low_clipped | is_high_clipped
        gathered_low_clip = self.accelerator.gather(is_low_clipped.float().mean())
        self._metrics[mode]["clip_ratio/low_mean"].append(gathered_low_clip.nanmean().item())
        self._metrics[mode]["clip_ratio/low_min"].append(nanmin(gathered_low_clip).item())
        gathered_high_clip = self.accelerator.gather(is_high_clipped.float().mean())
        self._metrics[mode]["clip_ratio/high_mean"].append(gathered_high_clip.nanmean().item())
        self._metrics[mode]["clip_ratio/high_max"].append(nanmax(gathered_high_clip).item())
        gathered_clip_ratio = self.accelerator.gather(is_region_clipped.float().mean())
        self._metrics[mode]["clip_ratio/region_mean"].append(gathered_clip_ratio.nanmean().item())
        return loss

    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys: list[str] | None = None):
        inputs = self._prepare_inputs(inputs)
        with torch.no_grad():
            with self.compute_loss_context_manager():
                loss = self.compute_loss(model, inputs)
            loss = loss.mean().detach()
        return loss, None, None

    def log(self, logs: dict[str, float], start_time: float | None = None) -> None:
        mode = "train" if self.model.training else "eval"
        # Average the metrics accumulated since the last log, skipping NaN values (a metric can be NaN when, e.g., a
        # reward function returned None for every sample it saw in a batch)
        metrics = {}
        for key, val in self._metrics[mode].items():
            valid = [v for v in val if not math.isnan(v)]
            metrics[key] = sum(valid) / len(valid) if valid else None

        # This method can be called both in training and evaluation. When called in evaluation, the keys in `logs`
        # start with "eval_". We need to add the prefix "eval_" to the keys in `metrics` as well.
        if mode == "eval":
            metrics = {f"eval_{key}": val for key, val in metrics.items()}

        logs = {**logs, **metrics}
        super().log(logs, start_time)
        self._metrics[mode].clear()

        if self.accelerator.is_main_process and self.log_completions:
            if is_rich_available():
                print_prompt_completions_sample(
                    self._logs["prompt"],
                    self._logs["completion"],
                    self._logs["rewards"],
                    self._logs["advantages"],
                    self.state.global_step,
                    self.num_completions_to_print,
                )

            logging_backends = []
            if self.args.report_to and "wandb" in self.args.report_to and wandb.run is not None:
                logging_backends.append(wandb)
            if self.args.report_to and "trackio" in self.args.report_to:
                logging_backends.append(trackio)

            table = {
                "step": [self.state.global_step] * len(self._logs["prompt"]),
                "prompt": self._logs["prompt"],
                "completion": self._logs["completion"],
                **self._logs["rewards"],
                **self._logs["extra"],
                "advantage": self._logs["advantages"],
            }

            df_base = pd.DataFrame(table)
            images_raw = self._logs["images"] or []

            for logging_backend in logging_backends:
                if images_raw:
                    images = []
                    for image_list in self._logs["images"]:
                        images.append([logging_backend.Image(image) for image in image_list])
                    df = pd.concat(
                        [df_base, pd.Series(images, name="image")],
                        axis=1,
                        copy=False,
                    )
                else:
                    df = df_base

                if self.log_unique_prompts:
                    df = df.drop_duplicates(subset=["prompt"])

                logging_backend.log({"completions": logging_backend.Table(dataframe=df)})

    # Ensure the model card is saved along with the checkpoint
    def _save_checkpoint(self, model, trial):
        if self.args.hub_model_id is None:
            model_name = Path(self.args.output_dir).name
        else:
            model_name = self.args.hub_model_id.split("/")[-1]
        self.create_model_card(model_name=model_name)
        super()._save_checkpoint(model, trial)