from collections import defaultdict
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple

from ...extras.constants import IGNORE_INDEX
from ...extras.logging import get_logger
from .processor_utils import infer_seqlen


if TYPE_CHECKING:
    from transformers import PreTrainedTokenizer, ProcessorMixin

    from ...hparams import DataArguments
    from ..mm_plugin import ImageInput, VideoInput
    from ..template import Template


logger = get_logger(__name__)


def _encode_pairwise_example(
    prompt: Sequence[Dict[str, str]],
    response: Sequence[Dict[str, str]],
    system: Optional[str],
    tools: Optional[str],
    images: Sequence["ImageInput"],
    videos: Sequence["VideoInput"],
    template: "Template",
    tokenizer: "PreTrainedTokenizer",
    processor: Optional["ProcessorMixin"],
    cutoff_len: int,
) -> Tuple[List[int], List[int], List[int], List[int]]:
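    r"""
    Encode one preference example into token ids for both candidate responses.

    The chosen and rejected responses share the same prompt; prompt tokens in the
    labels are masked with IGNORE_INDEX so only response tokens contribute to the loss.
    """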
    chosen_messages = template.mm_plugin.process_messages(prompt + [response[0]], images, videos, processor)
    rejected_messages = template.mm_plugin.process_messages(prompt + [response[1]], images, videos, processor)
    prompt_ids, chosen_ids = template.encode_oneturn(tokenizer, chosen_messages, system, tools)
    _, rejected_ids = template.encode_oneturn(tokenizer, rejected_messages, system, tools)

    if template.efficient_eos:
        chosen_ids += [tokenizer.eos_token_id]
        rejected_ids += [tokenizer.eos_token_id]

    prompt_ids, _ = template.mm_plugin.process_token_ids(prompt_ids, None, images, videos, tokenizer, processor)
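    # budget cutoff_len between prompt and response using the longer of the two responses,
    # so the chosen and rejected sequences are truncated to the same target length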
    source_len, target_len = infer_seqlen(len(prompt_ids), max(len(chosen_ids), len(rejected_ids)), cutoff_len)
    prompt_ids = prompt_ids[:source_len]
    chosen_ids = chosen_ids[:target_len]
    rejected_ids = rejected_ids[:target_len]

    chosen_input_ids = prompt_ids + chosen_ids
    chosen_labels = [IGNORE_INDEX] * source_len + chosen_ids
    rejected_input_ids = prompt_ids + rejected_ids
    rejected_labels = [IGNORE_INDEX] * source_len + rejected_ids
    return chosen_input_ids, chosen_labels, rejected_input_ids, rejected_labels


def preprocess_pairwise_dataset(
    examples: Dict[str, List[Any]],
    template: "Template",
    tokenizer: "PreTrainedTokenizer",
    processor: Optional["ProcessorMixin"],
    data_args: "DataArguments",
) -> Dict[str, List[Any]]:
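    r"""
    Build pairwise features for preference training (e.g. reward modeling or DPO).

    Each valid example yields chosen/rejected input ids, attention masks, and labels,
    plus the raw images and videos for the multimodal collator. Invalid examples
    (even-length prompts or fewer than two responses) are dropped with a warning.
    """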
    model_inputs = defaultdict(list)
    for i in range(len(examples["_prompt"])):
        if len(examples["_prompt"][i]) % 2 != 1 or len(examples["_response"][i]) < 2:
            logger.warning("Dropped invalid example: {}".format(examples["_prompt"][i] + examples["_response"][i]))
            continue

        chosen_input_ids, chosen_labels, rejected_input_ids, rejected_labels = _encode_pairwise_example(
            prompt=examples["_prompt"][i],
            response=examples["_response"][i],
            system=examples["_system"][i],
            tools=examples["_tools"][i],
            images=examples["_images"][i] or [],
            videos=examples["_videos"][i] or [],
            template=template,
            tokenizer=tokenizer,
            processor=processor,
            cutoff_len=data_args.cutoff_len,
        )
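        # sequences are kept unpadded here; the data collator is expected to pad them
        # (and extend their attention masks) to a common length at batch time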
        model_inputs["chosen_input_ids"].append(chosen_input_ids)
        model_inputs["chosen_attention_mask"].append([1] * len(chosen_input_ids))
        model_inputs["chosen_labels"].append(chosen_labels)
        model_inputs["rejected_input_ids"].append(rejected_input_ids)
        model_inputs["rejected_attention_mask"].append([1] * len(rejected_input_ids))
        model_inputs["rejected_labels"].append(rejected_labels)
        model_inputs["images"].append(examples["_images"][i])
        model_inputs["videos"].append(examples["_videos"][i])

    return model_inputs


def print_pairwise_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None:
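    r"""
    Decode and print one pairwise example for quick sanity checking of the tokenization.
    """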
    valid_chosen_labels = list(filter(lambda x: x != IGNORE_INDEX, example["chosen_labels"]))
    valid_rejected_labels = list(filter(lambda x: x != IGNORE_INDEX, example["rejected_labels"]))
    print("chosen_input_ids:\n{}".format(example["chosen_input_ids"]))
    print("chosen_inputs:\n{}".format(tokenizer.decode(example["chosen_input_ids"], skip_special_tokens=False)))
    print("chosen_label_ids:\n{}".format(example["chosen_labels"]))
    print("chosen_labels:\n{}".format(tokenizer.decode(valid_chosen_labels, skip_special_tokens=False)))
    print("rejected_input_ids:\n{}".format(example["rejected_input_ids"]))
    print("rejected_inputs:\n{}".format(tokenizer.decode(example["rejected_input_ids"], skip_special_tokens=False)))
    print("rejected_label_ids:\n{}".format(example["rejected_labels"]))
    print("rejected_labels:\n{}".format(tokenizer.decode(valid_rejected_labels, skip_special_tokens=False)))