from typing import TYPE_CHECKING

from transformers.utils import is_flash_attn_2_available, is_torch_sdpa_available

from ...extras.logging import get_logger


if TYPE_CHECKING:
    from transformers import PretrainedConfig

    from ...hparams import ModelArguments


logger = get_logger(__name__)


def configure_attn_implementation(
    config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool
) -> None:
    """Resolve the requested attention implementation and write it into the model config."""
    # Gemma-2 models are trained with eager attention; warn and fall back if another implementation was requested.
    if getattr(config, "model_type", None) == "gemma2" and is_trainable:
        if model_args.flash_attn == "auto":
            logger.warning("Gemma-2 models should use eager attention in training, change `flash_attn` to disabled.")
            model_args.flash_attn = "disabled"
        elif model_args.flash_attn != "disabled":
            logger.warning(
                "Gemma-2 models should use eager attention in training, but you set `flash_attn: {}`. "
                "Will proceed at your own risk.".format(model_args.flash_attn)
            )

    if model_args.flash_attn == "auto":
        return

    elif model_args.flash_attn == "disabled":
        requested_attn_implementation = "eager"

    elif model_args.flash_attn == "sdpa":
        if not is_torch_sdpa_available():
            logger.warning("torch>=2.1.1 is required for SDPA attention.")
            return

        requested_attn_implementation = "sdpa"
    elif model_args.flash_attn == "fa2":
        if not is_flash_attn_2_available():
            logger.warning("FlashAttention-2 is not installed.")
            return

        requested_attn_implementation = "flash_attention_2"
    else:
        raise NotImplementedError("Unknown attention type: {}".format(model_args.flash_attn))

    # InternLM2 custom modeling code reads "attn_implementation" rather than the private "_attn_implementation" key.
    if getattr(config, "model_type", None) == "internlm2":
        setattr(config, "attn_implementation", requested_attn_implementation)
    else:
        setattr(config, "_attn_implementation", requested_attn_implementation)


def print_attn_implementation(config: "PretrainedConfig") -> None:
    """Log which attention implementation the model config ended up with."""
    if getattr(config, "model_type", None) == "internlm2":  # see note in configure_attn_implementation
        attn_implementation = getattr(config, "attn_implementation", None)
    else:
        attn_implementation = getattr(config, "_attn_implementation", None)

    if attn_implementation == "flash_attention_2":
        logger.info("Using FlashAttention-2 for faster training and inference.")
    elif attn_implementation == "sdpa":
        logger.info("Using torch SDPA for faster training and inference.")
    else:
        logger.info("Using vanilla attention implementation.")
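

# Usage sketch (illustrative only; the caller shown below is an assumption, not part of this module):
# a model-loading routine would typically apply these helpers to the Hugging Face config before
# instantiating the model, e.g.
#
#     from transformers import AutoConfig
#
#     config = AutoConfig.from_pretrained(model_args.model_name_or_path)
#     configure_attn_implementation(config, model_args, is_trainable=True)
#     print_attn_implementation(config)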