Columns: id (int64, range 0–190k) · prompt (string, length 21–13.4M) · docstring (string, length 1–12k)
2,866
```python
from typing import List

from .keymap import KEYMAP, get_character
```

The provided code snippet includes necessary dependencies for implementing the `mark` function. Write a Python function `def mark(key: str)` to solve the following problem:

Mark the function with the key code so it can be handled in the register.

Here is the function:

```python
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        func.handle_key = handle
        return func

    return decorator
```
Mark the function with the key code so it can be handled in the register
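A minimal usage sketch (the key code `10` is a made-up value here; real callers pass entries from `KEYMAP`):

```python
# Hypothetical usage: `mark` only records the key code on the function's
# `handle_key` attribute; dispatch happens later via the register/metaclass.
@mark(10)  # assume 10 is the key code for <Enter>
def on_enter(cls):
    return "enter pressed"

assert on_enter.handle_key == [10]
```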
2,867
```python
from typing import List

from .keymap import KEYMAP, get_character
```

The provided code snippet includes necessary dependencies for implementing the `mark_multiple` function. Write a Python function `def mark_multiple(*keys: List[str])` to solve the following problem:

Mark the function with the key codes so it can be handled in the register.

Here is the function:

```python
def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        func.handle_key = handle
        return func

    return decorator
```
Mark the function with the key codes so it can be handled in the register
2,868
```python
from typing import List

from .keymap import KEYMAP, get_character


class KeyHandler(type):
    """
    Metaclass that adds the key handlers to the class
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            new_cls.key_handler = {}
        new_cls.handle_input = KeyHandler.handle_input
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    def handle_input(cls):
        "Finds and returns the selected character if it exists in the handler"
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
```

The provided code snippet includes necessary dependencies for implementing the `register` function. Write a Python function `def register(cls)` to solve the following problem:

Adds KeyHandler metaclass to the class.

Here is the function:

```python
def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
```
Adds KeyHandler metaclass to the class
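A hedged sketch of how `mark`, `register`, and the metaclass fit together (the class name and key code below are invented for illustration):

```python
# Hypothetical illustration: `register` rebuilds the class through KeyHandler,
# which indexes every method carrying a `handle_key` attribute.
@register
class Menu:
    current_selection = None

    @mark(106)  # assume 106, i.e. ord("j"), moves the selection down
    def move_down(cls):
        return "down"


# The metaclass collected the marked method into `key_handler`.
assert 106 in Menu.key_handler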
2,869
```python
import os
import sys
from contextlib import contextmanager

if os.name == "nt":
    import ctypes  # needed by the Windows branches below; missing from the original row

    class CursorInfo(ctypes.Structure):
        # Mirrors the Windows CONSOLE_CURSOR_INFO structure used by the
        # kernel32 cursor calls below.
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
```

The provided code snippet includes necessary dependencies for implementing the `hide` function. Write a Python function `def hide()` to solve the following problem:

Context manager to hide the terminal cursor.

Here is the function:

```python
@contextmanager  # required for the `with hide():` usage the docstring describes
def hide():
    "Context manager to hide the terminal cursor"
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
```
Context manager to hide the terminal cursor
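A minimal usage sketch:

```python
# The cursor is hidden for the duration of the block and restored even if
# the body raises.
with hide():
    input("Cursor is hidden while we wait for input... ")
```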
2,870
```python
import enum
import shutil
import sys


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
```
null
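`writeColor` wraps the content in an ANSI SGR escape sequence. For example:

```python
# 32 is the standard ANSI SGR code for green foreground text;
# "\u001b[0m" resets the styling afterwards.
writeColor("All good", 32, end="\n")
```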
2,871
```python
import enum
import shutil
import sys

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
```
null
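For instance, moving up three lines emits the corresponding ANSI cursor-movement sequence:

```python
# Emits "\033[3A": CSI, the line count, then "A" for "cursor up".
move_cursor(3, "up")
```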
2,872
```python
import enum
import shutil
import sys

TERMINAL_WIDTH, _ = shutil.get_terminal_size()


def forceWrite(content, end=""):
    # Body restored from the identical helper in the neighboring rows.
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def reset_cursor():
    # Body restored from the identical helper in the neighboring rows.
    forceWrite("\r")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()
```
null
2,873
```python
import enum
import shutil
import sys

TERMINAL_WIDTH, _ = shutil.get_terminal_size()


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def reset_cursor():
    forceWrite("\r")


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
```
null
2,874
```python
import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command(args):
    ...  # body not included in this row


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
```
null
2,875
```python
import argparse  # needed by the formatter class below; missing from the original row
from pathlib import Path

from .config_args import default_config_file, load_config_from_file
from .config_utils import SubcommandHelpFormatter

description = "Update an existing config file with the latest defaults while maintaining the old configuration."


def update_config_command(args):
    config_file = update_config(args)  # `update_config` lives alongside this command but is elided in this row
    print(f"Successfully updated the configuration file at {config_file}.")


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage


def update_command_parser(parser, parents):
    parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to the config file to update. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    parser.set_defaults(func=update_config_command)
    return parser
```
null
2,876
```python
import argparse  # needed by the formatter class below; missing from the original row
import os  # needed by `default_config_command`; missing from the original row
from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter

description = "Create a default config file for Accelerate with only a few flags set."


def default_config_command(args):
    # Only the first statement of the body survives in this row; `cache_dir`
    # comes from `.config_args` in the full source.
    default_json_config_file = os.path.join(cache_dir, "default_config.yaml")
    ...


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    def _format_usage(self, usage, actions, groups, prefix):
        ...


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
```
null
2,877
```python
import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file
from .config_utils import _ask_field, _ask_options, _convert_compute_environment
from .sagemaker import get_sagemaker_input

description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def config_command(args):
    config = get_user_input()  # `get_user_input` dispatches to the cluster/SageMaker prompts; elided in this row
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
```
null
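A small sketch of driving the standalone parser programmatically (the file name is made up):

```python
# Hypothetical driver for the standalone parser.
parser = config_command_parser()
args = parser.parse_args(["--config_file", "my_config.yaml"])
config_command(args)  # runs the interactive prompts, then saves my_config.yaml
```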
2,878
```python
import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)

if is_boto3_available():
    import boto3  # required by `_get_iam_role_arn` below; missing from the original row


def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
```
null
2,879
```python
import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file

_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file) as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully set up pod.")


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)

    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options run inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
```
null
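To make the joining step concrete, here is a small sketch (the command strings are made up) of the payload that ends up in `gcloud ... ssh --command`:

```python
# Hypothetical commands; `tpu_command_launcher` joins them with "; " so the
# whole setup runs as a single shell payload on every TPU worker.
new_cmd = ["cd /usr/share", "pip install accelerate -U", "python train.py"]
print("; ".join(new_cmd))
# -> cd /usr/share; pip install accelerate -U; python train.py
```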
2,880
```python
import argparse
import importlib
import logging
import os
import subprocess
import sys
from pathlib import Path

import psutil
import torch

from accelerate.commands.config import default_config_file, load_config_from_file
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
from accelerate.commands.utils import CustomArgumentParser
from accelerate.state import get_int_from_env
from accelerate.utils import (
    ComputeEnvironment,
    DistributedType,
    PrepareForLaunch,
    _filter_args,
    check_cuda_p2p_ib_support,
    is_bf16_available,
    is_deepspeed_available,
    is_npu_available,
    is_rich_available,
    is_sagemaker_available,
    is_torch_version,
    is_torch_xla_available,
    is_xpu_available,
    patch_environment,
    prepare_deepspeed_cmd_env,
    prepare_multi_gpu_env,
    prepare_sagemager_args_inputs,
    prepare_simple_launcher_cmd_env,
    prepare_tpu,
)
from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES
```

The provided code snippet includes necessary dependencies for implementing the `clean_option` function. Write a Python function `def clean_option(option)` to solve the following problem:

Finds all cases of - after the first two characters and changes them to _

Here is the function:

```python
def clean_option(option):
    "Finds all cases of - after the first two characters and changes them to _"
    if option.startswith("--"):
        return option[2:].replace("-", "_")
```
Finds all cases of - after the first two characters and changes them to _
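For example:

```python
# Flags are normalized so `--num-processes` and `--num_processes` are
# interchangeable; non-flag tokens fall through and return None here.
assert clean_option("--num-processes") == "num_processes"
assert clean_option("train.py") is None
```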
2,881
import argparse import importlib import logging import os import subprocess import sys from pathlib import Path import psutil import torch from accelerate.commands.config import default_config_file, load_config_from_file from accelerate.commands.config.config_args import SageMakerConfig from accelerate.commands.config.config_utils import DYNAMO_BACKENDS from accelerate.commands.utils import CustomArgumentParser from accelerate.state import get_int_from_env from accelerate.utils import ( ComputeEnvironment, DistributedType, PrepareForLaunch, _filter_args, check_cuda_p2p_ib_support, is_bf16_available, is_deepspeed_available, is_npu_available, is_rich_available, is_sagemaker_available, is_torch_version, is_torch_xla_available, is_xpu_available, patch_environment, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES class CustomHelpFormatter(argparse.HelpFormatter): def __init__(self, *args, **kwargs): def add_argument(self, action: argparse.Action): def end_section(self): def launch_command(args): DYNAMO_BACKENDS = [ "EAGER", "AOT_EAGER", "INDUCTOR", "AOT_TS_NVFUSER", "NVPRIMS_NVFUSER", "CUDAGRAPHS", "OFI", "FX2TRT", "ONNXRT", "TENSORRT", "IPEX", "TVM", ] class CustomArgumentParser(argparse.ArgumentParser): def add_argument(self, *args, **kwargs): def add_argument_group(self, *args, **kwargs): TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"] def launch_command_parser(subparsers=None): description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)" if subparsers is not None: parser = subparsers.add_parser( "launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter ) else: parser = CustomArgumentParser( "Accelerate launch command", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter, ) parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.") parser.add_argument( "--config_file", default=None, help="The config file to use for the default values in the launching script.", ) parser.add_argument( "--quiet", "-q", action="store_true", help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)", ) # Hardware selection arguments hardware_args = parser.add_argument_group( "Hardware Selection Arguments", "Arguments for selecting the hardware to be used." ) hardware_args.add_argument( "--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU." ) hardware_args.add_argument( "--multi_gpu", default=False, action="store_true", help="Whether or not this should launch a distributed GPU training.", ) hardware_args.add_argument( "--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training." ) hardware_args.add_argument( "--ipex", default=False, action="store_true", help="Whether or not this should launch a Intel PyTorch Extension (IPEX) training.", ) # Resource selection arguments resource_args = parser.add_argument_group( "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used." 
) resource_args.add_argument( "--mixed_precision", type=str, choices=["no", "fp16", "bf16", "fp8"], help="Whether or not to use mixed precision training. " "Choose between FP16 and BF16 (bfloat16) training. " "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", ) resource_args.add_argument( "--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel." ) resource_args.add_argument( "--num_machines", type=int, default=None, help="The total number of machines used in this training." ) resource_args.add_argument( "--num_cpu_threads_per_process", type=int, default=None, help="The number of CPU threads per process. Can be tuned for optimal performance.", ) # Dynamo arguments resource_args.add_argument( "--dynamo_backend", type=str, choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS], help="Choose a backend to optimize your training with dynamo, see more at " "https://github.com/pytorch/torchdynamo.", ) resource_args.add_argument( "--dynamo_mode", type=str, default="default", choices=TORCH_DYNAMO_MODES, help="Choose a mode to optimize your training with dynamo.", ) resource_args.add_argument( "--dynamo_use_fullgraph", default=False, action="store_true", help="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs", ) resource_args.add_argument( "--dynamo_use_dynamic", default=False, action="store_true", help="Whether to enable dynamic shape tracing.", ) # Training Paradigm arguments paradigm_args = parser.add_argument_group( "Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used." ) paradigm_args.add_argument( "--use_deepspeed", default=False, action="store_true", help="Whether to use deepspeed.", ) paradigm_args.add_argument( "--use_fsdp", default=False, action="store_true", help="Whether to use fsdp.", ) paradigm_args.add_argument( "--use_megatron_lm", default=False, action="store_true", help="Whether to use Megatron-LM.", ) paradigm_args.add_argument( "--use_xpu", default=False, action="store_true", help="Whether to use IPEX plugin to speed up training on XPU specifically.", ) # distributed GPU training arguments distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.") distributed_args.add_argument( "--gpu_ids", default=None, help="What GPUs (by id) should be used for training on this machine as a comma-seperated list", ) distributed_args.add_argument( "--same_network", default=False, action="store_true", help="Whether all machines used for multinode training exist on the same local network.", ) distributed_args.add_argument( "--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched." ) distributed_args.add_argument( "--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0." 
) distributed_args.add_argument( "--main_process_port", type=int, default=None, help="The port to use to communicate with the machine of rank 0.", ) distributed_args.add_argument( "-t", "--tee", default="0", type=str, help="Tee std streams into a log file and also to console.", ) distributed_args.add_argument( "--role", type=str, default="default", help="User-defined role for the workers.", ) # Rendezvous related arguments distributed_args.add_argument( "--rdzv_backend", type=str, default="static", help="The rendezvous method to use, such as 'static' (the default) or 'c10d'", ) distributed_args.add_argument( "--rdzv_conf", type=str, default="", help="Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).", ) distributed_args.add_argument( "--max_restarts", type=int, default=0, help="Maximum number of worker group restarts before failing.", ) distributed_args.add_argument( "--monitor_interval", type=float, default=5, help="Interval, in seconds, to monitor the state of workers.", ) parser.add_argument( "-m", "--module", action="store_true", help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.", ) parser.add_argument( "--no_python", action="store_true", help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.", ) # TPU arguments tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.") tpu_args.add_argument( "--tpu_cluster", action="store_true", dest="tpu_use_cluster", help="Whether to use a GCP TPU pod for training.", ) tpu_args.add_argument( "--no_tpu_cluster", action="store_false", dest="tpu_use_cluster", help="Should not be passed explicitly, this is for internal use only.", ) tpu_args.add_argument( "--tpu_use_sudo", action="store_true", help="Whether to use `sudo` when running the TPU training script in each pod.", ) tpu_args.add_argument( "--vm", type=str, action="append", help=( "List of single Compute VM instance names. " "If not provided we assume usage of instance groups. For TPU pods." ), ) tpu_args.add_argument( "--env", type=str, action="append", help="List of environment variables to set on the Compute VM instances. For TPU pods.", ) tpu_args.add_argument( "--main_training_function", type=str, default=None, help="The name of the main function to be executed in your script (only for TPU training).", ) tpu_args.add_argument( "--downcast_bf16", action="store_true", help="Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.", ) # DeepSpeed arguments deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.") deepspeed_args.add_argument( "--deepspeed_config_file", default=None, type=str, help="DeepSpeed config file.", ) deepspeed_args.add_argument( "--zero_stage", default=None, type=int, help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to `2`.", ) deepspeed_args.add_argument( "--offload_optimizer_device", default=None, type=str, help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--offload_param_device", default=None, type=str, help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). 
" "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--offload_optimizer_nvme_path", default=None, type=str, help="Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--offload_param_nvme_path", default=None, type=str, help="Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--gradient_accumulation_steps", default=None, type=int, help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to `1`.", ) deepspeed_args.add_argument( "--gradient_clipping", default=None, type=float, help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to `1.0`.", ) deepspeed_args.add_argument( "--zero3_init_flag", default=None, type=str, help="Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. " "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.", ) deepspeed_args.add_argument( "--zero3_save_16bit_model", default=None, type=str, help="Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. " "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.", ) deepspeed_args.add_argument( "--deepspeed_hostfile", default=None, type=str, help="DeepSpeed hostfile for configuring multi-node compute resources.", ) deepspeed_args.add_argument( "--deepspeed_exclusion_filter", default=None, type=str, help="DeepSpeed exclusion filter string when using mutli-node setup.", ) deepspeed_args.add_argument( "--deepspeed_inclusion_filter", default=None, type=str, help="DeepSpeed inclusion filter string when using mutli-node setup.", ) deepspeed_args.add_argument( "--deepspeed_multinode_launcher", default=None, type=str, help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.", ) # fsdp arguments fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Shared Data Parallelism.") fsdp_args.add_argument( "--fsdp_offload_params", default="false", type=str, help="Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_min_num_params", type=int, default=1e8, help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_sharding_strategy", type=str, default="FULL_SHARD", help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_auto_wrap_policy", type=str, default=None, help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_transformer_layer_cls_to_wrap", default=None, type=str, help="Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... " "(useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_backward_prefetch_policy", default=None, type=str, help="This argument is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. 
Use `fsdp_backward_prefetch` instead.", ) fsdp_args.add_argument( "--fsdp_backward_prefetch", default=None, type=str, help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_state_dict_type", default=None, type=str, help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_forward_prefetch", default="false", type=str, help="If True, then FSDP explicitly prefetches the next upcoming " "all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_use_orig_params", default="true", type=str, help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres." " (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_cpu_ram_efficient_loading", default="true", type=str, help="If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. " "Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to True. " "(useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_sync_module_states", default="true", type=str, help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0." " (useful only when `use_fsdp` flag is passed).", ) # megatron_lm args megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.") megatron_lm_args.add_argument( "--megatron_lm_tp_degree", type=int, default=1, help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_pp_degree", type=int, default=1, help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_num_micro_batches", type=int, default=None, help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_sequence_parallelism", default=None, type=str, help="Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. " "(useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_recompute_activations", default=None, type=str, help="Decides Whether (true|false) to enable Selective Activation Recomputation. " "(useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_use_distributed_optimizer", default=None, type=str, help="Decides Whether (true|false) to use distributed optimizer " "which shards optimizer state and gradients across Data Pralellel (DP) ranks. " "(useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_gradient_clipping", default=1.0, type=float, help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). 
" "(useful only when `use_megatron_lm` flag is passed).", ) # AWS arguments aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.") aws_args.add_argument( "--aws_access_key_id", type=str, default=None, help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job", ) aws_args.add_argument( "--aws_secret_access_key", type=str, default=None, help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.", ) parser.add_argument( "--debug", action="store_true", help="Whether to print out the torch.distributed stack trace when something fails.", ) parser.add_argument( "training_script", type=str, help=( "The full path to the script to be launched in parallel, followed by all the arguments for the training " "script." ), ) # MPI arguments mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU") mpirun_args.add_argument( "--mpirun_hostfile", type=str, default=None, help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will " "get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.", ) mpirun_args.add_argument( "--mpirun_ccl", type=int, default=1, help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.", ) # Other arguments of the training scripts parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.") if subparsers is not None: parser.set_defaults(func=launch_command) return parser
null
2,882
import random from pathlib import Path from typing import List import numpy as np import torch from safetensors.torch import load_file from torch.cuda.amp import GradScaler from .utils import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_MODEL_NAME, SAFE_WEIGHTS_NAME, SAMPLER_NAME, SCALER_NAME, SCHEDULER_NAME, WEIGHTS_NAME, get_pretty_name, is_torch_xla_available, is_xpu_available, save, ) if is_torch_xla_available(): import torch_xla.core.xla_model as xm from .logging import get_logger from .state import PartialState logger = get_logger(__name__) class SeedableRandomSampler(RandomSampler): """ Same as a random sampler, except that in `__iter__` a seed can be used. Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed and be fully reproducable on multiple iterations. If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on (stored in `self.epoch`). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.epoch = 0 self.initial_seed = torch.random.initial_seed() def __iter__(self): if self.generator is None: self.generator = torch.Generator() self.generator.manual_seed(self.initial_seed) # Allow `self.epoch` to modify the seed of the generator seed = self.epoch + self.initial_seed # print("Setting seed at epoch", self.epoch, seed) self.generator.manual_seed(seed) yield from super().__iter__() self.set_epoch(self.epoch + 1) def set_epoch(self, epoch: int): "Sets the current iteration of the sampler." self.epoch = epoch class IterableDatasetShard(IterableDataset): """ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will always yield a number of samples that is a round multiple of the actual batch size (depending of the value of `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would be too small or loop with indices from the beginning. Args: dataset (`torch.utils.data.dataset.IterableDataset`): The batch sampler to split in several shards. batch_size (`int`, *optional*, defaults to 1): The size of the batches per shard (if `split_batches=False`) or the size of the batches (if `split_batches=True`). drop_last (`bool`, *optional*, defaults to `False`): Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the beginning. num_processes (`int`, *optional*, defaults to 1): The number of processes running concurrently. process_index (`int`, *optional*, defaults to 0): The index of the current process. split_batches (`bool`, *optional*, defaults to `False`): Whether the shards should be created by splitting a batch to give a piece of it on each process, or by yielding different full batches on each process. On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in: - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this argument is set to `False`. - the shard on process 0 to yield `[0, 1, 4, 5]` and the sampler on process 1 to yield `[2, 3, 6, 7]` if this argument is set to `True`. 
""" def __init__( self, dataset: IterableDataset, batch_size: int = 1, drop_last: bool = False, num_processes: int = 1, process_index: int = 0, split_batches: bool = False, ): if split_batches and batch_size > 1 and batch_size % num_processes != 0: raise ValueError( f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) " f"needs to be a round multiple of the number of processes ({num_processes})." ) self.dataset = dataset self.batch_size = batch_size self.drop_last = drop_last self.num_processes = num_processes self.process_index = process_index self.split_batches = split_batches def set_epoch(self, epoch): self.epoch = epoch if hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def __len__(self): # We will just raise the downstream error if the underlying dataset is not sized if self.drop_last: return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size else: return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size def __iter__(self): if ( not hasattr(self.dataset, "set_epoch") and hasattr(self.dataset, "generator") and isinstance(self.dataset.generator, torch.Generator) ): self.dataset.generator.manual_seed(self.epoch) real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes) process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size) first_batch = None current_batch = [] for element in self.dataset: current_batch.append(element) # Wait to have a full batch before yielding elements. if len(current_batch) == real_batch_size: for i in process_slice: yield current_batch[i] if first_batch is None: first_batch = current_batch.copy() current_batch = [] # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning. if not self.drop_last and len(current_batch) > 0: if first_batch is None: first_batch = current_batch.copy() while len(current_batch) < real_batch_size: current_batch += first_batch for i in process_slice: yield current_batch[i] The provided code snippet includes necessary dependencies for implementing the `save_accelerator_state` function. Write a Python function `def save_accelerator_state( output_dir: str, model_states: List[dict], optimizers: list, schedulers: list, dataloaders: list, process_index: int, scaler: GradScaler = None, save_on_each_node: bool = False, safe_serialization: bool = True, )` to solve the following problem: Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory. <Tip> If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native `pickle`. </Tip> Args: output_dir (`str` or `os.PathLike`): The name of the folder to save all relevant weights and states. 
model_states (`List[torch.nn.Module]`): A list of model states optimizers (`List[torch.optim.Optimizer]`): A list of optimizer instances schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): A list of learning rate schedulers dataloaders (`List[torch.utils.data.DataLoader]`): A list of dataloader instances to save their sampler states process_index (`int`): The current process index in the Accelerator state scaler (`torch.cuda.amp.GradScaler`, *optional*): An optional gradient scaler instance to save save_on_each_node (`bool`, *optional*): Whether to save on every node, or only the main node. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). Here is the function: def save_accelerator_state( output_dir: str, model_states: List[dict], optimizers: list, schedulers: list, dataloaders: list, process_index: int, scaler: GradScaler = None, save_on_each_node: bool = False, safe_serialization: bool = True, ): """ Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory. <Tip> If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native `pickle`. </Tip> Args: output_dir (`str` or `os.PathLike`): The name of the folder to save all relevant weights and states. model_states (`List[torch.nn.Module]`): A list of model states optimizers (`List[torch.optim.Optimizer]`): A list of optimizer instances schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): A list of learning rate schedulers dataloaders (`List[torch.utils.data.DataLoader]`): A list of dataloader instances to save their sampler states process_index (`int`): The current process index in the Accelerator state scaler (`torch.cuda.amp.GradScaler`, *optional*): An optional gradient scaler instance to save save_on_each_node (`bool`, *optional*): Whether to save on every node, or only the main node. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). 
""" output_dir = Path(output_dir) # Model states for i, state in enumerate(model_states): weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME if i > 0: weights_name = weights_name.replace(".", f"_{i}.") output_model_file = output_dir.joinpath(weights_name) save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization) logger.info(f"Model weights saved in {output_model_file}") # Optimizer states for i, opt in enumerate(optimizers): state = opt.state_dict() optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" output_optimizer_file = output_dir.joinpath(optimizer_name) save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False) logger.info(f"Optimizer state saved in {output_optimizer_file}") # Scheduler states for i, scheduler in enumerate(schedulers): state = scheduler.state_dict() scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" output_scheduler_file = output_dir.joinpath(scheduler_name) save(state, output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False) logger.info(f"Scheduler state saved in {output_scheduler_file}") # DataLoader states for i, dataloader in enumerate(dataloaders): sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin" output_sampler_file = output_dir.joinpath(sampler_name) # Only save if we have our custom sampler from .data_loader import IterableDatasetShard, SeedableRandomSampler if isinstance(dataloader.dataset, IterableDatasetShard): sampler = dataloader.sampler.sampler if isinstance(sampler, SeedableRandomSampler): save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False) logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}") # GradScaler state if scaler is not None: state = scaler.state_dict() output_scaler_file = output_dir.joinpath(SCALER_NAME) torch.save(state, output_scaler_file) logger.info(f"Gradient scaler state saved in {output_scaler_file}") # Random number generator states states = {} states_name = f"{RNG_STATE_NAME}_{process_index}.pkl" states["random_state"] = random.getstate() states["numpy_random_seed"] = np.random.get_state() states["torch_manual_seed"] = torch.get_rng_state() if is_xpu_available(): states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all() else: states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all() if is_torch_xla_available(): states["xm_seed"] = xm.get_rng_state() output_states_file = output_dir.joinpath(states_name) torch.save(states, output_states_file) logger.info(f"Random states saved in {output_states_file}") return output_dir
Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory. <Tip> If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native `pickle`. </Tip> Args: output_dir (`str` or `os.PathLike`): The name of the folder to save all relevant weights and states. model_states (`List[torch.nn.Module]`): A list of model states optimizers (`List[torch.optim.Optimizer]`): A list of optimizer instances schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): A list of learning rate schedulers dataloaders (`List[torch.utils.data.DataLoader]`): A list of dataloader instances to save their sampler states process_index (`int`): The current process index in the Accelerator state scaler (`torch.cuda.amp.GradScaler`, *optional*): An optional gradient scaler instance to save save_on_each_node (`bool`, *optional*): Whether to save on every node, or only the main node. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
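A hedged usage sketch of `save_accelerator_state` (the directory name, model, and optimizer are illustrative; in the library this function is normally invoked through `Accelerator.save_state` rather than called directly):

```python
# Illustrative only: a single model/optimizer pair checkpointed to "ckpt/".
import os

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

os.makedirs("ckpt", exist_ok=True)  # the output directory must already exist
save_accelerator_state(
    output_dir="ckpt",
    model_states=[model.state_dict()],
    optimizers=[optimizer],
    schedulers=[],
    dataloaders=[],
    process_index=0,
    safe_serialization=True,  # weights -> safetensors, everything else -> pickle
)
```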
2,883
import random from pathlib import Path from typing import List import numpy as np import torch from safetensors.torch import load_file from torch.cuda.amp import GradScaler from .utils import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_MODEL_NAME, SAFE_WEIGHTS_NAME, SAMPLER_NAME, SCALER_NAME, SCHEDULER_NAME, WEIGHTS_NAME, get_pretty_name, is_torch_xla_available, is_xpu_available, save, ) if is_torch_xla_available(): import torch_xla.core.xla_model as xm from .logging import get_logger from .state import PartialState logger = get_logger(__name__) class PartialState: """ Singleton class that has information about the current training environment and functions to help with process control. Designed to be used when only process control and device execution states are needed. Does *not* need to be initialized from `Accelerator`. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **local_process_index** (`int`) -- The index of the current process on the current server. - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. 
""" _shared_state = SharedDict() def __init__(self, cpu: bool = False, **kwargs): self.__dict__ = self._shared_state if not self.initialized: self._cpu = cpu self.backend = None env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None) self.device = torch.device(env_device) if env_device is not None else None self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE") use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None) if use_sagemaker_dp is None: use_sagemaker_dp = ( os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true" and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO ) if use_sagemaker_dp and not cpu: if ( os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL ) or use_sagemaker_dp: self.distributed_type = DistributedType.MULTI_GPU import smdistributed.dataparallel.torch.torch_smddp # noqa if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="smddp") self.backend = "smddp" self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_torch_xla_available() and not cpu: self.distributed_type = DistributedType.XLA self.device = xm.xla_device() xm.set_replication(self.device, xm.get_xla_supported_devices()) self.num_processes = xm.xrt_world_size() self.process_index = xm.get_ordinal() if is_torch_xla_available(check_is_tpu=True): self.local_process_index = xm.get_local_ordinal() else: self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) elif ( os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu ): assert ( is_deepspeed_available() ), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source" self.distributed_type = DistributedType.DEEPSPEED if not torch.distributed.is_initialized(): from deepspeed import comm as dist # DeepSpeed always uses nccl kwargs.pop("backend", None) if is_xpu_available and is_ccl_available(): # Set DeepSpeed backend to ccl for xpu self.backend = "ccl" os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1") os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0") elif is_npu_available(): self.backend = "hccl" else: self.backend = "nccl" dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: if is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) if self.device is not None: torch.xpu.set_device(self.device) elif is_npu_available(): self.device = torch.device("npu", self.local_process_index) if self.device is not None: torch.npu.set_device(self.device) else: self.device = torch.device("cuda", self.local_process_index) if self.device is not None: torch.cuda.set_device(self.device) if self.device.type == "cuda" and not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. 
" 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self._mixed_precision = "no" # deepspeed handles mixed_precision using deepspeed_config elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu and torch.cuda.is_available(): self.distributed_type = DistributedType.MULTI_GPU if not torch.distributed.is_initialized(): self.backend = kwargs.pop("backend", "nccl") # Special case for `TrainingArguments`, where `backend` will be `None` if self.backend is None: self.backend = "nccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) if not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. " 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_npu_available() and not cpu and int(os.environ.get("LOCAL_RANK", -1)) != -1: self.distributed_type = DistributedType.MULTI_NPU if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = "hccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("npu", self.local_process_index) torch.npu.set_device(self.device) elif ( get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1 or int(os.environ.get("LOCAL_RANK", -1)) != -1 ): if not cpu and is_xpu_available(): self.distributed_type = DistributedType.MULTI_XPU else: self.distributed_type = DistributedType.MULTI_CPU # Actually, CCL_WORKER_COUNT is a CPU only env var in CCL, no need to set it for XPU. 
if is_ccl_available() and ( get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or self.distributed_type == DistributedType.MULTI_XPU ): if get_ccl_version() >= "1.12": import oneccl_bindings_for_pytorch # noqa: F401 else: import torch_ccl # noqa: F401 backend = "ccl" elif torch.distributed.is_mpi_available(): backend = "mpi" else: backend = "gloo" # Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH rank = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1) local_rank = get_int_from_env( ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 ) local_size = get_int_from_env( ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1, ) self.local_process_index = local_rank os.environ["RANK"] = str(rank) os.environ["WORLD_SIZE"] = str(size) os.environ["LOCAL_RANK"] = str(local_rank) os.environ["LOCAL_WORLD_SIZE"] = str(local_size) if backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU: os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = str(local_size) os.environ["CCL_LOCAL_RANK"] = str(local_rank) if not os.environ.get("MASTER_PORT", None): os.environ["MASTER_PORT"] = "29500" if not os.environ.get("MASTER_ADDR", None): if local_size != size and backend != "mpi": raise ValueError( "Looks like distributed multinode run but MASTER_ADDR env not set, " "please try exporting rank 0's hostname as MASTER_ADDR" ) if ( self.distributed_type == DistributedType.MULTI_CPU and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0 ): import psutil num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size) if num_cpu_threads_per_process == 0: num_cpu_threads_per_process = 1 torch.set_num_threads(num_cpu_threads_per_process) warnings.warn( f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob" " performance." 
) if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = backend torch.distributed.init_process_group(self.backend, rank=rank, world_size=size, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() if cpu: self.device = torch.device("cpu") elif is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) torch.xpu.set_device(self.device) else: self.device = self.default_device else: self.distributed_type = ( DistributedType.NO if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "false" else DistributedType.DEEPSPEED ) self.num_processes = 1 self.process_index = self.local_process_index = 0 if self.device is None: self.device = torch.device("cpu") if cpu else self.default_device self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0) def __repr__(self) -> str: return ( f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n" f"Num processes: {self.num_processes}\n" f"Process index: {self.process_index}\n" f"Local process index: {self.local_process_index}\n" f"Device: {self.device}\n" ) def _reset_state(): "Resets `_shared_state`, is used internally and should not be called" PartialState._shared_state.clear() def initialized(self) -> bool: "Returns whether the `PartialState` has been initialized" return self._shared_state != {} def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return self.distributed_type != DistributedType.NO and self.num_processes > 1 def is_last_process(self) -> bool: "Returns whether the current process is the last one" return self.process_index == self.num_processes - 1 def is_main_process(self) -> bool: "Returns whether the current process is the main process" return ( self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return ( self.local_process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def wait_for_everyone(self): """ Will stop the execution of the current process until every other process has reached that point (so this does nothing when the script is only run in one process). Useful to do before saving a model. Example: ```python >>> # Assuming two GPU processes >>> import time >>> from accelerate.state import PartialState >>> state = PartialState() >>> if state.is_main_process: ... time.sleep(2) >>> else: ... print("I'm waiting for the main process to finish its sleep...") >>> state.wait_for_everyone() >>> # Should print on every process at the same time >>> print("Everyone is here") ``` """ if self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, DistributedType.DEEPSPEED, DistributedType.FSDP, ): torch.distributed.barrier() elif self.distributed_type == DistributedType.XLA: xm.rendezvous("accelerate.utils.wait_for_everyone") def _goes_first(self, is_main: bool): if not is_main: self.wait_for_everyone() yield if is_main: self.wait_for_everyone() def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. 
Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate import PartialState state = PartialState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ if self.num_processes == 1: yield inputs return length = len(inputs) # Nested dictionary of any types if isinstance(inputs, dict): length = len(inputs[list(inputs.keys())[0]]) if not all(len(v) == length for v in inputs.values()): raise ValueError("All values in the dictionary must have the same length") num_samples_per_process = math.ceil(length / self.num_processes) start_index = self.process_index * num_samples_per_process end_index = start_index + num_samples_per_process if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1): end_index = length def _split_values(inputs, start_index, end_index): if isinstance(inputs, (list, tuple, torch.Tensor)): if start_index >= len(inputs): result = inputs[-1:] else: result = inputs[start_index:end_index] if apply_padding: if isinstance(result, torch.Tensor): from accelerate.utils import pad_across_processes, send_to_device # The tensor needs to be on the device before we can pad it tensorized_result = send_to_device(result, self.device) result = pad_across_processes(tensorized_result, pad_index=inputs[-1]) else: result += [result[-1]] * (num_samples_per_process - len(result)) return result elif isinstance(inputs, dict): for key in inputs.keys(): inputs[key] = _split_values(inputs[key], start_index, end_index) return inputs else: if is_datasets_available(): from datasets import Dataset if isinstance(inputs, Dataset): if start_index >= len(inputs): start_index = len(inputs) - 1 if end_index > len(inputs): end_index = len(inputs) result_idcs = list(range(start_index, end_index)) if apply_padding: result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs)) return inputs.select(result_idcs) return inputs yield _split_values(inputs, start_index, end_index) def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> with accelerator.main_process_first(): ... # This will be printed first by process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {accelerator.process_index}") ``` """ yield from self._goes_first(self.is_main_process) def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. 
Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> with state.local_main_process_first(): ... # This will be printed first by local process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {state.local_process_index}") ``` """ yield from self._goes_first(self.is_local_main_process) def on_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the main process. Args: function (`Callable`): The function to decorate. Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> @state.on_main_process ... def print_something(): ... print("This will be printed by process 0 only.") >>> print_something() "This will be printed by process 0 only" ``` """ if not self.initialized: raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.") if self.is_main_process or not self.use_distributed: return function return do_nothing def on_local_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the local main process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 2 servers with 4 processes each. from accelerate.state import PartialState state = PartialState() def print_something(): print("This will be printed by process 0 only on each server.") print_something() # On server 1: "This will be printed by process 0 only" # On server 2: "This will be printed by process 0 only" ``` """ if self.is_local_main_process or not self.use_distributed: return function return do_nothing def on_last_process(self, function: Callable[..., Any]): """ Decorator that only runs the decorated function on the last process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 3" ``` """ if self.is_last_process or not self.use_distributed: return function return do_nothing def on_process(self, function: Callable[..., Any] = None, process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index. Args: function (`Callable`, `optional`): The function to decorate. process_index (`int`, `optional`): The index of the process on which to run the function. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 2" ``` """ if function is None: return partial(self.on_process, process_index=process_index) if (self.process_index == process_index) or (not self.use_distributed): return function return do_nothing def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index on the current node. Args: function (`Callable`, *optional*): The function to decorate. local_process_index (`int`, *optional*): The index of the local process on which to run the function. Example: ```python # Assume we have 2 servers with 4 processes each. 
from accelerate import Accelerator accelerator = Accelerator() def print_something(): print(f"Printed on process {accelerator.local_process_index}") print_something() # On server 1: "Printed on process 2" # On server 2: "Printed on process 2" ``` """ if function is None: return partial(self.on_local_process, local_process_index=local_process_index) if (self.local_process_index == local_process_index) or (not self.use_distributed): return function return do_nothing def print(self, *args, **kwargs): if self.is_local_main_process: print(*args, **kwargs) def default_device(self) -> torch.device: """ Returns the default device which is: - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True. - CUDA if `torch.cuda.is_available()` - NPU if `is_npu_available()` - CPU otherwise """ if is_mps_available(): os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" return torch.device("mps") elif torch.cuda.is_available(): return torch.device("cuda") elif is_xpu_available(): return torch.device("xpu:0") elif is_npu_available(): return torch.device("npu") else: return torch.device("cpu") class SeedableRandomSampler(RandomSampler): """ Same as a random sampler, except that in `__iter__` a seed can be used. Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed and be fully reproducable on multiple iterations. If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on (stored in `self.epoch`). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.epoch = 0 self.initial_seed = torch.random.initial_seed() def __iter__(self): if self.generator is None: self.generator = torch.Generator() self.generator.manual_seed(self.initial_seed) # Allow `self.epoch` to modify the seed of the generator seed = self.epoch + self.initial_seed # print("Setting seed at epoch", self.epoch, seed) self.generator.manual_seed(seed) yield from super().__iter__() self.set_epoch(self.epoch + 1) def set_epoch(self, epoch: int): "Sets the current iteration of the sampler." self.epoch = epoch class IterableDatasetShard(IterableDataset): """ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will always yield a number of samples that is a round multiple of the actual batch size (depending of the value of `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would be too small or loop with indices from the beginning. Args: dataset (`torch.utils.data.dataset.IterableDataset`): The batch sampler to split in several shards. batch_size (`int`, *optional*, defaults to 1): The size of the batches per shard (if `split_batches=False`) or the size of the batches (if `split_batches=True`). drop_last (`bool`, *optional*, defaults to `False`): Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the beginning. num_processes (`int`, *optional*, defaults to 1): The number of processes running concurrently. process_index (`int`, *optional*, defaults to 0): The index of the current process. split_batches (`bool`, *optional*, defaults to `False`): Whether the shards should be created by splitting a batch to give a piece of it on each process, or by yielding different full batches on each process. 
On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in: - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this argument is set to `False`. - the shard on process 0 to yield `[0, 1, 4, 5]` and the sampler on process 1 to yield `[2, 3, 6, 7]` if this argument is set to `True`. """ def __init__( self, dataset: IterableDataset, batch_size: int = 1, drop_last: bool = False, num_processes: int = 1, process_index: int = 0, split_batches: bool = False, ): if split_batches and batch_size > 1 and batch_size % num_processes != 0: raise ValueError( f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) " f"needs to be a round multiple of the number of processes ({num_processes})." ) self.dataset = dataset self.batch_size = batch_size self.drop_last = drop_last self.num_processes = num_processes self.process_index = process_index self.split_batches = split_batches def set_epoch(self, epoch): self.epoch = epoch if hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def __len__(self): # We will just raise the downstream error if the underlying dataset is not sized if self.drop_last: return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size else: return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size def __iter__(self): if ( not hasattr(self.dataset, "set_epoch") and hasattr(self.dataset, "generator") and isinstance(self.dataset.generator, torch.Generator) ): self.dataset.generator.manual_seed(self.epoch) real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes) process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size) first_batch = None current_batch = [] for element in self.dataset: current_batch.append(element) # Wait to have a full batch before yielding elements. if len(current_batch) == real_batch_size: for i in process_slice: yield current_batch[i] if first_batch is None: first_batch = current_batch.copy() current_batch = [] # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning. if not self.drop_last and len(current_batch) > 0: if first_batch is None: first_batch = current_batch.copy() while len(current_batch) < real_batch_size: current_batch += first_batch for i in process_slice: yield current_batch[i] The provided code snippet includes necessary dependencies for implementing the `load_accelerator_state` function. Write a Python function `def load_accelerator_state( input_dir, models, optimizers, schedulers, dataloaders, process_index, scaler=None, map_location=None, **load_model_func_kwargs, )` to solve the following problem: Loads states of the models, optimizers, scaler, and RNG generators from a given directory. Args: input_dir (`str` or `os.PathLike`): The name of the folder to load all relevant weights and states. 
models (`List[torch.nn.Module]`): A list of model instances optimizers (`List[torch.optim.Optimizer]`): A list of optimizer instances schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): A list of learning rate schedulers process_index (`int`): The current process index in the Accelerator state scaler (`torch.cuda.amp.GradScaler`, *optional*): An optional *GradScaler* instance to load map_location (`str`, *optional*): What device to load the optimizer state onto. Should be one of either "cpu" or "on_device". load_model_func_kwargs (`dict`, *optional*): Additional arguments that can be passed to the model's `load_state_dict` method. Here is the function: def load_accelerator_state( input_dir, models, optimizers, schedulers, dataloaders, process_index, scaler=None, map_location=None, **load_model_func_kwargs, ): """ Loads states of the models, optimizers, scaler, and RNG generators from a given directory. Args: input_dir (`str` or `os.PathLike`): The name of the folder to load all relevant weights and states. models (`List[torch.nn.Module]`): A list of model instances optimizers (`List[torch.optim.Optimizer]`): A list of optimizer instances schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): A list of learning rate schedulers process_index (`int`): The current process index in the Accelerator state scaler (`torch.cuda.amp.GradScaler`, *optional*): An optional *GradScaler* instance to load map_location (`str`, *optional*): What device to load the optimizer state onto. Should be one of either "cpu" or "on_device". load_model_func_kwargs (`dict`, *optional*): Additional arguments that can be passed to the model's `load_state_dict` method. """ if map_location not in [None, "cpu", "on_device"]: raise TypeError( "Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`" ) if map_location is None: map_location = "cpu" elif map_location == "on_device": map_location = PartialState().device input_dir = Path(input_dir) # Model states for i, model in enumerate(models): ending = f"_{i}" if i > 0 else "" input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors") if input_model_file.exists(): state_dict = load_file(input_model_file, device=str(map_location)) else: # Load with torch input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin") state_dict = torch.load(input_model_file, map_location=map_location) models[i].load_state_dict(state_dict, **load_model_func_kwargs) logger.info("All model weights loaded successfully") # Optimizer states for i, opt in enumerate(optimizers): optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" input_optimizer_file = input_dir.joinpath(optimizer_name) optimizer_state = torch.load(input_optimizer_file, map_location=map_location) optimizers[i].load_state_dict(optimizer_state) logger.info("All optimizer states loaded successfully") # Scheduler states for i, scheduler in enumerate(schedulers): scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" input_scheduler_file = input_dir.joinpath(scheduler_name) scheduler.load_state_dict(torch.load(input_scheduler_file)) logger.info("All scheduler states loaded successfully") for i, dataloader in enumerate(dataloaders): sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin" input_sampler_file = input_dir.joinpath(sampler_name) # Only load if we have our custom sampler from .data_loader import IterableDatasetShard, SeedableRandomSampler if isinstance(dataloader.dataset, 
IterableDatasetShard): sampler = dataloader.sampler.sampler if isinstance(sampler, SeedableRandomSampler): dataloader.sampler.sampler = torch.load(input_sampler_file) logger.info("All dataloader sampler states loaded successfully") # GradScaler state if scaler is not None: input_scaler_file = input_dir.joinpath(SCALER_NAME) scaler.load_state_dict(torch.load(input_scaler_file)) logger.info("GradScaler state loaded successfully") # Random states try: states = torch.load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl")) random.setstate(states["random_state"]) np.random.set_state(states["numpy_random_seed"]) torch.set_rng_state(states["torch_manual_seed"]) if is_xpu_available(): torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"]) else: torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"]) if is_torch_xla_available(): xm.set_rng_state(states["xm_seed"]) logger.info("All random states loaded successfully") except Exception: logger.info("Could not load random states")
Loads states of the models, optimizers, scaler, and RNG generators from a given directory.

Args:
    input_dir (`str` or `os.PathLike`):
        The name of the folder to load all relevant weights and states.
    models (`List[torch.nn.Module]`):
        A list of model instances
    optimizers (`List[torch.optim.Optimizer]`):
        A list of optimizer instances
    schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
        A list of learning rate schedulers
    dataloaders (`List[torch.utils.data.DataLoader]`):
        A list of dataloaders whose `SeedableRandomSampler` state should be restored
    process_index (`int`):
        The current process index in the Accelerator state
    scaler (`torch.cuda.amp.GradScaler`, *optional*):
        An optional *GradScaler* instance to load
    map_location (`str`, *optional*):
        What device to load the optimizer state onto. Should be one of either "cpu" or "on_device".
    load_model_func_kwargs (`dict`, *optional*):
        Additional arguments that can be passed to the model's `load_state_dict` method.
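Below is a minimal, hypothetical usage sketch for `load_accelerator_state`: the `checkpoint_0` directory and the tiny model, optimizer, and scheduler are invented for illustration, and in practice this helper is usually reached through `Accelerator.load_state` rather than called directly.

```python
import torch

# Hypothetical restore: "checkpoint_0" is assumed to hold the weight, optimizer,
# scheduler, and RNG-state files written by the matching save routine for process 0.
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)

load_accelerator_state(
    "checkpoint_0",
    models=[model],            # loaded in place via `load_state_dict`
    optimizers=[optimizer],
    schedulers=[scheduler],
    dataloaders=[],            # no sampler state to restore in this sketch
    process_index=0,
    map_location="cpu",        # or "on_device" to load optimizer state onto the current device
)
```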
2,884
import random from pathlib import Path from typing import List import numpy as np import torch from safetensors.torch import load_file from torch.cuda.amp import GradScaler from .utils import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_MODEL_NAME, SAFE_WEIGHTS_NAME, SAMPLER_NAME, SCALER_NAME, SCHEDULER_NAME, WEIGHTS_NAME, get_pretty_name, is_torch_xla_available, is_xpu_available, save, ) from .logging import get_logger from .state import PartialState logger = get_logger(__name__) The provided code snippet includes necessary dependencies for implementing the `save_custom_state` function. Write a Python function `def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False)` to solve the following problem: Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl` Here is the function: def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False): """ Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl` """ # Should this be the right way to get a qual_name type value from `obj`? save_location = Path(path) / f"custom_checkpoint_{index}.pkl" logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}") save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node)
Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`
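As an illustrative sketch (not from the source), any object exposing `state_dict()` can be checkpointed this way; the toy `EMATracker` class and the `ckpt` directory below are invented for the example. With an `Accelerator`, such objects would normally be registered through `register_for_checkpointing` so that `save_state` calls this helper for you.

```python
import os

class EMATracker:
    """Toy stateful object: keeps an exponential moving average of a metric."""

    def __init__(self, beta: float = 0.9):
        self.beta = beta
        self.value = 0.0

    def update(self, x: float) -> None:
        self.value = self.beta * self.value + (1 - self.beta) * x

    def state_dict(self) -> dict:
        return {"beta": self.beta, "value": self.value}

    def load_state_dict(self, state: dict) -> None:
        self.beta = state["beta"]
        self.value = state["value"]


tracker = EMATracker()
tracker.update(1.0)

os.makedirs("ckpt", exist_ok=True)
save_custom_state(tracker, "ckpt", index=0)  # writes ckpt/custom_checkpoint_0.pkl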
2,885
import random from pathlib import Path from typing import List import numpy as np import torch from safetensors.torch import load_file from torch.cuda.amp import GradScaler from .utils import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_MODEL_NAME, SAFE_WEIGHTS_NAME, SAMPLER_NAME, SCALER_NAME, SCHEDULER_NAME, WEIGHTS_NAME, get_pretty_name, is_torch_xla_available, is_xpu_available, save, ) from .logging import get_logger from .state import PartialState logger = get_logger(__name__) The provided code snippet includes necessary dependencies for implementing the `load_custom_state` function. Write a Python function `def load_custom_state(obj, path, index: int = 0)` to solve the following problem: Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl` Here is the function: def load_custom_state(obj, path, index: int = 0): """ Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl` """ load_location = f"{path}/custom_checkpoint_{index}.pkl" logger.info(f"Loading the state of {get_pretty_name(obj)} from {load_location}") obj.load_state_dict(torch.load(load_location, map_location="cpu"))
Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`
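Restoring is the mirror operation. Here is a hedged round trip using a plain `torch.nn.Linear` (any object with `state_dict`/`load_state_dict` works, as with the hypothetical `EMATracker` above) and the same invented `ckpt` directory; note that the pickled state is always mapped to CPU first.

```python
import os
import torch

os.makedirs("ckpt", exist_ok=True)
layer = torch.nn.Linear(4, 4)
save_custom_state(layer, "ckpt", index=1)    # writes ckpt/custom_checkpoint_1.pkl

restored = torch.nn.Linear(4, 4)             # fresh instance with random weights
load_custom_state(restored, "ckpt", index=1)  # index must match the one used at save time
assert torch.equal(restored.weight, layer.weight)
```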
2,886
import functools import logging import os from .state import PartialState class MultiProcessAdapter(logging.LoggerAdapter): """ An adapter to assist with logging in multiprocess. `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes or only the main executed one. Default is `main_process_only=True`. Does not require an `Accelerator` object to be created first. """ def _should_log(main_process_only): "Check if log should be performed" state = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def log(self, level, msg, *args, **kwargs): """ Delegates logger call after checking if we should log. Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes or only the main executed one. Default is `True` if not passed Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not break with the previous behavior. `in_order` is ignored if `main_process_only` is passed. """ if PartialState._shared_state == {}: raise RuntimeError( "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." ) main_process_only = kwargs.pop("main_process_only", True) in_order = kwargs.pop("in_order", False) if self.isEnabledFor(level): if self._should_log(main_process_only): msg, kwargs = self.process(msg, kwargs) self.logger.log(level, msg, *args, **kwargs) elif in_order: state = PartialState() for i in range(state.num_processes): if i == state.process_index: msg, kwargs = self.process(msg, kwargs) self.logger.log(level, msg, *args, **kwargs) state.wait_for_everyone() def warning_once(self, *args, **kwargs): """ This method is identical to `logger.warning()`, but will emit the warning with the same message only once Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache. The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to another type of cache that includes the caller frame information in the hashing function. """ self.warning(*args, **kwargs) import logging The provided code snippet includes necessary dependencies for implementing the `get_logger` function. Write a Python function `def get_logger(name: str, log_level: str = None)` to solve the following problem: Returns a `logging.Logger` for `name` that can handle multiprocessing. If a log should be called on all processes, pass `main_process_only=False` If a log should be called on all processes and in order, also pass `in_order=True` Args: name (`str`): The name for the logger, such as `__file__` log_level (`str`, *optional*): The log level to use. 
If not passed, will default to the `ACCELERATE_LOG_LEVEL` environment variable, or `INFO` if not set.
Example:

```python
>>> from accelerate.logging import get_logger
>>> from accelerate import Accelerator
>>> logger = get_logger(__name__)

>>> accelerator = Accelerator()
>>> logger.info("My log", main_process_only=False)
>>> logger.debug("My log", main_process_only=True)

>>> logger = get_logger(__name__, log_level="DEBUG")
>>> logger.info("My log")
>>> logger.debug("My second log")

>>> array = ["a", "b", "c", "d"]
>>> letter_at_rank = array[accelerator.process_index]
>>> logger.info(letter_at_rank, in_order=True)
```
Here is the function:
def get_logger(name: str, log_level: str = None):
    """
    Returns a `logging.Logger` for `name` that can handle multiprocessing.

    If a log should be called on all processes, pass `main_process_only=False`. If a log should be called on all
    processes and in order, also pass `in_order=True`.

    Args:
        name (`str`):
            The name for the logger, such as `__file__`
        log_level (`str`, *optional*):
            The log level to use. If not passed, will default to the `ACCELERATE_LOG_LEVEL` environment variable, or
            `INFO` if not set.

    Example:

    ```python
    >>> from accelerate.logging import get_logger
    >>> from accelerate import Accelerator
    >>> logger = get_logger(__name__)

    >>> accelerator = Accelerator()
    >>> logger.info("My log", main_process_only=False)
    >>> logger.debug("My log", main_process_only=True)

    >>> logger = get_logger(__name__, log_level="DEBUG")
    >>> logger.info("My log")
    >>> logger.debug("My second log")

    >>> array = ["a", "b", "c", "d"]
    >>> letter_at_rank = array[accelerator.process_index]
    >>> logger.info(letter_at_rank, in_order=True)
    ```
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
Returns a `logging.Logger` for `name` that can handle multiprocessing.

If a log should be called on all processes, pass `main_process_only=False`. If a log should be called on all processes and in order, also pass `in_order=True`.

Args:
    name (`str`):
        The name for the logger, such as `__file__`
    log_level (`str`, *optional*):
        The log level to use. If not passed, will default to the `ACCELERATE_LOG_LEVEL` environment variable, or `INFO` if not set.

Example:

```python
>>> from accelerate.logging import get_logger
>>> from accelerate import Accelerator
>>> logger = get_logger(__name__)

>>> accelerator = Accelerator()
>>> logger.info("My log", main_process_only=False)
>>> logger.debug("My log", main_process_only=True)

>>> logger = get_logger(__name__, log_level="DEBUG")
>>> logger.info("My log")
>>> logger.debug("My second log")

>>> array = ["a", "b", "c", "d"]
>>> letter_at_rank = array[accelerator.process_index]
>>> logger.info(letter_at_rank, in_order=True)
```
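One complementary, hypothetical sketch beyond the docstring example (the file name and launch command are illustrative): because `MultiProcessAdapter.log` raises unless the accelerate state exists, construct a `PartialState` (or an `Accelerator`) before the first log call.

```python
# hypothetical script.py, launched with e.g. `accelerate launch script.py`
from accelerate import PartialState
from accelerate.logging import get_logger

state = PartialState()                     # must exist before any logging call
logger = get_logger(__name__, log_level="INFO")

logger.info("printed once, by the main process")                           # main_process_only=True by default
logger.info(f"rank {state.process_index} here", main_process_only=False)   # every process, unordered
logger.info(f"rank {state.process_index} in turn", in_order=True)          # processes take turns (see `log` above)
```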
2,887
import math from contextlib import suppress from typing import Callable, List, Optional, Union import torch from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler from .logging import get_logger from .state import AcceleratorState, DistributedType, GradientState, is_torch_xla_available from .utils import ( RNGType, broadcast, broadcast_object_list, concatenate, find_batch_size, get_data_structure, initialize_tensors, is_torch_version, send_to_device, slice_tensors, synchronize_rng_states, ) _PYTORCH_DATALOADER_KWARGS = { "batch_size": 1, "shuffle": False, "sampler": None, "batch_sampler": None, "num_workers": 0, "collate_fn": None, "pin_memory": False, "drop_last": False, "timeout": 0, "worker_init_fn": None, "multiprocessing_context": None, "generator": None, "prefetch_factor": 2, "persistent_workers": False, } class SeedableRandomSampler(RandomSampler): """ Same as a random sampler, except that in `__iter__` a seed can be used. Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed and be fully reproducable on multiple iterations. If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on (stored in `self.epoch`). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.epoch = 0 self.initial_seed = torch.random.initial_seed() def __iter__(self): if self.generator is None: self.generator = torch.Generator() self.generator.manual_seed(self.initial_seed) # Allow `self.epoch` to modify the seed of the generator seed = self.epoch + self.initial_seed # print("Setting seed at epoch", self.epoch, seed) self.generator.manual_seed(seed) yield from super().__iter__() self.set_epoch(self.epoch + 1) def set_epoch(self, epoch: int): "Sets the current iteration of the sampler." self.epoch = epoch class BatchSamplerShard(BatchSampler): """ Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will always yield a number of batches that is a round multiple of `num_processes` and that all have the same size. Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would be too small / not present on all processes or loop with indices from the beginning. Args: batch_sampler (`torch.utils.data.sampler.BatchSampler`): The batch sampler to split in several shards. num_processes (`int`, *optional*, defaults to 1): The number of processes running concurrently. process_index (`int`, *optional*, defaults to 0): The index of the current process. split_batches (`bool`, *optional*, defaults to `False`): Whether the shards should be created by splitting a batch to give a piece of it on each process, or by yielding different full batches on each process. On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in: - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if this argument is set to `False`. - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]` then `[6, 7]` if this argument is set to `True`. even_batches (`bool`, *optional*, defaults to `True`): Whether or not to loop back at the beginning of the sampler when the number of samples is not a round multiple of (original batch size / number of processes). 
<Tip warning={true}> `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches` equal to `False` </Tip>""" def __init__( self, batch_sampler: BatchSampler, num_processes: int = 1, process_index: int = 0, split_batches: bool = False, even_batches: bool = True, ): if split_batches and batch_sampler.batch_size % num_processes != 0: raise ValueError( f"To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) " f"needs to be a round multiple of the number of processes ({num_processes})." ) self.batch_sampler = batch_sampler self.num_processes = num_processes self.process_index = process_index self.split_batches = split_batches self.even_batches = even_batches self.batch_size = getattr(batch_sampler, "batch_size", None) self.drop_last = getattr(batch_sampler, "drop_last", False) if self.batch_size is None and self.even_batches: raise ValueError( "You need to use `even_batches=False` when the batch sampler has no batch size. If you " "are not calling this method directly, set `accelerator.even_batches=False` instead." ) def total_length(self): return len(self.batch_sampler) def __len__(self): if self.split_batches: # Split batches does not change the length of the batch sampler return len(self.batch_sampler) if len(self.batch_sampler) % self.num_processes == 0: # If the length is a round multiple of the number of processes, it's easy. return len(self.batch_sampler) // self.num_processes length = len(self.batch_sampler) // self.num_processes if self.drop_last: # Same if we drop the remainder. return length elif self.even_batches: # When we even batches we always get +1 return length + 1 else: # Otherwise it depends on the process index. return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length def __iter__(self): return self._iter_with_split() if self.split_batches else self._iter_with_no_split() def _iter_with_split(self): initial_data = [] batch_length = self.batch_sampler.batch_size // self.num_processes for idx, batch in enumerate(self.batch_sampler): if idx == 0: initial_data = batch if len(batch) == self.batch_size: # If the batch is full, we yield the part of it this process is responsible of. yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)] # If drop_last is True of the last batch was full, iteration is over, otherwise... if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size: if not self.even_batches: if len(batch) > batch_length * self.process_index: yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)] else: # For degenerate cases where the dataset has less than num_process * batch_size samples while len(initial_data) < self.batch_size: initial_data += initial_data batch = batch + initial_data yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)] def _iter_with_no_split(self): initial_data = [] batch_to_yield = [] for idx, batch in enumerate(self.batch_sampler): # We gather the initial indices in case we need to circle back at the end. if not self.drop_last and idx < self.num_processes: initial_data += batch # We identify the batch to yield but wait until we ar sure every process gets a full batch before actually # yielding it. 
if idx % self.num_processes == self.process_index: batch_to_yield = batch if idx % self.num_processes == self.num_processes - 1 and ( self.batch_size is None or len(batch) == self.batch_size ): yield batch_to_yield batch_to_yield = [] # If drop_last is True, iteration is over, otherwise... if not self.drop_last and len(initial_data) > 0: if not self.even_batches: if len(batch_to_yield) > 0: yield batch_to_yield else: # ... we yield the complete batch we had saved before if it has the proper length if len(batch_to_yield) == self.batch_size: yield batch_to_yield # For degenerate cases where the dataset has less than num_process * batch_size samples while len(initial_data) < self.num_processes * self.batch_size: initial_data += initial_data # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next if len(batch) == self.batch_size: batch = [] idx += 1 # Make sure we yield a multiple of self.num_processes batches cycle_index = 0 while idx % self.num_processes != 0 or len(batch) > 0: end_index = cycle_index + self.batch_size - len(batch) batch += initial_data[cycle_index:end_index] if idx % self.num_processes == self.process_index: yield batch cycle_index = end_index batch = [] idx += 1 class IterableDatasetShard(IterableDataset): """ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will always yield a number of samples that is a round multiple of the actual batch size (depending of the value of `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would be too small or loop with indices from the beginning. Args: dataset (`torch.utils.data.dataset.IterableDataset`): The batch sampler to split in several shards. batch_size (`int`, *optional*, defaults to 1): The size of the batches per shard (if `split_batches=False`) or the size of the batches (if `split_batches=True`). drop_last (`bool`, *optional*, defaults to `False`): Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the beginning. num_processes (`int`, *optional*, defaults to 1): The number of processes running concurrently. process_index (`int`, *optional*, defaults to 0): The index of the current process. split_batches (`bool`, *optional*, defaults to `False`): Whether the shards should be created by splitting a batch to give a piece of it on each process, or by yielding different full batches on each process. On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in: - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this argument is set to `False`. - the shard on process 0 to yield `[0, 1, 4, 5]` and the sampler on process 1 to yield `[2, 3, 6, 7]` if this argument is set to `True`. """ def __init__( self, dataset: IterableDataset, batch_size: int = 1, drop_last: bool = False, num_processes: int = 1, process_index: int = 0, split_batches: bool = False, ): if split_batches and batch_size > 1 and batch_size % num_processes != 0: raise ValueError( f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) " f"needs to be a round multiple of the number of processes ({num_processes})." 
) self.dataset = dataset self.batch_size = batch_size self.drop_last = drop_last self.num_processes = num_processes self.process_index = process_index self.split_batches = split_batches def set_epoch(self, epoch): self.epoch = epoch if hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def __len__(self): # We will just raise the downstream error if the underlying dataset is not sized if self.drop_last: return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size else: return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size def __iter__(self): if ( not hasattr(self.dataset, "set_epoch") and hasattr(self.dataset, "generator") and isinstance(self.dataset.generator, torch.Generator) ): self.dataset.generator.manual_seed(self.epoch) real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes) process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size) first_batch = None current_batch = [] for element in self.dataset: current_batch.append(element) # Wait to have a full batch before yielding elements. if len(current_batch) == real_batch_size: for i in process_slice: yield current_batch[i] if first_batch is None: first_batch = current_batch.copy() current_batch = [] # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning. if not self.drop_last and len(current_batch) > 0: if first_batch is None: first_batch = current_batch.copy() while len(current_batch) < real_batch_size: current_batch += first_batch for i in process_slice: yield current_batch[i] class DataLoaderShard(DataLoader, DataLoaderStateMixin): """ Subclass of a PyTorch `DataLoader` that will deal with device placement and current distributed setup. Args: dataset (`torch.utils.data.dataset.Dataset`): The dataset to use to build this datalaoder. device (`torch.device`, *optional*): If passed, the device to put all batches on. rng_types (list of `str` or [`~utils.RNGType`]): The list of random number generators to synchronize at the beginning of each iteration. Should be one or several of: - `"torch"`: the base torch random number generator - `"cuda"`: the CUDA random number generator (GPU only) - `"xla"`: the XLA random number generator (TPU only) - `"generator"`: an optional `torch.Generator` synchronized_generator (`torch.Generator`, *optional*): A random number generator to keep synchronized across processes. skip_batches (`int`, *optional*, defaults to 0): The number of batches to skip at the beginning. kwargs: All other keyword arguments to pass to the regular `DataLoader` initialization. **Available attributes:** - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total number of processes - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. 
""" def __init__( self, dataset, device=None, rng_types=None, synchronized_generator=None, skip_batches=0, _drop_last: bool = False, **kwargs, ): super().__init__(dataset, **kwargs) self.device = device self.rng_types = rng_types self.synchronized_generator = synchronized_generator self.skip_batches = skip_batches self.gradient_state = GradientState() self._drop_last = _drop_last self.iteration = 0 def __iter__(self): if self.rng_types is not None: synchronize_rng_states(self.rng_types, self.synchronized_generator) self.begin() self.set_epoch(self.iteration) dataloader_iter = super().__iter__() # We iterate one batch ahead to check when we are at the end try: current_batch = next(dataloader_iter) except StopIteration: yield batch_index = 0 while True: try: # But we still move it to the device so it is done before `StopIteration` is reached if self.device is not None: current_batch = send_to_device(current_batch, self.device) next_batch = next(dataloader_iter) if batch_index >= self.skip_batches: yield current_batch batch_index += 1 current_batch = next_batch except StopIteration: self.end_of_dataloader = True if batch_index >= self.skip_batches: yield current_batch break self.iteration += 1 self.end() def set_epoch(self, epoch: int): # In case it is manually passed in, the user can set it to what they like if self.iteration != epoch: self.iteration = epoch if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"): self.batch_sampler.sampler.set_epoch(epoch) # We support if a custom `Dataset` implementation has `set_epoch` # or in general HF datasets `Datasets` elif hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def total_batch_size(self): batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler return ( batch_sampler.batch_size if getattr(batch_sampler, "split_batches", False) else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1)) ) def total_dataset_length(self): if hasattr(self.dataset, "total_length"): return self.dataset.total_length else: return len(self.dataset) if is_torch_xla_available(): import torch_xla.distributed.parallel_loader as xpl class MpDeviceLoaderWrapper(xpl.MpDeviceLoader): """ Wrapper for the xpl.MpDeviceLoader class that knows the total batch size. XLA preloading threads will all call DataLoaderShard's __iter__(). Remove rng_types from DataLoaderShard to prevent it from using the XLA device in the preloading threads, and synchronize the RNG once from the main thread only. **Available attributes:** - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total number of processes - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. 
""" def __init__(self, dataloader: DataLoaderShard, device: torch.device): super().__init__(dataloader, device) self._rng_types = self._loader.rng_types self._loader.rng_types = None def __iter__(self): if self._rng_types is not None: synchronize_rng_states(self._rng_types, self._loader.synchronized_generator) return super().__iter__() def total_batch_size(self): return self._loader.total_batch_size def total_dataset_length(self): return self._loader.total_dataset_length def batch_sampler(self): return self._loader.batch_sampler class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin): """ Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each process their part of the batch. Args: split_batches (`bool`, *optional*, defaults to `False`): Whether the resulting `DataLoader` should split the batches of the original data loader across devices or yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of `batch_size`. skip_batches (`int`, *optional*, defaults to 0): The number of batches to skip at the beginning of an iteration. **Available attributes:** - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total number of processes - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. """ def __init__( self, dataset, split_batches: bool = False, skip_batches=0, _drop_last: bool = False, slice_fn=None, **kwargs ): shuffle = False if is_torch_version(">=", "1.11.0"): from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe # We need to save the shuffling state of the DataPipe if isinstance(dataset, ShufflerIterDataPipe): shuffle = dataset._shuffle_enabled super().__init__(dataset, **kwargs) self.split_batches = split_batches if shuffle: torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle) self.gradient_state = GradientState() self.state = AcceleratorState() self._drop_last = _drop_last self.skip_batches = skip_batches self.slice_fn = slice_tensors if slice_fn is None else slice_fn self.iteration = 0 def _fetch_batches(self, iterator): batches, batch = None, None # On process 0, we gather the batch to dispatch. if self.state.process_index == 0: try: if self.split_batches: # One batch of the main iterator is dispatched and split. batch = next(iterator) else: # num_processes batches of the main iterator are concatenated then dispatched and split. # We add the batches one by one so we have the remainder available when drop_last=False. batches = [] for _ in range(self.state.num_processes): batches.append(next(iterator)) try: batch = concatenate(batches, dim=0) except RuntimeError as e: raise RuntimeError( "You can't use batches of different size with `dispatch_batches=True` or when using an `IterableDataset`." "either pass `dispatch_batches=False` and have each process fetch its own batch " " or pass `split_batches=True`. 
By doing so, the main process will fetch a full batch and " "slice it into `num_processes` batches for each process." ) from e # In both cases, we need to get the structure of the batch that we will broadcast on other # processes to initialize the tensors with the right shape. # data_structure, stop_iteration batch_info = [get_data_structure(batch), False] except StopIteration: batch_info = [None, True] else: batch_info = [None, self._stop_iteration] # This is inplace, so after this instruction, every process has the same `batch_info` as process 0. broadcast_object_list(batch_info) self._stop_iteration = batch_info[1] if self._stop_iteration: # If drop_last is False and split_batches is False, we may have a remainder to take care of. if not self.split_batches and not self._drop_last: if self.state.process_index == 0 and len(batches) > 0: batch = concatenate(batches, dim=0) batch_info = [get_data_structure(batch), False] else: batch_info = [None, True] broadcast_object_list(batch_info) return batch, batch_info def __iter__(self): self.begin() self.set_epoch(self.iteration) main_iterator = None if is_torch_version(">=", "2.0.1"): # NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts # shared seed to all dist processes. Thus, we need to create iterator for all dist processes. # But, we only iterate through the DataLoader on process 0. main_iterator = super().__iter__() elif self.state.process_index == 0: main_iterator = super().__iter__() stop_iteration = False self._stop_iteration = False first_batch = None next_batch, next_batch_info = self._fetch_batches(main_iterator) batch_index = 0 while not stop_iteration: batch, batch_info = next_batch, next_batch_info if self.state.process_index != 0: # Initialize tensors on other processes than process 0. batch = initialize_tensors(batch_info[0]) batch = send_to_device(batch, self.state.device) # Broadcast the batch before splitting it. batch = broadcast(batch, from_process=0) if not self._drop_last and first_batch is None: # We keep at least num processes elements of the first batch to be able to complete the last batch first_batch = self.slice_fn( batch, slice(0, self.state.num_processes), process_index=self.state.process_index, num_processes=self.state.num_processes, ) if batch is None: raise ValueError( f"Batch does not contain any data (`{batch}`). At the end of all iterable data available before expected stop iteration." ) observed_batch_size = find_batch_size(batch) batch_size = observed_batch_size // self.state.num_processes stop_iteration = self._stop_iteration if not stop_iteration: # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in # the dataloader since the number of batches is a round multiple of the number of processes. next_batch, next_batch_info = self._fetch_batches(main_iterator) # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them. if self._stop_iteration and next_batch_info[0] is None: stop_iteration = True if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0: # If the last batch is not complete, let's add the first batch to it. batch = concatenate([batch, first_batch], dim=0) # Batch size computation above is wrong, it's off by 1 so we fix it. 
batch_size += 1 data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size) batch = self.slice_fn( batch, data_slice, process_index=self.state.process_index, num_processes=self.state.num_processes, ) if stop_iteration: self.end_of_dataloader = True self.remainder = observed_batch_size if batch_index >= self.skip_batches: yield batch batch_index += 1 self.iteration += 1 self.end() def set_epoch(self, epoch: int): # In case it is manually passed in, the user can set it to what they like if self.iteration != epoch: self.iteration = epoch if hasattr(self.batch_sampler.sampler, "set_epoch"): self.batch_sampler.sampler.set_epoch(epoch) elif hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def __len__(self): whole_length = super().__len__() if self.split_batches: return whole_length elif self._drop_last: return whole_length // self.state.num_processes else: return math.ceil(whole_length / self.state.num_processes) def total_batch_size(self): return ( self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes) ) def total_dataset_length(self): return len(self.dataset) class AcceleratorState: """ Singleton class that has information about the current training environment. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`. - **local_process_index** (`int`) -- The index of the current process on the current server. - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. """ _shared_state = SharedDict() def __init__( self, mixed_precision: str = None, cpu: bool = False, dynamo_plugin=None, deepspeed_plugin=None, fsdp_plugin=None, megatron_lm_plugin=None, _from_accelerator: bool = False, **kwargs, ): self.__dict__ = self._shared_state if parse_flag_from_env("ACCELERATE_USE_CPU"): cpu = True if PartialState._shared_state == {}: PartialState(cpu, **kwargs) self.__dict__.update(PartialState._shared_state) self._check_initialized(mixed_precision, cpu) if not self.initialized: self.deepspeed_plugin = None self.use_ipex = None mixed_precision = ( parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision.lower() ) if mixed_precision == "fp8": if not is_fp8_available(): raise ValueError( "Using `fp8` precision requires `transformer_engine` or `MS-AMP` to be installed." ) elif not check_fp8_capability(): logger.warning( f"The current device has compute capability of {torch.cuda.get_device_capability()} which is " "insufficient for FP8 mixed precision training (requires a GPU Hopper/Ada Lovelace " "or higher, compute capability of 8.9 or higher). 
Will use FP16 instead." ) mixed_precision = "fp16" self.dynamo_plugin = dynamo_plugin if not _from_accelerator: raise ValueError( "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` " "before using any functionality from the `accelerate` library." ) # deepspeed handles mixed_precision using deepspeed_config self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision if self.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_tpu=True): if mixed_precision == "bf16": if os.environ.get("ACCELERATE_DOWNCAST_BF16"): os.environ["XLA_USE_BF16"] = str(0) os.environ["XLA_DOWNCAST_BF16"] = str(1) self.downcast_bfloat = True else: os.environ["XLA_USE_BF16"] = str(1) os.environ["XLA_DOWNCAST_BF16"] = str(0) self.downcast_bfloat = False elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu: self.deepspeed_plugin = deepspeed_plugin elif self.distributed_type == DistributedType.MULTI_GPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true": self.distributed_type = DistributedType.MEGATRON_LM megatron_lm_plugin.set_mixed_precision(self._mixed_precision) self.megatron_lm_plugin = megatron_lm_plugin elif self.distributed_type == DistributedType.MULTI_NPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]: if is_ipex_available(): "check if user disables it explicitly" self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True) else: self.use_ipex = False if self.distributed_type == DistributedType.MULTI_XPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin if ( self.dynamo_plugin.backend != DynamoBackend.NO and self._mixed_precision == "no" and self.device.type == "cuda" ): torch.backends.cuda.matmul.allow_tf32 = True PartialState._shared_state["distributed_type"] = self.distributed_type def initialized(self) -> bool: return self._shared_state != PartialState._shared_state def __repr__(self): repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n" if self.distributed_type == DistributedType.DEEPSPEED: repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n" return repr def _check_initialized(self, mixed_precision=None, cpu=None): "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized" if self.initialized: err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`." 
if cpu and self.device.type != "cpu": raise ValueError(err.format(flag="cpu=True")) if ( mixed_precision is not None and mixed_precision != self._mixed_precision and self.distributed_type != DistributedType.DEEPSPEED ): raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'")) # For backward compatibility def use_fp16(self): warnings.warn( "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use " "`AcceleratorState.mixed_precision == 'fp16'` instead.", FutureWarning, ) return self._mixed_precision != "no" def mixed_precision(self): if self.distributed_type == DistributedType.DEEPSPEED: config = self.deepspeed_plugin.deepspeed_config if config.get("fp16", {}).get("enabled", False): mixed_precision = "fp16" elif config.get("bf16", {}).get("enabled", False): mixed_precision = "bf16" else: mixed_precision = "no" else: mixed_precision = self._mixed_precision return mixed_precision def _reset_state(reset_partial_state: bool = False): "Resets `_shared_state`, is used internally and should not be called" AcceleratorState._shared_state.clear() if reset_partial_state: PartialState._reset_state() def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return PartialState().use_distributed def is_last_process(self) -> bool: "Returns whether the current process is the last one" return PartialState().is_last_process def is_main_process(self) -> bool: "Returns whether the current process is the main process" return PartialState().is_main_process def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return PartialState().is_local_main_process def wait_for_everyone(self): PartialState().wait_for_everyone() def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate.state import AcceleratorState state = AcceleratorState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs: yield inputs def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. """ with PartialState().main_process_first(): yield def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. 
""" with PartialState().local_main_process_first(): yield def print(self, *args, **kwargs): PartialState().print(*args, **kwargs) The provided code snippet includes necessary dependencies for implementing the `prepare_data_loader` function. Write a Python function `def prepare_data_loader( dataloader: DataLoader, device: Optional[torch.device] = None, num_processes: Optional[int] = None, process_index: Optional[int] = None, split_batches: bool = False, put_on_device: bool = False, rng_types: Optional[List[Union[str, RNGType]]] = None, dispatch_batches: Optional[bool] = None, even_batches: bool = True, slice_fn_for_dispatch: Optional[Callable] = None, use_seedable_sampler: bool = False, ) -> DataLoader` to solve the following problem: Wraps a PyTorch `DataLoader` to generate batches for one of the processes only. Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration at the first batch that would be too small / not present on all processes or loop with indices from the beginning. Args: dataloader (`torch.utils.data.dataloader.DataLoader`): The data loader to split across several devices. device (`torch.device`): The target device for the returned `DataLoader`. num_processes (`int`, *optional*): The number of processes running concurrently. Will default to the value given by [`~state.AcceleratorState`]. process_index (`int`, *optional*): The index of the current process. Will default to the value given by [`~state.AcceleratorState`]. split_batches (`bool`, *optional*, defaults to `False`): Whether the resulting `DataLoader` should split the batches of the original data loader across devices or yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of `batch_size`. put_on_device (`bool`, *optional*, defaults to `False`): Whether or not to put the batches on `device` (only works if the batches are nested list, tuples or dictionaries of tensors). rng_types (list of `str` or [`~utils.RNGType`]): The list of random number generators to synchronize at the beginning of each iteration. Should be one or several of: - `"torch"`: the base torch random number generator - `"cuda"`: the CUDA random number generator (GPU only) - `"xla"`: the XLA random number generator (TPU only) - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type. dispatch_batches (`bool`, *optional*): If set to `True`, the datalaoder prepared is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` when the underlying dataset is an `IterableDataset`, `False` otherwise. even_batches (`bool`, *optional*, defaults to `True`): If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers. 
slice_fn_for_dispatch (`Callable`, *optional*`): If passed, this function will be used to slice tensors across `num_processes`. Will default to [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be ignored otherwise. use_seedable_sampler (`bool`, *optional*, defaults to `False`): Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better reproducability. Comes at a cost of potentially different performances due to different shuffling algorithms but ensures results will be the *exact* same. Should be paired with `set_seed()` at every `self.set_epoch` Returns: `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches <Tip warning={true}> `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches` equal to `False` </Tip> Here is the function: def prepare_data_loader( dataloader: DataLoader, device: Optional[torch.device] = None, num_processes: Optional[int] = None, process_index: Optional[int] = None, split_batches: bool = False, put_on_device: bool = False, rng_types: Optional[List[Union[str, RNGType]]] = None, dispatch_batches: Optional[bool] = None, even_batches: bool = True, slice_fn_for_dispatch: Optional[Callable] = None, use_seedable_sampler: bool = False, ) -> DataLoader: """ Wraps a PyTorch `DataLoader` to generate batches for one of the processes only. Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration at the first batch that would be too small / not present on all processes or loop with indices from the beginning. Args: dataloader (`torch.utils.data.dataloader.DataLoader`): The data loader to split across several devices. device (`torch.device`): The target device for the returned `DataLoader`. num_processes (`int`, *optional*): The number of processes running concurrently. Will default to the value given by [`~state.AcceleratorState`]. process_index (`int`, *optional*): The index of the current process. Will default to the value given by [`~state.AcceleratorState`]. split_batches (`bool`, *optional*, defaults to `False`): Whether the resulting `DataLoader` should split the batches of the original data loader across devices or yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of `batch_size`. put_on_device (`bool`, *optional*, defaults to `False`): Whether or not to put the batches on `device` (only works if the batches are nested list, tuples or dictionaries of tensors). rng_types (list of `str` or [`~utils.RNGType`]): The list of random number generators to synchronize at the beginning of each iteration. Should be one or several of: - `"torch"`: the base torch random number generator - `"cuda"`: the CUDA random number generator (GPU only) - `"xla"`: the XLA random number generator (TPU only) - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type. 
dispatch_batches (`bool`, *optional*): If set to `True`, the datalaoder prepared is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` when the underlying dataset is an `IterableDataset`, `False` otherwise. even_batches (`bool`, *optional*, defaults to `True`): If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers. slice_fn_for_dispatch (`Callable`, *optional*`): If passed, this function will be used to slice tensors across `num_processes`. Will default to [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be ignored otherwise. use_seedable_sampler (`bool`, *optional*, defaults to `False`): Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better reproducability. Comes at a cost of potentially different performances due to different shuffling algorithms but ensures results will be the *exact* same. Should be paired with `set_seed()` at every `self.set_epoch` Returns: `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches <Tip warning={true}> `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches` equal to `False` </Tip> """ if dispatch_batches is None: if not put_on_device: dispatch_batches = False else: dispatch_batches = isinstance(dataloader.dataset, IterableDataset) if dispatch_batches and not put_on_device: raise ValueError("Using `dispatch_batches=True` requires `put_on_device=True`.") # Grab defaults from AcceleratorState state = AcceleratorState() if num_processes is None: num_processes = state.num_processes if process_index is None: process_index = state.process_index # Sanity check if split_batches: if dataloader.batch_size is not None: batch_size_for_check = dataloader.batch_size else: # For custom batch_sampler if hasattr(dataloader.batch_sampler, "batch_size"): batch_size_for_check = dataloader.batch_sampler.batch_size else: raise ValueError( "In order to use `split_batches==True` you must have a `batch_size` attribute either in the passed " "`dataloader` or `dataloader.batch_sampler` objects, and it has to return a natural number. " "Your `dataloader.batch_size` is None and `dataloader.batch_sampler` " f"(`{type(dataloader.batch_sampler)}`) does not have the `batch_size` attribute set." ) if batch_size_for_check > 1 and batch_size_for_check % num_processes != 0: raise ValueError( f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) " f"needs to be a round multiple of the number of processes ({num_processes})." 
) new_dataset = dataloader.dataset # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None sampler_is_batch_sampler = False synchronized_generator = None sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) if sampler_is_batch_sampler: sampler = getattr(dataloader.sampler, "sampler", None) else: sampler = getattr(dataloader.batch_sampler, "sampler", None) if isinstance(sampler, RandomSampler) and use_seedable_sampler: # When iterating through the dataloader during distributed processes # we want to ensure that on each process we are iterating through the same # samples in the same order if a seed is set. This requires a tweak # to the `torch.utils.data.RandomSampler` class (if used). sampler = SeedableRandomSampler( data_source=sampler.data_source, replacement=sampler.replacement, num_samples=sampler._num_samples, generator=getattr(sampler, "generator", torch.Generator()), ) if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA: # isinstance(dataloader.sampler, RandomSampler) indicates the original dataloader has `shuffle` enabled. generator = torch.Generator().manual_seed(42) dataloader.generator = generator dataloader.sampler.generator = generator # No change if no multiprocess if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches: if isinstance(new_dataset, IterableDataset): if getattr(dataloader.dataset, "generator", None) is not None: synchronized_generator = dataloader.dataset.generator new_dataset = IterableDatasetShard( new_dataset, batch_size=dataloader.batch_size, drop_last=dataloader.drop_last, num_processes=num_processes, process_index=process_index, split_batches=split_batches, ) else: batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler new_batch_sampler = BatchSamplerShard( batch_sampler, num_processes=num_processes, process_index=process_index, split_batches=split_batches, even_batches=even_batches, ) # We ignore all of those since they are all dealt with by our new_batch_sampler ignore_kwargs = [ "batch_size", "shuffle", "sampler", "batch_sampler", "drop_last", ] if rng_types is not None and synchronized_generator is None and "generator" in rng_types: rng_types.remove("generator") kwargs = { k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS if k not in ignore_kwargs } # Need to provide batch_size as batch_sampler is None for Iterable dataset if new_batch_sampler is None: kwargs["drop_last"] = dataloader.drop_last kwargs["batch_size"] = ( dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size ) if dispatch_batches: kwargs.pop("generator") dataloader = DataLoaderDispatcher( new_dataset, split_batches=split_batches, batch_sampler=new_batch_sampler, _drop_last=dataloader.drop_last, slice_fn=slice_fn_for_dispatch, **kwargs, ) elif sampler_is_batch_sampler: dataloader = DataLoaderShard( new_dataset, device=device if put_on_device and state.distributed_type != DistributedType.XLA else None, sampler=new_batch_sampler, batch_size=dataloader.batch_size, rng_types=rng_types, _drop_last=dataloader.drop_last, synchronized_generator=synchronized_generator, **kwargs, ) else: dataloader = DataLoaderShard( new_dataset, device=device if put_on_device and state.distributed_type != DistributedType.XLA else 
None, batch_sampler=new_batch_sampler, rng_types=rng_types, synchronized_generator=synchronized_generator, _drop_last=dataloader.drop_last, **kwargs, ) if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler: if sampler_is_batch_sampler: dataloader.sampler.sampler = sampler else: dataloader.batch_sampler.sampler = sampler if state.distributed_type == DistributedType.XLA: return MpDeviceLoaderWrapper(dataloader, device) return dataloader
Wraps a PyTorch `DataLoader` to generate batches for one of the processes only. Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration at the first batch that would be too small / not present on all processes or loop with indices from the beginning. Args: dataloader (`torch.utils.data.dataloader.DataLoader`): The data loader to split across several devices. device (`torch.device`): The target device for the returned `DataLoader`. num_processes (`int`, *optional*): The number of processes running concurrently. Will default to the value given by [`~state.AcceleratorState`]. process_index (`int`, *optional*): The index of the current process. Will default to the value given by [`~state.AcceleratorState`]. split_batches (`bool`, *optional*, defaults to `False`): Whether the resulting `DataLoader` should split the batches of the original data loader across devices or yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing by `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of `num_processes`. put_on_device (`bool`, *optional*, defaults to `False`): Whether or not to put the batches on `device` (only works if the batches are nested lists, tuples or dictionaries of tensors). rng_types (list of `str` or [`~utils.RNGType`]): The list of random number generators to synchronize at the beginning of each iteration. Should be one or several of: - `"torch"`: the base torch random number generator - `"cuda"`: the CUDA random number generator (GPU only) - `"xla"`: the XLA random number generator (TPU only) - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type. dispatch_batches (`bool`, *optional*): If set to `True`, the dataloader prepared is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` when the underlying dataset is an `IterableDataset`, `False` otherwise. even_batches (`bool`, *optional*, defaults to `True`): If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers. slice_fn_for_dispatch (`Callable`, *optional*): If passed, this function will be used to slice tensors across `num_processes`. Will default to [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be ignored otherwise. use_seedable_sampler (`bool`, *optional*, defaults to `False`): Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better reproducibility. Comes at a cost of potentially different performance due to different shuffling algorithms, but ensures results will be the *exact* same. Should be paired with `set_seed()` at every `self.set_epoch` call. Returns: `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches for the current process <Tip warning={true}> `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches` equal to `False` </Tip>
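For context, a minimal sketch of how `prepare_data_loader` might be called directly. It assumes the script runs under a distributed launcher such as `accelerate launch` and that an `Accelerator` is created first (which initializes the `AcceleratorState` the function reads its defaults from); the dataset and batch size below are illustrative placeholders.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.data_loader import prepare_data_loader

accelerator = Accelerator()  # initializes AcceleratorState before prepare_data_loader reads it

# Illustrative data: 64 samples with 10 features each, batches of 8.
dataset = TensorDataset(torch.randn(64, 10))
dataloader = DataLoader(dataset, batch_size=8, shuffle=True)

sharded = prepare_data_loader(
    dataloader,
    device=accelerator.device,
    put_on_device=True,  # move each batch to this process's device
)

for (batch,) in sharded:
    pass  # each process iterates only over its own shard of the batches
```

With `split_batches=False` (the default), each process still sees batches of size 8, so the effective global batch size is 8 times the number of processes.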
2,888
import math from contextlib import suppress from typing import Callable, List, Optional, Union import torch from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler from .logging import get_logger from .state import AcceleratorState, DistributedType, GradientState, is_torch_xla_available from .utils import ( RNGType, broadcast, broadcast_object_list, concatenate, find_batch_size, get_data_structure, initialize_tensors, is_torch_version, send_to_device, slice_tensors, synchronize_rng_states, ) _PYTORCH_DATALOADER_KWARGS = { "batch_size": 1, "shuffle": False, "sampler": None, "batch_sampler": None, "num_workers": 0, "collate_fn": None, "pin_memory": False, "drop_last": False, "timeout": 0, "worker_init_fn": None, "multiprocessing_context": None, "generator": None, "prefetch_factor": 2, "persistent_workers": False, } class DataLoaderShard(DataLoader, DataLoaderStateMixin): """ Subclass of a PyTorch `DataLoader` that will deal with device placement and current distributed setup. Args: dataset (`torch.utils.data.dataset.Dataset`): The dataset to use to build this datalaoder. device (`torch.device`, *optional*): If passed, the device to put all batches on. rng_types (list of `str` or [`~utils.RNGType`]): The list of random number generators to synchronize at the beginning of each iteration. Should be one or several of: - `"torch"`: the base torch random number generator - `"cuda"`: the CUDA random number generator (GPU only) - `"xla"`: the XLA random number generator (TPU only) - `"generator"`: an optional `torch.Generator` synchronized_generator (`torch.Generator`, *optional*): A random number generator to keep synchronized across processes. skip_batches (`int`, *optional*, defaults to 0): The number of batches to skip at the beginning. kwargs: All other keyword arguments to pass to the regular `DataLoader` initialization. **Available attributes:** - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total number of processes - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. 
""" def __init__( self, dataset, device=None, rng_types=None, synchronized_generator=None, skip_batches=0, _drop_last: bool = False, **kwargs, ): super().__init__(dataset, **kwargs) self.device = device self.rng_types = rng_types self.synchronized_generator = synchronized_generator self.skip_batches = skip_batches self.gradient_state = GradientState() self._drop_last = _drop_last self.iteration = 0 def __iter__(self): if self.rng_types is not None: synchronize_rng_states(self.rng_types, self.synchronized_generator) self.begin() self.set_epoch(self.iteration) dataloader_iter = super().__iter__() # We iterate one batch ahead to check when we are at the end try: current_batch = next(dataloader_iter) except StopIteration: yield batch_index = 0 while True: try: # But we still move it to the device so it is done before `StopIteration` is reached if self.device is not None: current_batch = send_to_device(current_batch, self.device) next_batch = next(dataloader_iter) if batch_index >= self.skip_batches: yield current_batch batch_index += 1 current_batch = next_batch except StopIteration: self.end_of_dataloader = True if batch_index >= self.skip_batches: yield current_batch break self.iteration += 1 self.end() def set_epoch(self, epoch: int): # In case it is manually passed in, the user can set it to what they like if self.iteration != epoch: self.iteration = epoch if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"): self.batch_sampler.sampler.set_epoch(epoch) # We support if a custom `Dataset` implementation has `set_epoch` # or in general HF datasets `Datasets` elif hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def total_batch_size(self): batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler return ( batch_sampler.batch_size if getattr(batch_sampler, "split_batches", False) else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1)) ) def total_dataset_length(self): if hasattr(self.dataset, "total_length"): return self.dataset.total_length else: return len(self.dataset) class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin): """ Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each process their part of the batch. Args: split_batches (`bool`, *optional*, defaults to `False`): Whether the resulting `DataLoader` should split the batches of the original data loader across devices or yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of `batch_size`. skip_batches (`int`, *optional*, defaults to 0): The number of batches to skip at the beginning of an iteration. **Available attributes:** - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total number of processes - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. 
""" def __init__( self, dataset, split_batches: bool = False, skip_batches=0, _drop_last: bool = False, slice_fn=None, **kwargs ): shuffle = False if is_torch_version(">=", "1.11.0"): from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe # We need to save the shuffling state of the DataPipe if isinstance(dataset, ShufflerIterDataPipe): shuffle = dataset._shuffle_enabled super().__init__(dataset, **kwargs) self.split_batches = split_batches if shuffle: torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle) self.gradient_state = GradientState() self.state = AcceleratorState() self._drop_last = _drop_last self.skip_batches = skip_batches self.slice_fn = slice_tensors if slice_fn is None else slice_fn self.iteration = 0 def _fetch_batches(self, iterator): batches, batch = None, None # On process 0, we gather the batch to dispatch. if self.state.process_index == 0: try: if self.split_batches: # One batch of the main iterator is dispatched and split. batch = next(iterator) else: # num_processes batches of the main iterator are concatenated then dispatched and split. # We add the batches one by one so we have the remainder available when drop_last=False. batches = [] for _ in range(self.state.num_processes): batches.append(next(iterator)) try: batch = concatenate(batches, dim=0) except RuntimeError as e: raise RuntimeError( "You can't use batches of different size with `dispatch_batches=True` or when using an `IterableDataset`." "either pass `dispatch_batches=False` and have each process fetch its own batch " " or pass `split_batches=True`. By doing so, the main process will fetch a full batch and " "slice it into `num_processes` batches for each process." ) from e # In both cases, we need to get the structure of the batch that we will broadcast on other # processes to initialize the tensors with the right shape. # data_structure, stop_iteration batch_info = [get_data_structure(batch), False] except StopIteration: batch_info = [None, True] else: batch_info = [None, self._stop_iteration] # This is inplace, so after this instruction, every process has the same `batch_info` as process 0. broadcast_object_list(batch_info) self._stop_iteration = batch_info[1] if self._stop_iteration: # If drop_last is False and split_batches is False, we may have a remainder to take care of. if not self.split_batches and not self._drop_last: if self.state.process_index == 0 and len(batches) > 0: batch = concatenate(batches, dim=0) batch_info = [get_data_structure(batch), False] else: batch_info = [None, True] broadcast_object_list(batch_info) return batch, batch_info def __iter__(self): self.begin() self.set_epoch(self.iteration) main_iterator = None if is_torch_version(">=", "2.0.1"): # NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts # shared seed to all dist processes. Thus, we need to create iterator for all dist processes. # But, we only iterate through the DataLoader on process 0. main_iterator = super().__iter__() elif self.state.process_index == 0: main_iterator = super().__iter__() stop_iteration = False self._stop_iteration = False first_batch = None next_batch, next_batch_info = self._fetch_batches(main_iterator) batch_index = 0 while not stop_iteration: batch, batch_info = next_batch, next_batch_info if self.state.process_index != 0: # Initialize tensors on other processes than process 0. batch = initialize_tensors(batch_info[0]) batch = send_to_device(batch, self.state.device) # Broadcast the batch before splitting it. 
batch = broadcast(batch, from_process=0) if not self._drop_last and first_batch is None: # We keep at least num processes elements of the first batch to be able to complete the last batch first_batch = self.slice_fn( batch, slice(0, self.state.num_processes), process_index=self.state.process_index, num_processes=self.state.num_processes, ) if batch is None: raise ValueError( f"Batch does not contain any data (`{batch}`). At the end of all iterable data available before expected stop iteration." ) observed_batch_size = find_batch_size(batch) batch_size = observed_batch_size // self.state.num_processes stop_iteration = self._stop_iteration if not stop_iteration: # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in # the dataloader since the number of batches is a round multiple of the number of processes. next_batch, next_batch_info = self._fetch_batches(main_iterator) # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them. if self._stop_iteration and next_batch_info[0] is None: stop_iteration = True if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0: # If the last batch is not complete, let's add the first batch to it. batch = concatenate([batch, first_batch], dim=0) # Batch size computation above is wrong, it's off by 1 so we fix it. batch_size += 1 data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size) batch = self.slice_fn( batch, data_slice, process_index=self.state.process_index, num_processes=self.state.num_processes, ) if stop_iteration: self.end_of_dataloader = True self.remainder = observed_batch_size if batch_index >= self.skip_batches: yield batch batch_index += 1 self.iteration += 1 self.end() def set_epoch(self, epoch: int): # In case it is manually passed in, the user can set it to what they like if self.iteration != epoch: self.iteration = epoch if hasattr(self.batch_sampler.sampler, "set_epoch"): self.batch_sampler.sampler.set_epoch(epoch) elif hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def __len__(self): whole_length = super().__len__() if self.split_batches: return whole_length elif self._drop_last: return whole_length // self.state.num_processes else: return math.ceil(whole_length / self.state.num_processes) def total_batch_size(self): return ( self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes) ) def total_dataset_length(self): return len(self.dataset) class SkipBatchSampler(BatchSampler): """ A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`. """ def __init__(self, batch_sampler, skip_batches=0): self.batch_sampler = batch_sampler self.skip_batches = skip_batches def __iter__(self): for index, samples in enumerate(self.batch_sampler): if index >= self.skip_batches: yield samples def total_length(self): return len(self.batch_sampler) def __len__(self): return len(self.batch_sampler) - self.skip_batches class SkipDataLoader(DataLoader): """ Subclass of a PyTorch `DataLoader` that will skip the first batches. Args: dataset (`torch.utils.data.dataset.Dataset`): The dataset to use to build this datalaoder. skip_batches (`int`, *optional*, defaults to 0): The number of batches to skip at the beginning. kwargs: All other keyword arguments to pass to the regular `DataLoader` initialization. 
""" def __init__(self, dataset, skip_batches=0, **kwargs): super().__init__(dataset, **kwargs) self.skip_batches = skip_batches def __iter__(self): for index, batch in enumerate(super().__iter__()): if index >= self.skip_batches: yield batch The provided code snippet includes necessary dependencies for implementing the `skip_first_batches` function. Write a Python function `def skip_first_batches(dataloader, num_batches=0)` to solve the following problem: Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`. Here is the function: def skip_first_batches(dataloader, num_batches=0): """ Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`. """ dataset = dataloader.dataset sampler_is_batch_sampler = False if isinstance(dataset, IterableDataset): new_batch_sampler = None else: sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches) # We ignore all of those since they are all dealt with by our new_batch_sampler ignore_kwargs = [ "batch_size", "shuffle", "sampler", "batch_sampler", "drop_last", ] kwargs = { k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS if k not in ignore_kwargs } # Need to provide batch_size as batch_sampler is None for Iterable dataset if new_batch_sampler is None: kwargs["drop_last"] = dataloader.drop_last kwargs["batch_size"] = dataloader.batch_size if isinstance(dataloader, DataLoaderDispatcher): if new_batch_sampler is None: # Need to manually skip batches in the dataloader kwargs["skip_batches"] = num_batches dataloader = DataLoaderDispatcher( dataset, split_batches=dataloader.split_batches, batch_sampler=new_batch_sampler, _drop_last=dataloader._drop_last, **kwargs, ) elif isinstance(dataloader, DataLoaderShard): if new_batch_sampler is None: # Need to manually skip batches in the dataloader kwargs["skip_batches"] = num_batches elif sampler_is_batch_sampler: kwargs["sampler"] = new_batch_sampler kwargs["batch_size"] = dataloader.batch_size else: kwargs["batch_sampler"] = new_batch_sampler dataloader = DataLoaderShard( dataset, device=dataloader.device, rng_types=dataloader.rng_types, synchronized_generator=dataloader.synchronized_generator, **kwargs, ) else: if new_batch_sampler is None: # Need to manually skip batches in the dataloader dataloader = SkipDataLoader(dataset, skip_batches=num_batches, **kwargs) else: dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs) return dataloader
Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.
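As a usage note, `skip_first_batches` is typically paired with checkpoint resumption so a restarted run does not revisit batches it already trained on mid-epoch. A small sketch, assuming `accelerate` is installed; the skip count stands in for whatever your checkpoint metadata records.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator, skip_first_batches

accelerator = Accelerator()
dataset = TensorDataset(torch.arange(1000, dtype=torch.float32))
dataloader = accelerator.prepare(DataLoader(dataset, batch_size=4))

# Suppose a checkpoint recorded that 100 batches of the current epoch were consumed
# (the number here is a placeholder for your own checkpoint metadata).
resumed = skip_first_batches(dataloader, num_batches=100)

for (batch,) in resumed:
    pass  # iteration starts at what would have been batch index 100
```

Because the skipping happens in the sampler (or in `SkipDataLoader` for iterable datasets), the skipped batches are never collated or moved to the device, which is what makes the skip efficient.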
2,889
import math from types import MethodType from typing import Any, Dict, List, Optional, Tuple, Union from .state import PartialState from .utils import ( calculate_maximum_sizes, convert_bytes, copy_tensor_to_devices, ignorant_find_batch_size, infer_auto_device_map, is_pippy_available, pad_input_tensors, send_to_device, ) if is_pippy_available(): from pippy.IR import Pipe, PipeSplitWrapper, annotate_split_points from pippy.PipelineStage import PipelineStage def generate_device_map(model, num_processes: int = 1, no_split_module_classes=None, max_memory: dict = None): """ Calculates the device map for `model` with an offset for PiPPy """ if num_processes == 1: return infer_auto_device_map(model, no_split_module_classes=no_split_module_classes, clean_result=False) if max_memory is None: model_size, shared = calculate_maximum_sizes(model) # Split into `n` chunks for each GPU memory = (model_size + shared[0]) / num_processes memory = convert_bytes(memory) value, ending = memory.split(" ") # Add a chunk to deal with potential extra shared memory instances memory = math.ceil(float(value)) * 1.1 memory = f"{memory} {ending}" max_memory = {i: memory for i in range(num_processes)} device_map = infer_auto_device_map( model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, clean_result=False, ) return device_map def build_pipeline(model, split_points, args, kwargs, num_chunks): """ Attaches the split points to the model based on `self.device_map` and generates a `PipelineStage`. Requires passing in needed `args` and `kwargs` as the model needs on the CPU. Users can pass in custom `num_chunks` as an optional hyper-parameter. By default will use `AcceleratorState.num_processes` """ # We need to annotate the split points in the model for PiPPy state = PartialState() annotate_split_points(model, {split_point: PipeSplitWrapper.SplitPoint.BEGINNING for split_point in split_points}) found_batch_size = find_pippy_batch_size(args, kwargs) if found_batch_size != num_chunks: if args is not None: args = pad_input_tensors(args, found_batch_size, num_chunks) if kwargs is not None: kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks) pipe = Pipe.from_tracing(model, num_chunks=num_chunks, example_args=args, example_kwargs=kwargs) stage = PipelineStage(pipe, state.local_process_index, device=state.device) return stage def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs): state = PartialState() output = None if state.num_processes == 1: output = forward(*args, **kwargs) elif state.is_local_main_process: found_batch_size = find_pippy_batch_size(args, kwargs) if found_batch_size is None: raise ValueError("Could not find batch size from args or kwargs") else: if found_batch_size != num_chunks: args = pad_input_tensors(args, found_batch_size, num_chunks) kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks) forward(*args, **kwargs) elif state.is_last_process: output = forward() else: forward() if gather_output: # Each node will get a copy of the full output which is only on the last GPU output = copy_tensor_to_devices(output) return output class PartialState: """ Singleton class that has information about the current training environment and functions to help with process control. Designed to be used when only process control and device execution states are needed. Does *not* need to be initialized from `Accelerator`. **Available attributes:** - **device** (`torch.device`) -- The device to use. 
- **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **local_process_index** (`int`) -- The index of the current process on the current server. - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. """ _shared_state = SharedDict() def __init__(self, cpu: bool = False, **kwargs): self.__dict__ = self._shared_state if not self.initialized: self._cpu = cpu self.backend = None env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None) self.device = torch.device(env_device) if env_device is not None else None self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE") use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None) if use_sagemaker_dp is None: use_sagemaker_dp = ( os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true" and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO ) if use_sagemaker_dp and not cpu: if ( os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL ) or use_sagemaker_dp: self.distributed_type = DistributedType.MULTI_GPU import smdistributed.dataparallel.torch.torch_smddp # noqa if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="smddp") self.backend = "smddp" self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_torch_xla_available() and not cpu: self.distributed_type = DistributedType.XLA self.device = xm.xla_device() xm.set_replication(self.device, xm.get_xla_supported_devices()) self.num_processes = xm.xrt_world_size() self.process_index = xm.get_ordinal() if is_torch_xla_available(check_is_tpu=True): self.local_process_index = xm.get_local_ordinal() else: self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) elif ( os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu ): assert ( is_deepspeed_available() ), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source" self.distributed_type = DistributedType.DEEPSPEED if not torch.distributed.is_initialized(): from deepspeed import comm as dist # DeepSpeed always uses nccl kwargs.pop("backend", None) if is_xpu_available and is_ccl_available(): # Set DeepSpeed backend to ccl for xpu self.backend = "ccl" os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1") os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0") elif is_npu_available(): self.backend = "hccl" else: self.backend = "nccl" dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, 
**kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: if is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) if self.device is not None: torch.xpu.set_device(self.device) elif is_npu_available(): self.device = torch.device("npu", self.local_process_index) if self.device is not None: torch.npu.set_device(self.device) else: self.device = torch.device("cuda", self.local_process_index) if self.device is not None: torch.cuda.set_device(self.device) if self.device.type == "cuda" and not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. " 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self._mixed_precision = "no" # deepspeed handles mixed_precision using deepspeed_config elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu and torch.cuda.is_available(): self.distributed_type = DistributedType.MULTI_GPU if not torch.distributed.is_initialized(): self.backend = kwargs.pop("backend", "nccl") # Special case for `TrainingArguments`, where `backend` will be `None` if self.backend is None: self.backend = "nccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) if not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. " 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_npu_available() and not cpu and int(os.environ.get("LOCAL_RANK", -1)) != -1: self.distributed_type = DistributedType.MULTI_NPU if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = "hccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("npu", self.local_process_index) torch.npu.set_device(self.device) elif ( get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1 or int(os.environ.get("LOCAL_RANK", -1)) != -1 ): if not cpu and is_xpu_available(): self.distributed_type = DistributedType.MULTI_XPU else: self.distributed_type = DistributedType.MULTI_CPU # Actually, CCL_WORKER_COUNT is a CPU only env var in CCL, no need to set it for XPU. 
if is_ccl_available() and ( get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or self.distributed_type == DistributedType.MULTI_XPU ): if get_ccl_version() >= "1.12": import oneccl_bindings_for_pytorch # noqa: F401 else: import torch_ccl # noqa: F401 backend = "ccl" elif torch.distributed.is_mpi_available(): backend = "mpi" else: backend = "gloo" # Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH rank = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1) local_rank = get_int_from_env( ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 ) local_size = get_int_from_env( ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1, ) self.local_process_index = local_rank os.environ["RANK"] = str(rank) os.environ["WORLD_SIZE"] = str(size) os.environ["LOCAL_RANK"] = str(local_rank) os.environ["LOCAL_WORLD_SIZE"] = str(local_size) if backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU: os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = str(local_size) os.environ["CCL_LOCAL_RANK"] = str(local_rank) if not os.environ.get("MASTER_PORT", None): os.environ["MASTER_PORT"] = "29500" if not os.environ.get("MASTER_ADDR", None): if local_size != size and backend != "mpi": raise ValueError( "Looks like distributed multinode run but MASTER_ADDR env not set, " "please try exporting rank 0's hostname as MASTER_ADDR" ) if ( self.distributed_type == DistributedType.MULTI_CPU and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0 ): import psutil num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size) if num_cpu_threads_per_process == 0: num_cpu_threads_per_process = 1 torch.set_num_threads(num_cpu_threads_per_process) warnings.warn( f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob" " performance." 
) if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = backend torch.distributed.init_process_group(self.backend, rank=rank, world_size=size, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() if cpu: self.device = torch.device("cpu") elif is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) torch.xpu.set_device(self.device) else: self.device = self.default_device else: self.distributed_type = ( DistributedType.NO if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "false" else DistributedType.DEEPSPEED ) self.num_processes = 1 self.process_index = self.local_process_index = 0 if self.device is None: self.device = torch.device("cpu") if cpu else self.default_device self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0) def __repr__(self) -> str: return ( f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n" f"Num processes: {self.num_processes}\n" f"Process index: {self.process_index}\n" f"Local process index: {self.local_process_index}\n" f"Device: {self.device}\n" ) def _reset_state(): "Resets `_shared_state`, is used internally and should not be called" PartialState._shared_state.clear() def initialized(self) -> bool: "Returns whether the `PartialState` has been initialized" return self._shared_state != {} def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return self.distributed_type != DistributedType.NO and self.num_processes > 1 def is_last_process(self) -> bool: "Returns whether the current process is the last one" return self.process_index == self.num_processes - 1 def is_main_process(self) -> bool: "Returns whether the current process is the main process" return ( self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return ( self.local_process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def wait_for_everyone(self): """ Will stop the execution of the current process until every other process has reached that point (so this does nothing when the script is only run in one process). Useful to do before saving a model. Example: ```python >>> # Assuming two GPU processes >>> import time >>> from accelerate.state import PartialState >>> state = PartialState() >>> if state.is_main_process: ... time.sleep(2) >>> else: ... print("I'm waiting for the main process to finish its sleep...") >>> state.wait_for_everyone() >>> # Should print on every process at the same time >>> print("Everyone is here") ``` """ if self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, DistributedType.DEEPSPEED, DistributedType.FSDP, ): torch.distributed.barrier() elif self.distributed_type == DistributedType.XLA: xm.rendezvous("accelerate.utils.wait_for_everyone") def _goes_first(self, is_main: bool): if not is_main: self.wait_for_everyone() yield if is_main: self.wait_for_everyone() def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. 
Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate import PartialState state = PartialState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ if self.num_processes == 1: yield inputs return length = len(inputs) # Nested dictionary of any types if isinstance(inputs, dict): length = len(inputs[list(inputs.keys())[0]]) if not all(len(v) == length for v in inputs.values()): raise ValueError("All values in the dictionary must have the same length") num_samples_per_process = math.ceil(length / self.num_processes) start_index = self.process_index * num_samples_per_process end_index = start_index + num_samples_per_process if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1): end_index = length def _split_values(inputs, start_index, end_index): if isinstance(inputs, (list, tuple, torch.Tensor)): if start_index >= len(inputs): result = inputs[-1:] else: result = inputs[start_index:end_index] if apply_padding: if isinstance(result, torch.Tensor): from accelerate.utils import pad_across_processes, send_to_device # The tensor needs to be on the device before we can pad it tensorized_result = send_to_device(result, self.device) result = pad_across_processes(tensorized_result, pad_index=inputs[-1]) else: result += [result[-1]] * (num_samples_per_process - len(result)) return result elif isinstance(inputs, dict): for key in inputs.keys(): inputs[key] = _split_values(inputs[key], start_index, end_index) return inputs else: if is_datasets_available(): from datasets import Dataset if isinstance(inputs, Dataset): if start_index >= len(inputs): start_index = len(inputs) - 1 if end_index > len(inputs): end_index = len(inputs) result_idcs = list(range(start_index, end_index)) if apply_padding: result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs)) return inputs.select(result_idcs) return inputs yield _split_values(inputs, start_index, end_index) def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> with accelerator.main_process_first(): ... # This will be printed first by process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {accelerator.process_index}") ``` """ yield from self._goes_first(self.is_main_process) def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. 
Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> with state.local_main_process_first(): ... # This will be printed first by local process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {state.local_process_index}") ``` """ yield from self._goes_first(self.is_local_main_process) def on_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the main process. Args: function (`Callable`): The function to decorate. Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> @state.on_main_process ... def print_something(): ... print("This will be printed by process 0 only.") >>> print_something() "This will be printed by process 0 only" ``` """ if not self.initialized: raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.") if self.is_main_process or not self.use_distributed: return function return do_nothing def on_local_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the local main process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 2 servers with 4 processes each. from accelerate.state import PartialState state = PartialState() def print_something(): print("This will be printed by process 0 only on each server.") print_something() # On server 1: "This will be printed by process 0 only" # On server 2: "This will be printed by process 0 only" ``` """ if self.is_local_main_process or not self.use_distributed: return function return do_nothing def on_last_process(self, function: Callable[..., Any]): """ Decorator that only runs the decorated function on the last process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 3" ``` """ if self.is_last_process or not self.use_distributed: return function return do_nothing def on_process(self, function: Callable[..., Any] = None, process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index. Args: function (`Callable`, `optional`): The function to decorate. process_index (`int`, `optional`): The index of the process on which to run the function. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 2" ``` """ if function is None: return partial(self.on_process, process_index=process_index) if (self.process_index == process_index) or (not self.use_distributed): return function return do_nothing def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index on the current node. Args: function (`Callable`, *optional*): The function to decorate. local_process_index (`int`, *optional*): The index of the local process on which to run the function. Example: ```python # Assume we have 2 servers with 4 processes each. 
    from accelerate import Accelerator

    accelerator = Accelerator()


    @accelerator.on_local_process(local_process_index=2)
    def print_something():
        print(f"Printed on process {accelerator.local_process_index}")


    print_something()
    # On server 1:
    "Printed on process 2"
    # On server 2:
    "Printed on process 2"
    ```
    """
    if function is None:
        return partial(self.on_local_process, local_process_index=local_process_index)
    if (self.local_process_index == local_process_index) or (not self.use_distributed):
        return function
    return do_nothing

def print(self, *args, **kwargs):
    if self.is_local_main_process:
        print(*args, **kwargs)

def default_device(self) -> torch.device:
    """
    Returns the default device which is:
    - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True.
    - CUDA if `torch.cuda.is_available()`
    - XPU if `is_xpu_available()`
    - NPU if `is_npu_available()`
    - CPU otherwise
    """
    if is_mps_available():
        os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
        return torch.device("mps")
    elif torch.cuda.is_available():
        return torch.device("cuda")
    elif is_xpu_available():
        return torch.device("xpu:0")
    elif is_npu_available():
        return torch.device("npu")
    else:
        return torch.device("cpu")
Should be a list of layer names in the model to split by otherwise. no_split_module_classes (`List[str]`): A list of class names for layers we don't want to be split. example_args (tuple of model inputs): The expected inputs for the model that uses order-based inputs. Recommended to use this method if possible. example_kwargs (dict of model inputs) The expected inputs for the model that uses dictionary-based inputs. This is a *highly* limiting structure that requires the same keys be present at *all* inference calls. Not recommended unless the prior condition is true for all cases. num_chunks (`int`, defaults to the number of available GPUs): The number of different stages the Pipeline will have. By default it will assign one chunk per GPU, but this can be tuned and played with. In general one should have num_chunks >= num_gpus. gather_output (`bool`, defaults to `False`): If `True`, the output from the last GPU (which holds the true outputs) is sent across to all GPUs. """ if not is_pippy_available(): raise ImportError( "`pippy` was not found to be installed on your system. Please " "install using `pip install torchpippy` or ensure you have at least version 0.2.0" ) state = PartialState() example_args = send_to_device(example_args, "cpu") example_kwargs = send_to_device(example_kwargs, "cpu") if num_chunks is None: num_chunks = state.num_processes if split_points == "auto": device_map = generate_device_map(model, num_chunks, no_split_module_classes=no_split_module_classes) split_points = [] for i in range(1, num_chunks): split_points.append(next(k for k, v in device_map.items() if v == i)) model.hf_split_points = split_points stage = build_pipeline(model, split_points, example_args, example_kwargs, num_chunks) model._original_forward = model.forward model._original_call = model.__call__ model.pippy_stage = stage model.hf_split_points = split_points def forward(*args, **kwargs): return pippy_forward(stage.forward, num_chunks, gather_output, *args, **kwargs) # To act like a decorator so that it can be popped when doing `extract_model_from_parallel` # Note: creates an infinite recursion loop with `generate` model_forward = MethodType(forward, model) forward.__wrapped__ = model_forward model.forward = forward return model
Wraps `model` for pipeline parallel inference. Args: model (`torch.nn.Module`): A model we want to split for pipeline-parallel inference split_points (`str` or `List[str]`, defaults to 'auto'): How to generate the split points and chunk the model across each GPU. 'auto' will find the best balanced split given any model. Should be a list of layer names in the model to split by otherwise. no_split_module_classes (`List[str]`): A list of class names for layers we don't want to be split. example_args (tuple of model inputs): The expected inputs for the model that uses order-based inputs. Recommended to use this method if possible. example_kwargs (dict of model inputs) The expected inputs for the model that uses dictionary-based inputs. This is a *highly* limiting structure that requires the same keys be present at *all* inference calls. Not recommended unless the prior condition is true for all cases. num_chunks (`int`, defaults to the number of available GPUs): The number of different stages the Pipeline will have. By default it will assign one chunk per GPU, but this can be tuned and played with. In general one should have num_chunks >= num_gpus. gather_output (`bool`, defaults to `False`): If `True`, the output from the last GPU (which holds the true outputs) is sent across to all GPUs.
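To make the intended call pattern concrete, here is a minimal usage sketch (not from the source): the model choice, input shape, and two-process launch are illustrative assumptions, and it presumes `transformers` and `torchpippy` are installed, `prepare_pippy` is importable from the top-level `accelerate` package, and the script is started with `accelerate launch --num_processes 2 script.py`.

```python
# Hypothetical sketch: pipeline-parallel inference split across 2 processes.
import torch
from transformers import AutoModelForCausalLM

from accelerate import prepare_pippy  # assumed top-level export

model = AutoModelForCausalLM.from_pretrained("gpt2")
model.eval()

# Dummy batch used only to trace the model and pick balanced split points.
example_inputs = torch.randint(0, model.config.vocab_size, (2, 64))

model = prepare_pippy(
    model,
    split_points="auto",            # one balanced split point per extra process
    example_args=(example_inputs,),
    gather_output=True,             # broadcast the last stage's output to every rank
)

with torch.no_grad():
    output = model(example_inputs)
```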
2,890
import functools from typing import Dict, List, Mapping, Optional, Union import torch import torch.nn as nn from .state import PartialState from .utils import ( PrefixedDataset, find_device, named_module_tensors, send_to_device, set_module_tensor_to_device, ) from .utils.modeling import get_non_persistent_buffers from .utils.other import recursive_getattr def remove_hook_from_module(module: nn.Module, recurse=False): """ Removes any hook attached to a module via `add_hook_to_module`. Args: module (`torch.nn.Module`): The module to attach a hook to. recurse (`bool`, **optional**): Whether to remove the hooks recursively Returns: `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can be discarded). """ if hasattr(module, "_hf_hook"): module._hf_hook.detach_hook(module) delattr(module, "_hf_hook") if hasattr(module, "_old_forward"): # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail. # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409 if "GraphModuleImpl" in str(type(module)): module.__class__.forward = module._old_forward else: module.forward = module._old_forward delattr(module, "_old_forward") if recurse: for child in module.children(): remove_hook_from_module(child, recurse) return module The provided code snippet includes necessary dependencies for implementing the `remove_hook_from_submodules` function. Write a Python function `def remove_hook_from_submodules(module: nn.Module)` to solve the following problem: Recursively removes all hooks attached on the submodules of a given model. Args: module (`torch.nn.Module`): The module on which to remove all hooks. Here is the function: def remove_hook_from_submodules(module: nn.Module): """ Recursively removes all hooks attached on the submodules of a given model. Args: module (`torch.nn.Module`): The module on which to remove all hooks. """ remove_hook_from_module(module) for child in module.children(): remove_hook_from_submodules(child)
Recursively removes all hooks attached on the submodules of a given model. Args: module (`torch.nn.Module`): The module on which to remove all hooks.
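For context, a short sketch of the attach/strip round trip this helper supports; the toy model and the CPU execution device below are assumptions made purely for illustration.

```python
import torch.nn as nn

from accelerate.hooks import attach_align_device_hook, remove_hook_from_submodules

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))

# Attach AlignDevicesHook instances throughout the module tree...
attach_align_device_hook(model, execution_device="cpu")

# ...then recursively strip every hook off again, restoring the plain forwards.
remove_hook_from_submodules(model)
assert all(not hasattr(m, "_hf_hook") for m in model.modules())
```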
2,891
import inspect import warnings import torch from .state import AcceleratorState, GradientState from .utils import DistributedType, honor_type, is_torch_xla_available def move_to_device(state, device): if isinstance(state, (list, tuple)): return honor_type(state, (move_to_device(t, device) for t in state)) elif isinstance(state, dict): return type(state)({k: move_to_device(v, device) for k, v in state.items()}) elif isinstance(state, torch.Tensor): return state.to(device) return state
null
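Although this helper has no docstring, its behavior is easy to show. A tiny sketch (the nested optimizer-style state below is an assumption; `move_to_device` lives in `accelerate.optimizer`):

```python
import torch

from accelerate.optimizer import move_to_device

# Nested containers are walked recursively, container types are preserved via
# `honor_type`, and every tensor leaf is moved to the requested device.
state = {"step": torch.tensor(0), "exp_avg": [torch.zeros(2), torch.ones(2)]}
moved = move_to_device(state, torch.device("cpu"))
assert moved["exp_avg"][0].device.type == "cpu"
```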
2,892
import inspect import warnings import torch from .state import AcceleratorState, GradientState from .utils import DistributedType, honor_type, is_torch_xla_available class AcceleratedOptimizer(torch.optim.Optimizer): """ Internal wrapper around a torch optimizer. Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient accumulation. Args: optimizer (`torch.optim.optimizer.Optimizer`): The optimizer to wrap. device_placement (`bool`, *optional*, defaults to `True`): Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of `optimizer` on the right device. scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*): The scaler to use in the step function if training with mixed precision. """ def __init__(self, optimizer, device_placement=True, scaler=None): self.optimizer = optimizer self.scaler = scaler self.accelerator_state = AcceleratorState() self.gradient_state = GradientState() self.device_placement = device_placement self._is_overflow = False if self.scaler is not None: self._accelerate_step_called = False self._optimizer_original_step_method = self.optimizer.step self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step) # Handle device placement if device_placement: state_dict = self.optimizer.state_dict() if self.accelerator_state.distributed_type == DistributedType.XLA: xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) else: state_dict = move_to_device(state_dict, self.accelerator_state.device) self.optimizer.load_state_dict(state_dict) def state(self): return self.optimizer.state def state(self, state): self.optimizer.state = state def param_groups(self): return self.optimizer.param_groups def param_groups(self, param_groups): self.optimizer.param_groups = param_groups def defaults(self): return self.optimizer.defaults def defaults(self, defaults): self.optimizer.defaults = defaults def add_param_group(self, param_group): self.optimizer.add_param_group(param_group) def load_state_dict(self, state_dict): if self.accelerator_state.distributed_type == DistributedType.XLA and self.device_placement: xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) self.optimizer.load_state_dict(state_dict) def state_dict(self): return self.optimizer.state_dict() def zero_grad(self, set_to_none=None): if self.gradient_state.sync_gradients: accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters if accept_arg: if set_to_none is None: set_to_none = True self.optimizer.zero_grad(set_to_none=set_to_none) else: if set_to_none is not None: raise ValueError("`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.") self.optimizer.zero_grad() def step(self, closure=None): if ( not self.gradient_state.is_xla_gradients_synced and self.accelerator_state.distributed_type == DistributedType.XLA ): gradients = xm._fetch_gradients(self.optimizer) xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size()) self.gradient_state.is_xla_gradients_synced = True if self.gradient_state.sync_gradients: if self.scaler is not None: self.optimizer.step = self._optimizer_patched_step_method self.scaler.step(self.optimizer, closure) self.scaler.update() if not self._accelerate_step_called: # If the optimizer step was skipped, gradient overflow was detected. 
self._is_overflow = True else: self._is_overflow = False # Reset the step method to the original one self.optimizer.step = self._optimizer_original_step_method # Reset the indicator self._accelerate_step_called = False else: self.optimizer.step(closure) if self.accelerator_state.distributed_type == DistributedType.XLA: self.gradient_state.is_xla_gradients_synced = False def _switch_parameters(self, parameters_map): for param_group in self.optimizer.param_groups: param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]] def is_overflow(self): """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" warnings.warn( "The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate use " "`optimizer.step_was_skipped` instead.", FutureWarning, ) return self._is_overflow def step_was_skipped(self): """Whether or not the optimizer step was skipped.""" return self._is_overflow def __getstate__(self): _ignored_keys = [ "_accelerate_step_called", "_optimizer_original_step_method", "_optimizer_patched_step_method", ] return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys} def __setstate__(self, state): self.__dict__.update(state) if self.scaler is not None: self._accelerate_step_called = False self._optimizer_original_step_method = self.optimizer.step self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step) def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method): def patched_step(*args, **kwargs): accelerated_optimizer._accelerate_step_called = True return method(*args, **kwargs) return patched_step
null
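A minimal sketch of the wrapper in a training step. In practice `accelerator.prepare(optimizer)` builds this wrapper for you; constructing it directly, as below, is an illustrative assumption and requires an `Accelerator` to exist first so the shared state it reads is initialized.

```python
import torch

from accelerate import Accelerator
from accelerate.optimizer import AcceleratedOptimizer

accelerator = Accelerator()
model = torch.nn.Linear(4, 2).to(accelerator.device)
optimizer = AcceleratedOptimizer(torch.optim.SGD(model.parameters(), lr=0.1))

loss = model(torch.randn(8, 4, device=accelerator.device)).sum()
loss.backward()
optimizer.step()       # deferred while gradients are still being accumulated
optimizer.zero_grad()  # likewise only zeroes once gradients are synced
```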
2,893
import os import sys import tempfile import torch from .state import AcceleratorState, PartialState from .utils import ( PrecisionType, PrepareForLaunch, are_libraries_initialized, check_cuda_p2p_ib_support, is_mps_available, patch_environment, ) def test_launch(): "Verify a `PartialState` can be initialized." _ = PartialState() class AcceleratorState: """ Singleton class that has information about the current training environment. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`. - **local_process_index** (`int`) -- The index of the current process on the current server. - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. """ _shared_state = SharedDict() def __init__( self, mixed_precision: str = None, cpu: bool = False, dynamo_plugin=None, deepspeed_plugin=None, fsdp_plugin=None, megatron_lm_plugin=None, _from_accelerator: bool = False, **kwargs, ): self.__dict__ = self._shared_state if parse_flag_from_env("ACCELERATE_USE_CPU"): cpu = True if PartialState._shared_state == {}: PartialState(cpu, **kwargs) self.__dict__.update(PartialState._shared_state) self._check_initialized(mixed_precision, cpu) if not self.initialized: self.deepspeed_plugin = None self.use_ipex = None mixed_precision = ( parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision.lower() ) if mixed_precision == "fp8": if not is_fp8_available(): raise ValueError( "Using `fp8` precision requires `transformer_engine` or `MS-AMP` to be installed." ) elif not check_fp8_capability(): logger.warning( f"The current device has compute capability of {torch.cuda.get_device_capability()} which is " "insufficient for FP8 mixed precision training (requires a GPU Hopper/Ada Lovelace " "or higher, compute capability of 8.9 or higher). Will use FP16 instead." ) mixed_precision = "fp16" self.dynamo_plugin = dynamo_plugin if not _from_accelerator: raise ValueError( "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` " "before using any functionality from the `accelerate` library." 
) # deepspeed handles mixed_precision using deepspeed_config self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision if self.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_tpu=True): if mixed_precision == "bf16": if os.environ.get("ACCELERATE_DOWNCAST_BF16"): os.environ["XLA_USE_BF16"] = str(0) os.environ["XLA_DOWNCAST_BF16"] = str(1) self.downcast_bfloat = True else: os.environ["XLA_USE_BF16"] = str(1) os.environ["XLA_DOWNCAST_BF16"] = str(0) self.downcast_bfloat = False elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu: self.deepspeed_plugin = deepspeed_plugin elif self.distributed_type == DistributedType.MULTI_GPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true": self.distributed_type = DistributedType.MEGATRON_LM megatron_lm_plugin.set_mixed_precision(self._mixed_precision) self.megatron_lm_plugin = megatron_lm_plugin elif self.distributed_type == DistributedType.MULTI_NPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]: if is_ipex_available(): "check if user disables it explicitly" self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True) else: self.use_ipex = False if self.distributed_type == DistributedType.MULTI_XPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin if ( self.dynamo_plugin.backend != DynamoBackend.NO and self._mixed_precision == "no" and self.device.type == "cuda" ): torch.backends.cuda.matmul.allow_tf32 = True PartialState._shared_state["distributed_type"] = self.distributed_type def initialized(self) -> bool: return self._shared_state != PartialState._shared_state def __repr__(self): repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n" if self.distributed_type == DistributedType.DEEPSPEED: repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n" return repr def _check_initialized(self, mixed_precision=None, cpu=None): "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized" if self.initialized: err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`." 
if cpu and self.device.type != "cpu": raise ValueError(err.format(flag="cpu=True")) if ( mixed_precision is not None and mixed_precision != self._mixed_precision and self.distributed_type != DistributedType.DEEPSPEED ): raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'")) # For backward compatibility def use_fp16(self): warnings.warn( "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use " "`AcceleratorState.mixed_precision == 'fp16'` instead.", FutureWarning, ) return self._mixed_precision != "no" def mixed_precision(self): if self.distributed_type == DistributedType.DEEPSPEED: config = self.deepspeed_plugin.deepspeed_config if config.get("fp16", {}).get("enabled", False): mixed_precision = "fp16" elif config.get("bf16", {}).get("enabled", False): mixed_precision = "bf16" else: mixed_precision = "no" else: mixed_precision = self._mixed_precision return mixed_precision def _reset_state(reset_partial_state: bool = False): "Resets `_shared_state`, is used internally and should not be called" AcceleratorState._shared_state.clear() if reset_partial_state: PartialState._reset_state() def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return PartialState().use_distributed def is_last_process(self) -> bool: "Returns whether the current process is the last one" return PartialState().is_last_process def is_main_process(self) -> bool: "Returns whether the current process is the main process" return PartialState().is_main_process def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return PartialState().is_local_main_process def wait_for_everyone(self): PartialState().wait_for_everyone() def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate.state import AcceleratorState state = AcceleratorState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs: yield inputs def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. """ with PartialState().main_process_first(): yield def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. 
""" with PartialState().local_main_process_first(): yield def print(self, *args, **kwargs): PartialState().print(*args, **kwargs) The provided code snippet includes necessary dependencies for implementing the `notebook_launcher` function. Write a Python function `def notebook_launcher( function, args=(), num_processes=None, mixed_precision="no", use_port="29500", master_addr="127.0.0.1", node_rank=0, num_nodes=1, )` to solve the following problem: Launches a training function, using several processes or multiple nodes if it's possible in the current environment (TPU with multiple cores for instance). <Tip warning={true}> To use this function absolutely zero calls to a CUDA device must be made in the notebook session before calling. If any have been made, you will need to restart the notebook and make sure no cells use any CUDA capability. Setting `ACCELERATE_DEBUG_MODE="1"` in your environment will run a test before truly launching to ensure that none of those calls have been made. </Tip> Args: function (`Callable`): The training function to execute. If it accepts arguments, the first argument should be the index of the process run. args (`Tuple`): Tuple of arguments to pass to the function (it will receive `*args`). num_processes (`int`, *optional*): The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to the number of GPUs available otherwise. mixed_precision (`str`, *optional*, defaults to `"no"`): If `fp16` or `bf16`, will use mixed precision training on multi-GPU. use_port (`str`, *optional*, defaults to `"29500"`): The port to use to communicate between processes when launching a multi-GPU training. master_addr (`str`, *optional*, defaults to `"127.0.0.1"`): The address to use for communication between processes. node_rank (`int`, *optional*, defaults to 0): The rank of the current node. num_nodes (`int`, *optional*, defaults to 1): The number of nodes to use for training. Example: ```python # Assume this is defined in a Jupyter Notebook on an instance with two GPUs from accelerate import notebook_launcher def train(*args): # Your training function here ... notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16") ``` Here is the function: def notebook_launcher( function, args=(), num_processes=None, mixed_precision="no", use_port="29500", master_addr="127.0.0.1", node_rank=0, num_nodes=1, ): """ Launches a training function, using several processes or multiple nodes if it's possible in the current environment (TPU with multiple cores for instance). <Tip warning={true}> To use this function absolutely zero calls to a CUDA device must be made in the notebook session before calling. If any have been made, you will need to restart the notebook and make sure no cells use any CUDA capability. Setting `ACCELERATE_DEBUG_MODE="1"` in your environment will run a test before truly launching to ensure that none of those calls have been made. </Tip> Args: function (`Callable`): The training function to execute. If it accepts arguments, the first argument should be the index of the process run. args (`Tuple`): Tuple of arguments to pass to the function (it will receive `*args`). num_processes (`int`, *optional*): The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to the number of GPUs available otherwise. mixed_precision (`str`, *optional*, defaults to `"no"`): If `fp16` or `bf16`, will use mixed precision training on multi-GPU. 
use_port (`str`, *optional*, defaults to `"29500"`):
            The port to use to communicate between processes when launching a multi-GPU training.
        master_addr (`str`, *optional*, defaults to `"127.0.0.1"`):
            The address to use for communication between processes.
        node_rank (`int`, *optional*, defaults to 0):
            The rank of the current node.
        num_nodes (`int`, *optional*, defaults to 1):
            The number of nodes to use for training.

    Example:

    ```python
    # Assume this is defined in a Jupyter Notebook on an instance with two GPUs
    from accelerate import notebook_launcher


    def train(*args):
        # Your training function here
        ...


    notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16")
    ```
    """
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initialize an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if node_rank >= num_nodes:
            raise ValueError("The node_rank must be less than the number of nodes.")
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initialize an "
                    "`Accelerator`."
                )
            # Check for specific libraries known to initialize CUDA that users constantly use
            problematic_imports = are_libraries_initialized("bitsandbytes")
            if len(problematic_imports) > 0:
                err = (
                    "Could not start distributed process. Libraries known to initialize CUDA upon import have been "
                    "imported already. Please keep these imports inside your training function to try and help with this:"
                )
                for lib_name in problematic_imports:
                    err += f"\n\t* `{lib_name}`"
                raise RuntimeError(err)

            patched_env = dict(
                nproc=num_processes,
                node_rank=node_rank,
                world_size=num_nodes * num_processes,
                master_addr=master_addr,
                master_port=use_port,
                mixed_precision=mixed_precision,
            )

            # Check for CUDA P2P and IB issues
            if not check_cuda_p2p_ib_support():
                patched_env["nccl_p2p_disable"] = "1"
                patched_env["nccl_ib_disable"] = "1"

            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(**patched_env):
                # First dummy launch
                if os.environ.get("ACCELERATE_DEBUG_MODE", "false").lower() == "true":
                    launcher = PrepareForLaunch(test_launch, distributed_type="MULTI_GPU")
                    try:
                        start_processes(launcher, args=(), nprocs=num_processes, start_method="fork")
                    except ProcessRaisedException as e:
                        err = "An issue was found when verifying a stable environment for the notebook launcher."
                        if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                            raise RuntimeError(
                                f"{err} "
                                "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                                "Please review your imports and test them when running the `notebook_launcher()` to identify "
                                "which one is problematic and causing CUDA to be initialized."
                            ) from e
                        else:
                            raise RuntimeError(f"{err} The following error was raised: {e}") from e
                # Now the actual launch
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic and causing CUDA to be initialized."
                        ) from e
                    else:
                        raise RuntimeError(f"An issue was found when launching the training: {e}") from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
Launches a training function, using several processes or multiple nodes if it's possible in the current environment (TPU with multiple cores for instance). <Tip warning={true}> To use this function absolutely zero calls to a CUDA device must be made in the notebook session before calling. If any have been made, you will need to restart the notebook and make sure no cells use any CUDA capability. Setting `ACCELERATE_DEBUG_MODE="1"` in your environment will run a test before truly launching to ensure that none of those calls have been made. </Tip> Args: function (`Callable`): The training function to execute. If it accepts arguments, the first argument should be the index of the process run. args (`Tuple`): Tuple of arguments to pass to the function (it will receive `*args`). num_processes (`int`, *optional*): The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to the number of GPUs available otherwise. mixed_precision (`str`, *optional*, defaults to `"no"`): If `fp16` or `bf16`, will use mixed precision training on multi-GPU. use_port (`str`, *optional*, defaults to `"29500"`): The port to use to communicate between processes when launching a multi-GPU training. master_addr (`str`, *optional*, defaults to `"127.0.0.1"`): The address to use for communication between processes. node_rank (`int`, *optional*, defaults to 0): The rank of the current node. num_nodes (`int`, *optional*, defaults to 1): The number of nodes to use for training. Example: ```python # Assume this is defined in a Jupyter Notebook on an instance with two GPUs from accelerate import notebook_launcher def train(*args): # Your training function here ... notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16") ```
2,894
import os
import sys
import tempfile

import torch

from .state import AcceleratorState, PartialState
from .utils import (
    PrecisionType,
    PrepareForLaunch,
    are_libraries_initialized,
    check_cuda_p2p_ib_support,
    is_mps_available,
    patch_environment,
)

The provided code snippet includes necessary dependencies for implementing the `debug_launcher` function. Write a Python function `def debug_launcher(function, args=(), num_processes=2)` to solve the following problem:
Launches a training function using several processes on CPU for debugging purposes. <Tip warning={true}> This function is provided for internal testing and debugging, but it's not intended for real training runs. It will only use the CPU. </Tip> Args: function (`Callable`): The training function to execute. args (`Tuple`): Tuple of arguments to pass to the function (it will receive `*args`). num_processes (`int`, *optional*, defaults to 2): The number of processes to use for training.
Here is the function:
def debug_launcher(function, args=(), num_processes=2):
    """
    Launches a training function using several processes on CPU for debugging purposes.

    <Tip warning={true}>

    This function is provided for internal testing and debugging, but it's not intended for real training runs. It
    will only use the CPU.

    </Tip>

    Args:
        function (`Callable`):
            The training function to execute.
        args (`Tuple`):
            Tuple of arguments to pass to the function (it will receive `*args`).
        num_processes (`int`, *optional*, defaults to 2):
            The number of processes to use for training.
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
Launches a training function using several processes on CPU for debugging purposes. <Tip warning={true}> This function is provided for internal testing and debugging, but it's not intended for real training runs. It will only use the CPU. </Tip> Args: function (`Callable`): The training function to execute. args (`Tuple`): Tuple of arguments to pass to the function (it will receive `*args`). num_processes (`int`, *optional*, defaults to 2): The number of processes to use for training.
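For example, a sketch of smoke-testing distributed logic with two CPU processes (the function body below is an illustrative assumption):

```python
from accelerate import Accelerator, debug_launcher


def smoke_test():
    accelerator = Accelerator()
    accelerator.print(f"Running with {accelerator.num_processes} CPU processes")
    accelerator.wait_for_everyone()


debug_launcher(smoke_test, num_processes=2)
```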
2,895
import logging import os from contextlib import contextmanager from functools import wraps from typing import Dict, List, Optional, Union import torch import torch.nn as nn from .hooks import ( AlignDevicesHook, CpuOffload, UserCpuOffloadHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks, ) from .utils import ( OffloadedWeightsLoader, check_device_map, extract_submodules_state_dict, find_tied_parameters, get_balanced_memory, infer_auto_device_map, is_npu_available, is_torch_version, is_xpu_available, load_checkpoint_in_model, offload_state_dict, parse_flag_from_env, retie_parameters, ) from .utils.other import recursive_getattr def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False): """ Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove this behavior and restore the original `forward` method, use `remove_hook_from_module`. <Tip warning={true}> If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class. </Tip> Args: module (`torch.nn.Module`): The module to attach a hook to. hook (`ModelHook`): The hook to attach. append (`bool`, *optional*, defaults to `False`): Whether the hook should be chained with an existing one (if module already contains a hook) or not. Returns: `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can be discarded). """ if append and (getattr(module, "_hf_hook", None) is not None): old_hook = module._hf_hook remove_hook_from_module(module) hook = SequentialHook(old_hook, hook) if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"): # If we already put some hook on this module, we replace it with the new one. old_forward = module._old_forward else: old_forward = module.forward module._old_forward = old_forward module = hook.init_hook(module) module._hf_hook = hook def new_forward(module, *args, **kwargs): args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs) if module._hf_hook.no_grad: with torch.no_grad(): output = module._old_forward(*args, **kwargs) else: output = module._old_forward(*args, **kwargs) return module._hf_hook.post_forward(module, output) # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail. # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409 if "GraphModuleImpl" in str(type(module)): module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) else: module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) return module class AlignDevicesHook(ModelHook): """ A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the associated module, potentially offloading the weights after the forward pass. Args: execution_device (`torch.device`, *optional*): The device on which inputs and model weights should be placed before the forward pass. offload (`bool`, *optional*, defaults to `False`): Whether or not the weights should be offloaded after the forward pass. io_same_device (`bool`, *optional*, defaults to `False`): Whether or not the output should be placed on the same device as the input was. 
weights_map (`Mapping[str, torch.Tensor]`, *optional*): When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the associated module's buffers when offloading. place_submodules (`bool`, *optional*, defaults to `False`): Whether to place the submodules on `execution_device` during the `init_hook` event. """ def __init__( self, execution_device: Optional[Union[int, str, torch.device]] = None, offload: bool = False, io_same_device: bool = False, weights_map: Optional[Mapping] = None, offload_buffers: bool = False, place_submodules: bool = False, skip_keys: Optional[Union[str, List[str]]] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): self.execution_device = execution_device self.offload = offload self.io_same_device = io_same_device self.weights_map = weights_map self.offload_buffers = offload_buffers self.place_submodules = place_submodules self.skip_keys = skip_keys # Will contain the input device when `io_same_device=True`. self.input_device = None self.param_original_devices = {} self.buffer_original_devices = {} self.tied_params_names = set() # The hook pre_forward/post_forward need to have knowledge of this dictionary, as with offloading we want to avoid duplicating memory # for tied weights already loaded on the target execution device. self.tied_params_map = tied_params_map def __repr__(self): return ( f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, " f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, " f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})" ) def init_hook(self, module): # In case the AlignDevicesHook is on meta device, ignore tied weights as data_ptr() is then always zero. if self.execution_device == "meta" or self.execution_device == torch.device("meta"): self.tied_params_map = None if not self.offload and self.execution_device is not None: for name, _ in named_module_tensors(module, recurse=self.place_submodules): set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map) elif self.offload: self.original_devices = { name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules) } if self.weights_map is None: self.weights_map = { name: param.to("cpu") for name, param in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules ) } for name, _ in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True ): # When using disk offloading, we can not rely on `weights_map[name].data_ptr()` as the reference pointer, # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer. # As we have no reliable way to track the shared data pointer of tied weights in this case, we use tied_params_names: List[str] # to add on the fly pointers to `tied_params_map` in the pre_forward call. 
if ( self.tied_params_map is not None and recursive_getattr(module, name).data_ptr() in self.tied_params_map ): self.tied_params_names.add(name) set_module_tensor_to_device(module, name, "meta") if not self.offload_buffers and self.execution_device is not None: for name, _ in module.named_buffers(recurse=self.place_submodules): set_module_tensor_to_device( module, name, self.execution_device, tied_params_map=self.tied_params_map ) elif self.offload_buffers and self.execution_device is not None: for name in get_non_persistent_buffers(module, recurse=self.place_submodules): set_module_tensor_to_device( module, name, self.execution_device, tied_params_map=self.tied_params_map ) return module def pre_forward(self, module, *args, **kwargs): if self.io_same_device: self.input_device = find_device([args, kwargs]) if self.offload: self.tied_pointers_to_remove = set() for name, _ in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True, ): fp16_statistics = None value = self.weights_map[name] if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys(): if value.dtype == torch.int8: fp16_statistics = self.weights_map[name.replace("weight", "SCB")] # In case we are using offloading with tied weights, we need to keep track of the offloaded weights # that are loaded on device at this point, as we will need to remove them as well from the dictionary # self.tied_params_map in order to allow to free memory. if name in self.tied_params_names and value.data_ptr() not in self.tied_params_map: self.tied_params_map[value.data_ptr()] = {} if ( value is not None and self.tied_params_map is not None and value.data_ptr() in self.tied_params_map and self.execution_device not in self.tied_params_map[value.data_ptr()] ): self.tied_pointers_to_remove.add((value.data_ptr(), self.execution_device)) set_module_tensor_to_device( module, name, self.execution_device, value=value, fp16_statistics=fp16_statistics, tied_params_map=self.tied_params_map, ) return send_to_device(args, self.execution_device), send_to_device( kwargs, self.execution_device, skip_keys=self.skip_keys ) def post_forward(self, module, output): if self.offload: for name, _ in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True, ): set_module_tensor_to_device(module, name, "meta") if type(module).__name__ == "Linear8bitLt": module.state.SCB = None module.state.CxB = None # We may have loaded tied weights into self.tied_params_map (avoiding to load them several times in e.g. submodules): remove them from # this dictionary to allow the garbage collector to do its job. 
for value_pointer, device in self.tied_pointers_to_remove: del self.tied_params_map[value_pointer][device] self.tied_pointers_to_remove = set() if self.io_same_device and self.input_device is not None: output = send_to_device(output, self.input_device, skip_keys=self.skip_keys) return output def detach_hook(self, module): if self.offload: for name, device in self.original_devices.items(): if device != torch.device("meta"): set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None)) return module def attach_align_device_hook( module: torch.nn.Module, execution_device: Optional[torch.device] = None, offload: bool = False, weights_map: Optional[Mapping] = None, offload_buffers: bool = False, module_name: str = "", skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): """ Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or buffers. Args: module (`torch.nn.Module`): The module where we want to attach the hooks. execution_device (`torch.device`, *optional*): The device on which inputs and model weights should be placed before the forward pass. offload (`bool`, *optional*, defaults to `False`): Whether or not the weights should be offloaded after the forward pass. weights_map (`Mapping[str, torch.Tensor]`, *optional*): When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the associated module's buffers when offloading. module_name (`str`, *optional*, defaults to `""`): The name of the module. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution device, this parameter is useful to reuse the first available pointer of a shared weight for all others, instead of duplicating memory. """ # Attach the hook on this module if it has any direct tensor. directs = named_module_tensors(module) full_offload = ( offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes ) if len(list(directs)) > 0 or full_offload: if weights_map is not None: prefix = f"{module_name}." if len(module_name) > 0 else "" prefixed_weights_map = PrefixedDataset(weights_map, prefix) else: prefixed_weights_map = None hook = AlignDevicesHook( execution_device=execution_device, offload=offload, weights_map=prefixed_weights_map, offload_buffers=offload_buffers, place_submodules=full_offload, skip_keys=skip_keys, tied_params_map=tied_params_map, ) add_hook_to_module(module, hook, append=True) # We stop the recursion in case we hit the full offload. if full_offload: return # Recurse on all children of the module. 
for child_name, child in module.named_children(): child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name attach_align_device_hook( child, execution_device=execution_device, offload=offload, weights_map=weights_map, offload_buffers=offload_buffers, module_name=child_name, preload_module_classes=preload_module_classes, skip_keys=skip_keys, tied_params_map=tied_params_map, ) The provided code snippet includes necessary dependencies for implementing the `cpu_offload` function. Write a Python function `def cpu_offload( model: nn.Module, execution_device: Optional[torch.device] = None, offload_buffers: bool = False, state_dict: Optional[Dict[str, torch.Tensor]] = None, preload_module_classes: Optional[List[str]] = None, )` to solve the following problem: Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that state dict and put on the execution device passed as they are needed, then offloaded again. Args: model (`torch.nn.Module`): The model to offload. execution_device (`torch.device`, *optional*): The device on which the forward pass of the model will be executed (should be a GPU). Will default to the model first parameter device. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to offload the buffers with the model parameters. state_dict (`Dict[str, torch.Tensor]`, *optional*): The state dict of the model that will be kept on CPU. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. Here is the function: def cpu_offload( model: nn.Module, execution_device: Optional[torch.device] = None, offload_buffers: bool = False, state_dict: Optional[Dict[str, torch.Tensor]] = None, preload_module_classes: Optional[List[str]] = None, ): """ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that state dict and put on the execution device passed as they are needed, then offloaded again. Args: model (`torch.nn.Module`): The model to offload. execution_device (`torch.device`, *optional*): The device on which the forward pass of the model will be executed (should be a GPU). Will default to the model first parameter device. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to offload the buffers with the model parameters. state_dict (`Dict[str, torch.Tensor]`, *optional*): The state dict of the model that will be kept on CPU. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. 
""" if execution_device is None: execution_device = next(iter(model.parameters())).device if state_dict is None: state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()} add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True) attach_align_device_hook( model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict, preload_module_classes=preload_module_classes, ) return model
Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that state dict and put on the execution device passed as they are needed, then offloaded again. Args: model (`torch.nn.Module`): The model to offload. execution_device (`torch.device`, *optional*): The device on which the forward pass of the model will be executed (should be a GPU). Will default to the model first parameter device. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to offload the buffers with the model parameters. state_dict (`Dict[str, torch.Tensor]`, *optional*): The state dict of the model that will be kept on CPU. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
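A minimal sketch of full CPU offload; the toy model is an assumption, and a CUDA device is assumed to be available to serve as the execution device.

```python
import torch
import torch.nn as nn

from accelerate import cpu_offload

model = nn.Sequential(nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 10))

# All weights stay in a CPU-side state dict; each submodule's tensors are
# streamed onto the GPU just in time for its forward and released afterwards.
model = cpu_offload(model, execution_device=torch.device("cuda:0"))

# The top-level hook keeps outputs on the same device as the inputs.
out = model(torch.randn(1, 512))
```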
2,896
import logging import os from contextlib import contextmanager from functools import wraps from typing import Dict, List, Optional, Union import torch import torch.nn as nn from .hooks import ( AlignDevicesHook, CpuOffload, UserCpuOffloadHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks, ) from .utils import ( OffloadedWeightsLoader, check_device_map, extract_submodules_state_dict, find_tied_parameters, get_balanced_memory, infer_auto_device_map, is_npu_available, is_torch_version, is_xpu_available, load_checkpoint_in_model, offload_state_dict, parse_flag_from_env, retie_parameters, ) from .utils.other import recursive_getattr def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False): """ Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove this behavior and restore the original `forward` method, use `remove_hook_from_module`. <Tip warning={true}> If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class. </Tip> Args: module (`torch.nn.Module`): The module to attach a hook to. hook (`ModelHook`): The hook to attach. append (`bool`, *optional*, defaults to `False`): Whether the hook should be chained with an existing one (if module already contains a hook) or not. Returns: `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can be discarded). """ if append and (getattr(module, "_hf_hook", None) is not None): old_hook = module._hf_hook remove_hook_from_module(module) hook = SequentialHook(old_hook, hook) if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"): # If we already put some hook on this module, we replace it with the new one. old_forward = module._old_forward else: old_forward = module.forward module._old_forward = old_forward module = hook.init_hook(module) module._hf_hook = hook def new_forward(module, *args, **kwargs): args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs) if module._hf_hook.no_grad: with torch.no_grad(): output = module._old_forward(*args, **kwargs) else: output = module._old_forward(*args, **kwargs) return module._hf_hook.post_forward(module, output) # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail. # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409 if "GraphModuleImpl" in str(type(module)): module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) else: module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) return module class CpuOffload(ModelHook): """ Offloads a model on the CPU until its forward pass is called. The model will not be offloaded back to the CPU after the forward, the user needs to call the `init_hook` method again for this. Args: execution_device(`str`, `int` or `torch.device`, *optional*): The device on which the model should be executed. Will default to the MPS device if it's available, then GPU 0 if there is a GPU, and finally to the CPU. prev_module_hook (`UserCpuOffloadHook`, *optional*): The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. 
If passed, its offload method will be called just before the forward of the model to which this hook is attached. """ def __init__( self, execution_device: Optional[Union[str, int, torch.device]] = None, prev_module_hook: Optional["UserCpuOffloadHook"] = None, ): self.prev_module_hook = prev_module_hook self.execution_device = execution_device if execution_device is not None else PartialState().default_device def init_hook(self, module): return module.to("cpu") def pre_forward(self, module, *args, **kwargs): if self.prev_module_hook is not None: self.prev_module_hook.offload() module.to(self.execution_device) return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device) class UserCpuOffloadHook: """ A simple hook grouping a model and a `ModelHook`, which provides easy APIs to call the init method of the hook or remove it entirely. """ def __init__(self, model, hook): self.model = model self.hook = hook def offload(self): self.hook.init_hook(self.model) def remove(self): remove_hook_from_module(self.model) The provided code snippet includes necessary dependencies for implementing the `cpu_offload_with_hook` function. Write a Python function `def cpu_offload_with_hook( model: torch.nn.Module, execution_device: Optional[Union[int, str, torch.device]] = None, prev_module_hook: Optional[UserCpuOffloadHook] = None, )` to solve the following problem: Offloads a model on the CPU and puts it back to an execution device when executed. The difference with [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop. Args: model (`torch.nn.Module`): The model to offload. execution_device (`str`, `int` or `torch.device`, *optional*): The device on which the model should be executed. Will default to the MPS device if it's available, then GPU 0 if there is a GPU, and finally to the CPU. prev_module_hook (`UserCpuOffloadHook`, *optional*): The hook sent back by this function for a previous model in the pipeline you are running. If passed, its offload method will be called just before the forward of the model to which this hook is attached. Example: ```py model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device) model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1) model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2) hid_1 = model_1(input) for i in range(50): # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop. hid_2 = model_2(hid_1) # model2 is offloaded to the CPU just before this forward. hid_3 = model_3(hid_2) # For model3, you need to manually call the hook offload method. hook_3.offload() ``` Here is the function: def cpu_offload_with_hook( model: torch.nn.Module, execution_device: Optional[Union[int, str, torch.device]] = None, prev_module_hook: Optional[UserCpuOffloadHook] = None, ): """ Offloads a model on the CPU and puts it back to an execution device when executed. The difference with [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop. Args: model (`torch.nn.Module`): The model to offload. execution_device (`str`, `int` or `torch.device`, *optional*): The device on which the model should be executed.
Will default to the MPS device if it's available, then GPU 0 if there is a GPU, and finally to the CPU. prev_module_hook (`UserCpuOffloadHook`, *optional*): The hook sent back by this function for a previous model in the pipeline you are running. If passed, its offload method will be called just before the forward of the model to which this hook is attached. Example: ```py model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device) model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1) model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2) hid_1 = model_1(input) for i in range(50): # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop. hid_2 = model_2(hid_1) # model2 is offloaded to the CPU just before this forward. hid_3 = model_3(hid_2) # For model3, you need to manually call the hook offload method. hook_3.offload() ``` """ hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook) add_hook_to_module(model, hook, append=True) user_hook = UserCpuOffloadHook(model, hook) return model, user_hook
Offloads a model on the CPU and puts it back to an execution device when executed. The difference with [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop. Args: model (`torch.nn.Module`): The model to offload. execution_device (`str`, `int` or `torch.device`, *optional*): The device on which the model should be executed. Will default to the MPS device if it's available, then GPU 0 if there is a GPU, and finally to the CPU. prev_module_hook (`UserCpuOffloadHook`, *optional*): The hook sent back by this function for a previous model in the pipeline you are running. If passed, its offload method will be called just before the forward of the model to which this hook is attached. Example: ```py model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device) model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1) model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2) hid_1 = model_1(input) for i in range(50): # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop. hid_2 = model_2(hid_1) # model2 is offloaded to the CPU just before this forward. hid_3 = model_3(hid_2) # For model3, you need to manually call the hook offload method. hook_3.offload() ```
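A short complement to the docstring example: the sketch below traces the offload/reload cycle on a single module. It is an illustration under assumptions (installed `accelerate`, a CUDA device, an invented toy layer), not part of the original row.

```python
# Sketch: where the weights live at each step (assumes accelerate + CUDA).
import torch
import torch.nn as nn

from accelerate import cpu_offload_with_hook

model = nn.Linear(16, 16)
model, hook = cpu_offload_with_hook(model, execution_device="cuda:0")
print(next(model.parameters()).device)  # cpu: init_hook offloaded the module

_ = model(torch.randn(2, 16))           # pre_forward moves weights and inputs
print(next(model.parameters()).device)  # cuda:0: it stays there after the forward

hook.offload()                          # explicit offload back to the CPU
print(next(model.parameters()).device)  # cpu again
```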
2,897
import logging import os from contextlib import contextmanager from functools import wraps from typing import Dict, List, Optional, Union import torch import torch.nn as nn from .hooks import ( AlignDevicesHook, CpuOffload, UserCpuOffloadHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks, ) from .utils import ( OffloadedWeightsLoader, check_device_map, extract_submodules_state_dict, find_tied_parameters, get_balanced_memory, infer_auto_device_map, is_npu_available, is_torch_version, is_xpu_available, load_checkpoint_in_model, offload_state_dict, parse_flag_from_env, retie_parameters, ) from .utils.other import recursive_getattr def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False): """ Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove this behavior and restore the original `forward` method, use `remove_hook_from_module`. <Tip warning={true}> If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class. </Tip> Args: module (`torch.nn.Module`): The module to attach a hook to. hook (`ModelHook`): The hook to attach. append (`bool`, *optional*, defaults to `False`): Whether the hook should be chained with an existing one (if module already contains a hook) or not. Returns: `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can be discarded). """ if append and (getattr(module, "_hf_hook", None) is not None): old_hook = module._hf_hook remove_hook_from_module(module) hook = SequentialHook(old_hook, hook) if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"): # If we already put some hook on this module, we replace it with the new one. old_forward = module._old_forward else: old_forward = module.forward module._old_forward = old_forward module = hook.init_hook(module) module._hf_hook = hook def new_forward(module, *args, **kwargs): args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs) if module._hf_hook.no_grad: with torch.no_grad(): output = module._old_forward(*args, **kwargs) else: output = module._old_forward(*args, **kwargs) return module._hf_hook.post_forward(module, output) # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail. # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409 if "GraphModuleImpl" in str(type(module)): module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) else: module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) return module class AlignDevicesHook(ModelHook): """ A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the associated module, potentially offloading the weights after the forward pass. Args: execution_device (`torch.device`, *optional*): The device on which inputs and model weights should be placed before the forward pass. offload (`bool`, *optional*, defaults to `False`): Whether or not the weights should be offloaded after the forward pass. io_same_device (`bool`, *optional*, defaults to `False`): Whether or not the output should be placed on the same device as the input was. 
weights_map (`Mapping[str, torch.Tensor]`, *optional*): When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the associated module's buffers when offloading. place_submodules (`bool`, *optional*, defaults to `False`): Whether to place the submodules on `execution_device` during the `init_hook` event. """ def __init__( self, execution_device: Optional[Union[int, str, torch.device]] = None, offload: bool = False, io_same_device: bool = False, weights_map: Optional[Mapping] = None, offload_buffers: bool = False, place_submodules: bool = False, skip_keys: Optional[Union[str, List[str]]] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): self.execution_device = execution_device self.offload = offload self.io_same_device = io_same_device self.weights_map = weights_map self.offload_buffers = offload_buffers self.place_submodules = place_submodules self.skip_keys = skip_keys # Will contain the input device when `io_same_device=True`. self.input_device = None self.param_original_devices = {} self.buffer_original_devices = {} self.tied_params_names = set() # The hook pre_forward/post_forward need to have knowledge of this dictionary, as with offloading we want to avoid duplicating memory # for tied weights already loaded on the target execution device. self.tied_params_map = tied_params_map def __repr__(self): return ( f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, " f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, " f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})" ) def init_hook(self, module): # In case the AlignDevicesHook is on meta device, ignore tied weights as data_ptr() is then always zero. if self.execution_device == "meta" or self.execution_device == torch.device("meta"): self.tied_params_map = None if not self.offload and self.execution_device is not None: for name, _ in named_module_tensors(module, recurse=self.place_submodules): set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map) elif self.offload: self.original_devices = { name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules) } if self.weights_map is None: self.weights_map = { name: param.to("cpu") for name, param in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules ) } for name, _ in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True ): # When using disk offloading, we can not rely on `weights_map[name].data_ptr()` as the reference pointer, # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer. # As we have no reliable way to track the shared data pointer of tied weights in this case, we use tied_params_names: List[str] # to add on the fly pointers to `tied_params_map` in the pre_forward call. 
if ( self.tied_params_map is not None and recursive_getattr(module, name).data_ptr() in self.tied_params_map ): self.tied_params_names.add(name) set_module_tensor_to_device(module, name, "meta") if not self.offload_buffers and self.execution_device is not None: for name, _ in module.named_buffers(recurse=self.place_submodules): set_module_tensor_to_device( module, name, self.execution_device, tied_params_map=self.tied_params_map ) elif self.offload_buffers and self.execution_device is not None: for name in get_non_persistent_buffers(module, recurse=self.place_submodules): set_module_tensor_to_device( module, name, self.execution_device, tied_params_map=self.tied_params_map ) return module def pre_forward(self, module, *args, **kwargs): if self.io_same_device: self.input_device = find_device([args, kwargs]) if self.offload: self.tied_pointers_to_remove = set() for name, _ in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True, ): fp16_statistics = None value = self.weights_map[name] if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys(): if value.dtype == torch.int8: fp16_statistics = self.weights_map[name.replace("weight", "SCB")] # In case we are using offloading with tied weights, we need to keep track of the offloaded weights # that are loaded on device at this point, as we will need to remove them as well from the dictionary # self.tied_params_map in order to allow to free memory. if name in self.tied_params_names and value.data_ptr() not in self.tied_params_map: self.tied_params_map[value.data_ptr()] = {} if ( value is not None and self.tied_params_map is not None and value.data_ptr() in self.tied_params_map and self.execution_device not in self.tied_params_map[value.data_ptr()] ): self.tied_pointers_to_remove.add((value.data_ptr(), self.execution_device)) set_module_tensor_to_device( module, name, self.execution_device, value=value, fp16_statistics=fp16_statistics, tied_params_map=self.tied_params_map, ) return send_to_device(args, self.execution_device), send_to_device( kwargs, self.execution_device, skip_keys=self.skip_keys ) def post_forward(self, module, output): if self.offload: for name, _ in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True, ): set_module_tensor_to_device(module, name, "meta") if type(module).__name__ == "Linear8bitLt": module.state.SCB = None module.state.CxB = None # We may have loaded tied weights into self.tied_params_map (avoiding to load them several times in e.g. submodules): remove them from # this dictionary to allow the garbage collector to do its job. 
for value_pointer, device in self.tied_pointers_to_remove: del self.tied_params_map[value_pointer][device] self.tied_pointers_to_remove = set() if self.io_same_device and self.input_device is not None: output = send_to_device(output, self.input_device, skip_keys=self.skip_keys) return output def detach_hook(self, module): if self.offload: for name, device in self.original_devices.items(): if device != torch.device("meta"): set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None)) return module def attach_align_device_hook( module: torch.nn.Module, execution_device: Optional[torch.device] = None, offload: bool = False, weights_map: Optional[Mapping] = None, offload_buffers: bool = False, module_name: str = "", skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): """ Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or buffers. Args: module (`torch.nn.Module`): The module where we want to attach the hooks. execution_device (`torch.device`, *optional*): The device on which inputs and model weights should be placed before the forward pass. offload (`bool`, *optional*, defaults to `False`): Whether or not the weights should be offloaded after the forward pass. weights_map (`Mapping[str, torch.Tensor]`, *optional*): When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the associated module's buffers when offloading. module_name (`str`, *optional*, defaults to `""`): The name of the module. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution device, this parameter is useful to reuse the first available pointer of a shared weight for all others, instead of duplicating memory. """ # Attach the hook on this module if it has any direct tensor. directs = named_module_tensors(module) full_offload = ( offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes ) if len(list(directs)) > 0 or full_offload: if weights_map is not None: prefix = f"{module_name}." if len(module_name) > 0 else "" prefixed_weights_map = PrefixedDataset(weights_map, prefix) else: prefixed_weights_map = None hook = AlignDevicesHook( execution_device=execution_device, offload=offload, weights_map=prefixed_weights_map, offload_buffers=offload_buffers, place_submodules=full_offload, skip_keys=skip_keys, tied_params_map=tied_params_map, ) add_hook_to_module(module, hook, append=True) # We stop the recursion in case we hit the full offload. if full_offload: return # Recurse on all children of the module. 
for child_name, child in module.named_children(): child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name attach_align_device_hook( child, execution_device=execution_device, offload=offload, weights_map=weights_map, offload_buffers=offload_buffers, module_name=child_name, preload_module_classes=preload_module_classes, skip_keys=skip_keys, tied_params_map=tied_params_map, ) The provided code snippet includes necessary dependencies for implementing the `disk_offload` function. Write a Python function `def disk_offload( model: nn.Module, offload_dir: Union[str, os.PathLike], execution_device: Optional[torch.device] = None, offload_buffers: bool = False, preload_module_classes: Optional[List[str]] = None, )` to solve the following problem: Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as memory-mapped arrays in a given folder. During the forward pass, parameters will be accessed from that folder and put on the passed execution device as they are needed, then offloaded again. Args: model (`torch.nn.Module`): The model to offload. offload_dir (`str` or `os.PathLike`): The folder in which to offload the model weights (or where the model weights are already offloaded). execution_device (`torch.device`, *optional*): The device on which the forward pass of the model will be executed (should be a GPU). Will default to the model's first parameter device. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to offload the buffers with the model parameters. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. Here is the function: def disk_offload( model: nn.Module, offload_dir: Union[str, os.PathLike], execution_device: Optional[torch.device] = None, offload_buffers: bool = False, preload_module_classes: Optional[List[str]] = None, ): """ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as memory-mapped arrays in a given folder. During the forward pass, parameters will be accessed from that folder and put on the passed execution device as they are needed, then offloaded again. Args: model (`torch.nn.Module`): The model to offload. offload_dir (`str` or `os.PathLike`): The folder in which to offload the model weights (or where the model weights are already offloaded). execution_device (`torch.device`, *optional*): The device on which the forward pass of the model will be executed (should be a GPU). Will default to the model's first parameter device. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to offload the buffers with the model parameters. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
""" if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")): offload_state_dict(offload_dir, model.state_dict()) if execution_device is None: execution_device = next(iter(model.parameters())).device weights_map = OffloadedWeightsLoader(save_folder=offload_dir) add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True) attach_align_device_hook( model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=weights_map, preload_module_classes=preload_module_classes, ) return model
Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as memory-mapped arrays in a given folder. During the forward pass, parameters will be accessed from that folder and put on the passed execution device as they are needed, then offloaded again. Args: model (`torch.nn.Module`): The model to offload. offload_dir (`str` or `os.PathLike`): The folder in which to offload the model weights (or where the model weights are already offloaded). execution_device (`torch.device`, *optional*): The device on which the forward pass of the model will be executed (should be a GPU). Will default to the model's first parameter device. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to offload the buffers with the model parameters. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
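A minimal usage sketch for the `disk_offload` entry above, plus a direct use of the hook machinery shown in the prompt. Both are hedged illustrations under assumptions (installed `accelerate`, a CUDA device, invented toy modules), not part of the original row.

```python
# Sketch: disk offload of a toy model (assumes accelerate + a CUDA device).
import tempfile

import torch
import torch.nn as nn

from accelerate import disk_offload
from accelerate.hooks import AlignDevicesHook, add_hook_to_module

model = nn.Sequential(nn.Linear(8, 32), nn.ReLU(), nn.Linear(32, 2))
offload_dir = tempfile.mkdtemp()  # receives index.json plus one .dat file per tensor

disk_offload(model, offload_dir, execution_device=torch.device("cuda:0"))
out = model(torch.randn(4, 8))  # weights are read back from disk per submodule

# The AlignDevicesHook/add_hook_to_module machinery can also be used directly:
lin = nn.Linear(4, 4)
add_hook_to_module(lin, AlignDevicesHook(execution_device="cuda:0"))
y = lin(torch.randn(2, 4))  # init_hook moved the weights; pre_forward moves inputs
```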
2,898
import logging import os from contextlib import contextmanager from functools import wraps from typing import Dict, List, Optional, Union import torch import torch.nn as nn from .hooks import ( AlignDevicesHook, CpuOffload, UserCpuOffloadHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks, ) from .utils import ( OffloadedWeightsLoader, check_device_map, extract_submodules_state_dict, find_tied_parameters, get_balanced_memory, infer_auto_device_map, is_npu_available, is_torch_version, is_xpu_available, load_checkpoint_in_model, offload_state_dict, parse_flag_from_env, retie_parameters, ) from .utils.other import recursive_getattr def dispatch_model( model: nn.Module, device_map: Dict[str, Union[str, int, torch.device]], main_device: Optional[torch.device] = None, state_dict: Optional[Dict[str, torch.Tensor]] = None, offload_dir: Optional[Union[str, os.PathLike]] = None, offload_index: Optional[Dict[str, str]] = None, offload_buffers: bool = False, skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, force_hooks: bool = False, ): """ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on the CPU or even the disk. Args: model (`torch.nn.Module`): The model to dispatch. device_map (`Dict[str, Union[str, int, torch.device]]`): A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that `"disk"` is accepted even if it's not a proper value for `torch.device`. main_device (`str`, `int` or `torch.device`, *optional*): The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or `"disk"`. state_dict (`Dict[str, torch.Tensor]`, *optional*): The state dict of the part of the model that will be kept on CPU. offload_dir (`str` or `os.PathLike`): The folder in which to offload the model weights (or where the model weights are already offloaded). offload_index (`Dict`, *optional*): A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default to the index saved in `save_folder`. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to offload the buffers with the model parameters. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. force_hooks (`bool`, *optional*, defaults to `False`): Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a single device. """ # Error early if the device map is incomplete. check_device_map(model, device_map) # for backward compatibility is_bnb_quantized = ( getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False) ) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes" # We attach hooks if the device_map has at least 2 different devices or if # force_hooks is set to `True`. 
Otherwise, the model is already loaded # in the unique device and the user can decide where to dispatch the model. # If the model is quantized, we always force-dispatch the model if (len(set(device_map.values())) > 1) or is_bnb_quantized or force_hooks: if main_device is None: if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}: main_device = "cpu" else: main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0] if main_device != "cpu": cpu_modules = [name for name, device in device_map.items() if device == "cpu"] if state_dict is None and len(cpu_modules) > 0: state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules) disk_modules = [name for name, device in device_map.items() if device == "disk"] if offload_dir is None and offload_index is None and len(disk_modules) > 0: raise ValueError( "We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules " f"need to be offloaded: {', '.join(disk_modules)}." ) if ( len(disk_modules) > 0 and offload_index is None and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json"))) ): disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules) offload_state_dict(offload_dir, disk_state_dict) execution_device = { name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items() } execution_device[""] = main_device offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"] offload = {name: device in offloaded_devices for name, device in device_map.items()} save_folder = offload_dir if len(disk_modules) > 0 else None if state_dict is not None or save_folder is not None or offload_index is not None: device = main_device if offload_index is not None else None weights_map = OffloadedWeightsLoader( state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device ) else: weights_map = None # When dispatching the model's parameters to the devices specified in device_map, we want to avoid allocating memory several times for the # tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its # original pointer) on each device. tied_params = find_tied_parameters(model) tied_params_map = {} for group in tied_params: for param_name in group: # data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need # to care about views of tensors through storage_offset. data_ptr = recursive_getattr(model, param_name).data_ptr() tied_params_map[data_ptr] = {} # Note: To handle the disk offloading case, we cannot simply use weights_map[param_name].data_ptr() as the reference pointer, # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer. attach_align_device_hook_on_blocks( model, execution_device=execution_device, offload=offload, offload_buffers=offload_buffers, weights_map=weights_map, skip_keys=skip_keys, preload_module_classes=preload_module_classes, tied_params_map=tied_params_map, ) # warn if there are any params on the meta device offloaded_devices_str = " and ".join( [device for device in set(device_map.values()) if device in ("cpu", "disk")] ) if len(offloaded_devices_str) > 0: logging.warning( f"Some parameters are on the meta device because they were offloaded to the {offloaded_devices_str}."
) # Attaching the hook may break tied weights, so we retie them retie_parameters(model, tied_params) # add warning to cuda and to method def add_warning(fn, model): def wrapper(*args, **kwargs): warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks." if str(fn.__name__) == "to": to_device = torch._C._nn._parse_to(*args, **kwargs)[0] if to_device is not None: logger.warning(warning_msg) else: logger.warning(warning_msg) for param in model.parameters(): if param.device == torch.device("meta"): raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.") return fn(*args, **kwargs) return wrapper model.to = add_warning(model.to, model) if is_npu_available(): model.npu = add_warning(model.npu, model) elif is_xpu_available(): model.xpu = add_warning(model.xpu, model) else: model.cuda = add_warning(model.cuda, model) else: device = list(device_map.values())[0] # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). if is_npu_available() and isinstance(device, int): device = f"npu:{device}" elif is_xpu_available() and isinstance(device, int): device = f"xpu:{device}" if device != "disk": model.to(device) else: raise ValueError( "You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead." ) # Convert OrderedDict back to dict for easier usage model.hf_device_map = dict(device_map) return model The provided code snippet includes necessary dependencies for implementing the `load_checkpoint_and_dispatch` function. Write a Python function `def load_checkpoint_and_dispatch( model: nn.Module, checkpoint: Union[str, os.PathLike], device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, no_split_module_classes: Optional[List[str]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, offload_buffers: bool = False, dtype: Optional[Union[str, torch.dtype]] = None, offload_state_dict: Optional[bool] = None, skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, force_hooks: bool = False, )` to solve the following problem: Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are loaded and adds the various hooks that will make this model run properly (even if split across devices). Args: model (`torch.nn.Module`): The model in which we want to load a checkpoint. checkpoint (`str` or `os.PathLike`): The folder checkpoint to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map). Defaults to None, which means [`dispatch_model`] will not be called. max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. 
Will default to the maximum memory available for each GPU and the available CPU RAM if unset. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_buffers (`bool`, *optional*, defaults to `False`): In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as well as the parameters. dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. offload_state_dict (`bool`, *optional*): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map picked contains `"disk"` values. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. force_hooks (`bool`, *optional*, defaults to `False`): Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a single device. Example: ```python >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch >>> from huggingface_hub import hf_hub_download >>> from transformers import AutoConfig, AutoModelForCausalLM >>> # Download the Weights >>> checkpoint = "EleutherAI/gpt-j-6B" >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin") >>> # Create a model and initialize it with empty weights >>> config = AutoConfig.from_pretrained(checkpoint) >>> with init_empty_weights(): ... model = AutoModelForCausalLM.from_config(config) >>> # Load the checkpoint and dispatch it to the right devices >>> model = load_checkpoint_and_dispatch( ... model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"] ... ) ``` Here is the function: def load_checkpoint_and_dispatch( model: nn.Module, checkpoint: Union[str, os.PathLike], device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, no_split_module_classes: Optional[List[str]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, offload_buffers: bool = False, dtype: Optional[Union[str, torch.dtype]] = None, offload_state_dict: Optional[bool] = None, skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, force_hooks: bool = False, ): """ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are loaded and adds the various hooks that will make this model run properly (even if split across devices). Args: model (`torch.nn.Module`): The model in which we want to load a checkpoint. checkpoint (`str` or `os.PathLike`): The folder checkpoint to load. 
It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map). Defaults to None, which means [`dispatch_model`] will not be called. max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_buffers (`bool`, *optional*, defaults to `False`): In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as well as the parameters. dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. offload_state_dict (`bool`, *optional*): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map picked contains `"disk"` values. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. force_hooks (`bool`, *optional*, defaults to `False`): Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a single device. Example: ```python >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch >>> from huggingface_hub import hf_hub_download >>> from transformers import AutoConfig, AutoModelForCausalLM >>> # Download the Weights >>> checkpoint = "EleutherAI/gpt-j-6B" >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin") >>> # Create a model and initialize it with empty weights >>> config = AutoConfig.from_pretrained(checkpoint) >>> with init_empty_weights(): ... model = AutoModelForCausalLM.from_config(config) >>> # Load the checkpoint and dispatch it to the right devices >>> model = load_checkpoint_and_dispatch( ... model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"] ... 
) ``` """ if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." ) if isinstance(device_map, str): if device_map != "sequential": max_memory = get_balanced_memory( model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype, low_zero=(device_map == "balanced_low_0"), ) device_map = infer_auto_device_map( model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype, offload_buffers=offload_buffers, ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): offload_state_dict = True load_checkpoint_in_model( model, checkpoint, device_map=device_map, offload_folder=offload_folder, dtype=dtype, offload_state_dict=offload_state_dict, offload_buffers=offload_buffers, ) if device_map is None: return model return dispatch_model( model, device_map=device_map, offload_dir=offload_folder, offload_buffers=offload_buffers, skip_keys=skip_keys, preload_module_classes=preload_module_classes, force_hooks=force_hooks, )
Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are loaded and adds the various hooks that will make this model run properly (even if split across devices). Args: model (`torch.nn.Module`): The model in which we want to load a checkpoint. checkpoint (`str` or `os.PathLike`): The folder checkpoint to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map). Defaults to None, which means [`dispatch_model`] will not be called. max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_buffers (`bool`, *optional*, defaults to `False`): In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as well as the parameters. dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. offload_state_dict (`bool`, *optional*): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map picked contains `"disk"` values. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. force_hooks (`bool`, *optional*, defaults to `False`): Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a single device. Example: ```python >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch >>> from huggingface_hub import hf_hub_download >>> from transformers import AutoConfig, AutoModelForCausalLM >>> # Download the Weights >>> checkpoint = "EleutherAI/gpt-j-6B" >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin") >>> # Create a model and initialize it with empty weights >>> config = AutoConfig.from_pretrained(checkpoint) >>> with init_empty_weights(): ... 
model = AutoModelForCausalLM.from_config(config) >>> # Load the checkpoint and dispatch it to the right devices >>> model = load_checkpoint_and_dispatch( ... model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"] ... ) ```
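As a follow-on to the docstring example, the sketch below shows how `max_memory` steers the automatic device map. It is illustrative only: the memory caps are invented, and `model`/`weights_location` are assumed to be the objects created in the example above.

```python
# Illustrative continuation of the docstring example: cap per-device memory so
# overflow modules land on the CPU, and on disk once the CPU budget is full.
from accelerate import load_checkpoint_and_dispatch

model = load_checkpoint_and_dispatch(
    model,                                   # meta-initialized, as in the example
    weights_location,
    device_map="auto",
    max_memory={0: "6GiB", "cpu": "24GiB"},  # invented limits
    no_split_module_classes=["GPTJBlock"],
    offload_folder="offload",                # required if "disk" appears in the map
)
print(model.hf_device_map)  # final module-name -> device assignment
```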
2,899
from __future__ import annotations import logging import math import os import threading import warnings from contextlib import contextmanager from functools import partial from typing import Any, Callable, Optional import torch from .utils import ( DistributedType, DynamoBackend, GradientAccumulationPlugin, check_cuda_p2p_ib_support, check_fp8_capability, get_ccl_version, get_int_from_env, is_ccl_available, is_datasets_available, is_deepspeed_available, is_fp8_available, is_ipex_available, is_mps_available, is_npu_available, is_torch_xla_available, is_xpu_available, parse_choice_from_env, parse_flag_from_env, ) from .utils.dataclasses import SageMakerDistributedType class AcceleratorState: """ Singleton class that has information about the current training environment. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`. - **local_process_index** (`int`) -- The index of the current process on the current server. - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. """ _shared_state = SharedDict() def __init__( self, mixed_precision: str = None, cpu: bool = False, dynamo_plugin=None, deepspeed_plugin=None, fsdp_plugin=None, megatron_lm_plugin=None, _from_accelerator: bool = False, **kwargs, ): self.__dict__ = self._shared_state if parse_flag_from_env("ACCELERATE_USE_CPU"): cpu = True if PartialState._shared_state == {}: PartialState(cpu, **kwargs) self.__dict__.update(PartialState._shared_state) self._check_initialized(mixed_precision, cpu) if not self.initialized: self.deepspeed_plugin = None self.use_ipex = None mixed_precision = ( parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision.lower() ) if mixed_precision == "fp8": if not is_fp8_available(): raise ValueError( "Using `fp8` precision requires `transformer_engine` or `MS-AMP` to be installed." ) elif not check_fp8_capability(): logger.warning( f"The current device has compute capability of {torch.cuda.get_device_capability()} which is " "insufficient for FP8 mixed precision training (requires a GPU Hopper/Ada Lovelace " "or higher, compute capability of 8.9 or higher). Will use FP16 instead." ) mixed_precision = "fp16" self.dynamo_plugin = dynamo_plugin if not _from_accelerator: raise ValueError( "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` " "before using any functionality from the `accelerate` library." 
) # deepspeed handles mixed_precision using deepspeed_config self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision if self.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_tpu=True): if mixed_precision == "bf16": if os.environ.get("ACCELERATE_DOWNCAST_BF16"): os.environ["XLA_USE_BF16"] = str(0) os.environ["XLA_DOWNCAST_BF16"] = str(1) self.downcast_bfloat = True else: os.environ["XLA_USE_BF16"] = str(1) os.environ["XLA_DOWNCAST_BF16"] = str(0) self.downcast_bfloat = False elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu: self.deepspeed_plugin = deepspeed_plugin elif self.distributed_type == DistributedType.MULTI_GPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true": self.distributed_type = DistributedType.MEGATRON_LM megatron_lm_plugin.set_mixed_precision(self._mixed_precision) self.megatron_lm_plugin = megatron_lm_plugin elif self.distributed_type == DistributedType.MULTI_NPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]: if is_ipex_available(): "check if user disables it explicitly" self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True) else: self.use_ipex = False if self.distributed_type == DistributedType.MULTI_XPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin if ( self.dynamo_plugin.backend != DynamoBackend.NO and self._mixed_precision == "no" and self.device.type == "cuda" ): torch.backends.cuda.matmul.allow_tf32 = True PartialState._shared_state["distributed_type"] = self.distributed_type def initialized(self) -> bool: return self._shared_state != PartialState._shared_state def __repr__(self): repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n" if self.distributed_type == DistributedType.DEEPSPEED: repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n" return repr def _check_initialized(self, mixed_precision=None, cpu=None): "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized" if self.initialized: err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`." 
if cpu and self.device.type != "cpu": raise ValueError(err.format(flag="cpu=True")) if ( mixed_precision is not None and mixed_precision != self._mixed_precision and self.distributed_type != DistributedType.DEEPSPEED ): raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'")) # For backward compatibility def use_fp16(self): warnings.warn( "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use " "`AcceleratorState.mixed_precision == 'fp16'` instead.", FutureWarning, ) return self._mixed_precision != "no" def mixed_precision(self): if self.distributed_type == DistributedType.DEEPSPEED: config = self.deepspeed_plugin.deepspeed_config if config.get("fp16", {}).get("enabled", False): mixed_precision = "fp16" elif config.get("bf16", {}).get("enabled", False): mixed_precision = "bf16" else: mixed_precision = "no" else: mixed_precision = self._mixed_precision return mixed_precision def _reset_state(reset_partial_state: bool = False): "Resets `_shared_state`, is used internally and should not be called" AcceleratorState._shared_state.clear() if reset_partial_state: PartialState._reset_state() def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return PartialState().use_distributed def is_last_process(self) -> bool: "Returns whether the current process is the last one" return PartialState().is_last_process def is_main_process(self) -> bool: "Returns whether the current process is the main process" return PartialState().is_main_process def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return PartialState().is_local_main_process def wait_for_everyone(self): PartialState().wait_for_everyone() def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate.state import AcceleratorState state = AcceleratorState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs: yield inputs def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. """ with PartialState().main_process_first(): yield def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. 
""" with PartialState().local_main_process_first(): yield def print(self, *args, **kwargs): PartialState().print(*args, **kwargs) The provided code snippet includes necessary dependencies for implementing the `is_initialized` function. Write a Python function `def is_initialized() -> bool` to solve the following problem: Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`, but works as a module method. Here is the function: def is_initialized() -> bool: """ Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`, but works as a module method. """ return AcceleratorState._shared_state != {}
Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`, but works as a module method.
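A minimal usage sketch, assuming only that `accelerate` is installed and an `Accelerator` can be constructed on this machine (CPU is fine):

```python
# Minimal sketch: `is_initialized` reads the module-level shared state, so it
# can be checked before any Accelerator object exists.
from accelerate import Accelerator
from accelerate.state import is_initialized

assert not is_initialized()  # no Accelerator has populated the shared state yet

accelerator = Accelerator()  # fills AcceleratorState._shared_state
assert is_initialized()      # the shared-state dict is now non-empty
```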
2,900
from __future__ import annotations import logging import math import os import threading import warnings from contextlib import contextmanager from functools import partial from typing import Any, Callable, Optional import torch from .utils import ( DistributedType, DynamoBackend, GradientAccumulationPlugin, check_cuda_p2p_ib_support, check_fp8_capability, get_ccl_version, get_int_from_env, is_ccl_available, is_datasets_available, is_deepspeed_available, is_fp8_available, is_ipex_available, is_mps_available, is_npu_available, is_torch_xla_available, is_xpu_available, parse_choice_from_env, parse_flag_from_env, ) from .utils.dataclasses import SageMakerDistributedType def do_nothing(*args, **kwargs): return None
null
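An illustrative sketch of where `do_nothing` fits in (the `rank` and `log_fn` names are placeholders, not from the source):

```python
# Sketch: `do_nothing` is the no-op stand-in the process-control decorators
# return on ranks that should stay silent. It accepts anything, returns None.
def log_fn(msg):
    print(msg)

rank = 1  # pretend this is a non-main rank
fn = log_fn if rank == 0 else do_nothing
assert fn("only rank 0 prints this") is None  # swallowed on rank 1
```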
2,901
import json import os import time from functools import wraps from typing import Any, Dict, List, Optional, Union import yaml from .logging import get_logger from .state import PartialState from .utils import ( LoggerType, is_aim_available, is_clearml_available, is_comet_ml_available, is_dvclive_available, is_mlflow_available, is_tensorboard_available, is_wandb_available, listify, ) class PartialState: """ Singleton class that has information about the current training environment and functions to help with process control. Designed to be used when only process control and device execution states are needed. Does *not* need to be initialized from `Accelerator`. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **local_process_index** (`int`) -- The index of the current process on the current server. - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. """ _shared_state = SharedDict() def __init__(self, cpu: bool = False, **kwargs): self.__dict__ = self._shared_state if not self.initialized: self._cpu = cpu self.backend = None env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None) self.device = torch.device(env_device) if env_device is not None else None self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE") use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None) if use_sagemaker_dp is None: use_sagemaker_dp = ( os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true" and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO ) if use_sagemaker_dp and not cpu: if ( os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL ) or use_sagemaker_dp: self.distributed_type = DistributedType.MULTI_GPU import smdistributed.dataparallel.torch.torch_smddp # noqa if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="smddp") self.backend = "smddp" self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_torch_xla_available() and not cpu: self.distributed_type = DistributedType.XLA self.device = xm.xla_device() xm.set_replication(self.device, xm.get_xla_supported_devices()) self.num_processes = xm.xrt_world_size() self.process_index = xm.get_ordinal() if is_torch_xla_available(check_is_tpu=True): self.local_process_index = xm.get_local_ordinal() else: self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) elif ( os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu ): assert ( 
is_deepspeed_available()
            ), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source"
            self.distributed_type = DistributedType.DEEPSPEED
            if not torch.distributed.is_initialized():
                from deepspeed import comm as dist

                # DeepSpeed always uses nccl
                kwargs.pop("backend", None)
                if is_xpu_available() and is_ccl_available():
                    # Set DeepSpeed backend to ccl for xpu
                    self.backend = "ccl"
                    os.environ["CCL_PROCESS_LAUNCHER"] = "none"
                    os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1")
                    os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0")
                elif is_npu_available():
                    self.backend = "hccl"
                else:
                    self.backend = "nccl"
                dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)

            self.num_processes = torch.distributed.get_world_size()
            self.process_index = torch.distributed.get_rank()
            self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
            if self.device is None:
                if is_xpu_available():
                    self.device = torch.device("xpu", self.local_process_index)
                    if self.device is not None:
                        torch.xpu.set_device(self.device)
                elif is_npu_available():
                    self.device = torch.device("npu", self.local_process_index)
                    if self.device is not None:
                        torch.npu.set_device(self.device)
                else:
                    self.device = torch.device("cuda", self.local_process_index)
                    if self.device is not None:
                        torch.cuda.set_device(self.device)
            if self.device.type == "cuda" and not check_cuda_p2p_ib_support():
                if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ:
                    raise NotImplementedError(
                        "The RTX 4000 series doesn't support faster communication bandwidth via P2P or IB. "
                        'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1"` or use `accelerate launch` which '
                        "will do this automatically."
                    )
            self._mixed_precision = "no"  # deepspeed handles mixed_precision using deepspeed_config
        elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu and torch.cuda.is_available():
            self.distributed_type = DistributedType.MULTI_GPU
            if not torch.distributed.is_initialized():
                self.backend = kwargs.pop("backend", "nccl")
                # Special case for `TrainingArguments`, where `backend` will be `None`
                if self.backend is None:
                    self.backend = "nccl"
                torch.distributed.init_process_group(backend=self.backend, **kwargs)
            if not check_cuda_p2p_ib_support():
                if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ:
                    raise NotImplementedError(
                        "The RTX 4000 series doesn't support faster communication bandwidth via P2P or IB. "
                        'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1"` or use `accelerate launch` which '
                        "will do this automatically."
) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_npu_available() and not cpu and int(os.environ.get("LOCAL_RANK", -1)) != -1: self.distributed_type = DistributedType.MULTI_NPU if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = "hccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("npu", self.local_process_index) torch.npu.set_device(self.device) elif ( get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1 or int(os.environ.get("LOCAL_RANK", -1)) != -1 ): if not cpu and is_xpu_available(): self.distributed_type = DistributedType.MULTI_XPU else: self.distributed_type = DistributedType.MULTI_CPU # Actually, CCL_WORKER_COUNT is a CPU only env var in CCL, no need to set it for XPU. if is_ccl_available() and ( get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or self.distributed_type == DistributedType.MULTI_XPU ): if get_ccl_version() >= "1.12": import oneccl_bindings_for_pytorch # noqa: F401 else: import torch_ccl # noqa: F401 backend = "ccl" elif torch.distributed.is_mpi_available(): backend = "mpi" else: backend = "gloo" # Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH rank = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1) local_rank = get_int_from_env( ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 ) local_size = get_int_from_env( ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1, ) self.local_process_index = local_rank os.environ["RANK"] = str(rank) os.environ["WORLD_SIZE"] = str(size) os.environ["LOCAL_RANK"] = str(local_rank) os.environ["LOCAL_WORLD_SIZE"] = str(local_size) if backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU: os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = str(local_size) os.environ["CCL_LOCAL_RANK"] = str(local_rank) if not os.environ.get("MASTER_PORT", None): os.environ["MASTER_PORT"] = "29500" if not os.environ.get("MASTER_ADDR", None): if local_size != size and backend != "mpi": raise ValueError( "Looks like distributed multinode run but MASTER_ADDR env not set, " "please try exporting rank 0's hostname as MASTER_ADDR" ) if ( self.distributed_type == DistributedType.MULTI_CPU and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0 ): import psutil num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size) if num_cpu_threads_per_process == 0: num_cpu_threads_per_process = 1 torch.set_num_threads(num_cpu_threads_per_process) warnings.warn( f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob" " performance." 
) if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = backend torch.distributed.init_process_group(self.backend, rank=rank, world_size=size, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() if cpu: self.device = torch.device("cpu") elif is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) torch.xpu.set_device(self.device) else: self.device = self.default_device else: self.distributed_type = ( DistributedType.NO if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "false" else DistributedType.DEEPSPEED ) self.num_processes = 1 self.process_index = self.local_process_index = 0 if self.device is None: self.device = torch.device("cpu") if cpu else self.default_device self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0) def __repr__(self) -> str: return ( f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n" f"Num processes: {self.num_processes}\n" f"Process index: {self.process_index}\n" f"Local process index: {self.local_process_index}\n" f"Device: {self.device}\n" ) def _reset_state(): "Resets `_shared_state`, is used internally and should not be called" PartialState._shared_state.clear() def initialized(self) -> bool: "Returns whether the `PartialState` has been initialized" return self._shared_state != {} def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return self.distributed_type != DistributedType.NO and self.num_processes > 1 def is_last_process(self) -> bool: "Returns whether the current process is the last one" return self.process_index == self.num_processes - 1 def is_main_process(self) -> bool: "Returns whether the current process is the main process" return ( self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return ( self.local_process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def wait_for_everyone(self): """ Will stop the execution of the current process until every other process has reached that point (so this does nothing when the script is only run in one process). Useful to do before saving a model. Example: ```python >>> # Assuming two GPU processes >>> import time >>> from accelerate.state import PartialState >>> state = PartialState() >>> if state.is_main_process: ... time.sleep(2) >>> else: ... print("I'm waiting for the main process to finish its sleep...") >>> state.wait_for_everyone() >>> # Should print on every process at the same time >>> print("Everyone is here") ``` """ if self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, DistributedType.DEEPSPEED, DistributedType.FSDP, ): torch.distributed.barrier() elif self.distributed_type == DistributedType.XLA: xm.rendezvous("accelerate.utils.wait_for_everyone") def _goes_first(self, is_main: bool): if not is_main: self.wait_for_everyone() yield if is_main: self.wait_for_everyone() def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. 
Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate import PartialState state = PartialState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ if self.num_processes == 1: yield inputs return length = len(inputs) # Nested dictionary of any types if isinstance(inputs, dict): length = len(inputs[list(inputs.keys())[0]]) if not all(len(v) == length for v in inputs.values()): raise ValueError("All values in the dictionary must have the same length") num_samples_per_process = math.ceil(length / self.num_processes) start_index = self.process_index * num_samples_per_process end_index = start_index + num_samples_per_process if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1): end_index = length def _split_values(inputs, start_index, end_index): if isinstance(inputs, (list, tuple, torch.Tensor)): if start_index >= len(inputs): result = inputs[-1:] else: result = inputs[start_index:end_index] if apply_padding: if isinstance(result, torch.Tensor): from accelerate.utils import pad_across_processes, send_to_device # The tensor needs to be on the device before we can pad it tensorized_result = send_to_device(result, self.device) result = pad_across_processes(tensorized_result, pad_index=inputs[-1]) else: result += [result[-1]] * (num_samples_per_process - len(result)) return result elif isinstance(inputs, dict): for key in inputs.keys(): inputs[key] = _split_values(inputs[key], start_index, end_index) return inputs else: if is_datasets_available(): from datasets import Dataset if isinstance(inputs, Dataset): if start_index >= len(inputs): start_index = len(inputs) - 1 if end_index > len(inputs): end_index = len(inputs) result_idcs = list(range(start_index, end_index)) if apply_padding: result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs)) return inputs.select(result_idcs) return inputs yield _split_values(inputs, start_index, end_index) def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> with accelerator.main_process_first(): ... # This will be printed first by process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {accelerator.process_index}") ``` """ yield from self._goes_first(self.is_main_process) def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. 
Example:

        ```python
        >>> from accelerate.state import PartialState

        >>> state = PartialState()
        >>> with state.local_main_process_first():
        ...     # This will be printed first by local process 0 then in a seemingly
        ...     # random order by the other processes.
        ...     print(f"This will be printed by process {state.local_process_index}")
        ```
        """
        yield from self._goes_first(self.is_local_main_process)

    def on_main_process(self, function: Callable[..., Any] = None):
        """
        Decorator that only runs the decorated function on the main process.

        Args:
            function (`Callable`): The function to decorate.

        Example:

        ```python
        >>> from accelerate.state import PartialState

        >>> state = PartialState()
        >>> @state.on_main_process
        ... def print_something():
        ...     print("This will be printed by process 0 only.")


        >>> print_something()
        "This will be printed by process 0 only"
        ```
        """
        if not self.initialized:
            raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.")
        if self.is_main_process or not self.use_distributed:
            return function
        return do_nothing

    def on_local_main_process(self, function: Callable[..., Any] = None):
        """
        Decorator that only runs the decorated function on the local main process.

        Args:
            function (`Callable`): The function to decorate.

        Example:

        ```python
        # Assume we have 2 servers with 4 processes each.
        from accelerate.state import PartialState

        state = PartialState()


        @state.on_local_main_process
        def print_something():
            print("This will be printed by process 0 only on each server.")


        print_something()
        # On server 1:
        "This will be printed by process 0 only"
        # On server 2:
        "This will be printed by process 0 only"
        ```
        """
        if self.is_local_main_process or not self.use_distributed:
            return function
        return do_nothing

    def on_last_process(self, function: Callable[..., Any]):
        """
        Decorator that only runs the decorated function on the last process.

        Args:
            function (`Callable`): The function to decorate.

        Example:

        ```python
        # Assume we have 4 processes.
        from accelerate.state import PartialState

        state = PartialState()


        @state.on_last_process
        def print_something():
            print(f"Printed on process {state.process_index}")


        print_something()
        "Printed on process 3"
        ```
        """
        if self.is_last_process or not self.use_distributed:
            return function
        return do_nothing

    def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
        """
        Decorator that only runs the decorated function on the process with the given index.

        Args:
            function (`Callable`, `optional`):
                The function to decorate.
            process_index (`int`, `optional`):
                The index of the process on which to run the function.

        Example:

        ```python
        # Assume we have 4 processes.
        from accelerate.state import PartialState

        state = PartialState()


        @state.on_process(process_index=2)
        def print_something():
            print(f"Printed on process {state.process_index}")


        print_something()
        "Printed on process 2"
        ```
        """
        if function is None:
            return partial(self.on_process, process_index=process_index)
        if (self.process_index == process_index) or (not self.use_distributed):
            return function
        return do_nothing

    def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
        """
        Decorator that only runs the decorated function on the process with the given index on the current node.

        Args:
            function (`Callable`, *optional*):
                The function to decorate.
            local_process_index (`int`, *optional*):
                The index of the local process on which to run the function.

        Example:

        ```python
        # Assume we have 2 servers with 4 processes each.
from accelerate import Accelerator

        accelerator = Accelerator()


        @accelerator.on_local_process(local_process_index=2)
        def print_something():
            print(f"Printed on process {accelerator.local_process_index}")


        print_something()
        # On server 1:
        "Printed on process 2"
        # On server 2:
        "Printed on process 2"
        ```
        """
        if function is None:
            return partial(self.on_local_process, local_process_index=local_process_index)
        if (self.local_process_index == local_process_index) or (not self.use_distributed):
            return function
        return do_nothing

    def print(self, *args, **kwargs):
        if self.is_local_main_process:
            print(*args, **kwargs)

    def default_device(self) -> torch.device:
        """
        Returns the default device which is:

        - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True.
        - CUDA if `torch.cuda.is_available()`
        - NPU if `is_npu_available()`
        - CPU otherwise
        """
        if is_mps_available():
            os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
            return torch.device("mps")
        elif torch.cuda.is_available():
            return torch.device("cuda")
        elif is_xpu_available():
            return torch.device("xpu:0")
        elif is_npu_available():
            return torch.device("npu")
        else:
            return torch.device("cpu")
The provided code snippet includes necessary dependencies for implementing the `on_main_process` function. Write a Python function `def on_main_process(function)` to solve the following problem: Decorator to selectively run the decorated function on the main process only based on the `main_process_only` attribute in a class. Checks at function execution rather than initialization time, not triggering the initialization of the `PartialState`. Here is the function: def on_main_process(function):
    """
    Decorator to selectively run the decorated function on the main process only based on the `main_process_only`
    attribute in a class.

    Checks at function execution rather than initialization time, not triggering the initialization of the
    `PartialState`.
    """

    @wraps(function)
    def execute_on_main_process(self, *args, **kwargs):
        if getattr(self, "main_process_only", False):
            return PartialState().on_main_process(function)(self, *args, **kwargs)
        else:
            return function(self, *args, **kwargs)

    return execute_on_main_process
Decorator to selectively run the decorated function on the main process only based on the `main_process_only` attribute in a class. Checks at function execution rather than initialization time, not triggering the initialization of the `PartialState`.
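A brief sketch of the intended call pattern, using a hypothetical `DummyTracker` class (not part of the source):

```python
# Sketch: the wrapper reads `main_process_only` on the *instance* at call
# time, so merely defining or importing the class never constructs a
# PartialState.
class DummyTracker:
    main_process_only = True  # set to False to run `log` on every rank

    @on_main_process
    def log(self, values):
        print(f"logged: {values}")

DummyTracker().log({"loss": 0.1})  # gated to the main process when distributed
```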
2,902
import json
import os
import time
from functools import wraps
from typing import Any, Dict, List, Optional, Union

import yaml

from .logging import get_logger
from .state import PartialState
from .utils import (
    LoggerType,
    is_aim_available,
    is_clearml_available,
    is_comet_ml_available,
    is_dvclive_available,
    is_mlflow_available,
    is_tensorboard_available,
    is_wandb_available,
    listify,
)

logger = get_logger(__name__)

def get_available_trackers():
    "Returns a list of all supported available trackers in the system"
    return _available_trackers

class GeneralTracker:
    """
    A base Tracker class to be used for all logging integration implementations.

    Each function should take in `**kwargs` that will automatically be passed in from a base dictionary provided to
    [`Accelerator`].

    Should implement `name`, `requires_logging_directory`, and `tracker` properties such that:

    `name` (`str`): String representation of the tracker class name, such as "TensorBoard"
    `requires_logging_directory` (`bool`): Whether the logger requires a directory to store its logs.
    `tracker` (`object`): Should return the internal tracking mechanism used by the tracker class (such as the `run`
    object for wandb).

    Implementations can also include a `main_process_only` (`bool`) attribute to toggle whether the relevant logging,
    init, and other functions should occur on the main process or across all processes (by default this is `True`).
    """

    main_process_only = True

    def __init__(self, _blank=False):
        if not _blank:
            err = ""
            if not hasattr(self, "name"):
                err += "`name`"
            if not hasattr(self, "requires_logging_directory"):
                if len(err) > 0:
                    err += ", "
                err += "`requires_logging_directory`"

            # as tracker is a @property that relies on post-init
            if "tracker" not in dir(self):
                if len(err) > 0:
                    err += ", "
                err += "`tracker`"
            if len(err) > 0:
                raise NotImplementedError(
                    f"The implementation for this tracker class is missing the following "
                    f"required attributes. Please define them in the class definition: "
                    f"{err}"
                )

    def store_init_configuration(self, values: dict):
        """
        Logs `values` as hyperparameters for the run. Implementations should use the experiment configuration
        functionality of a tracking API.

        Args:
            values (Dictionary `str` to `bool`, `str`, `float` or `int`):
                Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
                `str`, `float`, `int`, or `None`.
        """
        pass

    def log(self, values: dict, step: Optional[int], **kwargs):
        """
        Logs `values` to the current run. Base `log` implementations of a tracking API should go in here, along with
        special behavior for the `step` parameter.

        Args:
            values (Dictionary `str` to `str`, `float`, or `int`):
                Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.
            step (`int`, *optional*):
                The run step. If included, the log will be affiliated with this step.
        """
        pass

    def finish(self):
        """
        Should run any finalizing functions within the tracking API. If the API doesn't have one, don't overwrite
        that method.
        """
        pass

LOGGER_TYPE_TO_CLASS = {
    "aim": AimTracker,
    "comet_ml": CometMLTracker,
    "mlflow": MLflowTracker,
    "tensorboard": TensorBoardTracker,
    "wandb": WandBTracker,
    "clearml": ClearMLTracker,
    "dvclive": DVCLiveTracker,
}
Write a Python function `def filter_trackers( log_with: List[Union[str, LoggerType, GeneralTracker]], logging_dir: Union[str, os.PathLike] = None, )` to solve the following problem: Takes in a list of potential tracker types and checks that: - The tracker wanted is available in that environment - Filters out repeats of tracker types - If `all` is in `log_with`, will return all trackers in the environment - If a tracker requires a `logging_dir`, ensures that `logging_dir` is not `None` Args: log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*): A list of loggers to be setup for experiment tracking. Should be one or several of: - `"all"` - `"tensorboard"` - `"wandb"` - `"comet_ml"` - `"mlflow"` - `"dvclive"` If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`. logging_dir (`str`, `os.PathLike`, *optional*): A path to a directory for storing logs of locally-compatible loggers. Here is the function: def filter_trackers( log_with: List[Union[str, LoggerType, GeneralTracker]], logging_dir: Union[str, os.PathLike] = None, ): """ Takes in a list of potential tracker types and checks that: - The tracker wanted is available in that environment - Filters out repeats of tracker types - If `all` is in `log_with`, will return all trackers in the environment - If a tracker requires a `logging_dir`, ensures that `logging_dir` is not `None` Args: log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*): A list of loggers to be setup for experiment tracking. Should be one or several of: - `"all"` - `"tensorboard"` - `"wandb"` - `"comet_ml"` - `"mlflow"` - `"dvclive"` If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`. logging_dir (`str`, `os.PathLike`, *optional*): A path to a directory for storing logs of locally-compatible loggers. """ loggers = [] if log_with is not None: if not isinstance(log_with, (list, tuple)): log_with = [log_with] if "all" in log_with or LoggerType.ALL in log_with: loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers() else: for log_type in log_with: if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker): raise ValueError(f"Unsupported logging capability: {log_type}. Choose between {LoggerType.list()}") if issubclass(type(log_type), GeneralTracker): loggers.append(log_type) else: log_type = LoggerType(log_type) if log_type not in loggers: if log_type in get_available_trackers(): tracker_init = LOGGER_TYPE_TO_CLASS[str(log_type)] if tracker_init.requires_logging_directory: if logging_dir is None: raise ValueError( f"Logging with `{log_type}` requires a `logging_dir` to be passed in." ) loggers.append(log_type) else: logger.debug(f"Tried adding logger {log_type}, but package is unavailable in the system.") return loggers
Takes in a list of potential tracker types and checks that: - The tracker wanted is available in that environment - Filters out repeats of tracker types - If `all` is in `log_with`, will return all trackers in the environment - If a tracker requires a `logging_dir`, ensures that `logging_dir` is not `None` Args: log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*): A list of loggers to be setup for experiment tracking. Should be one or several of: - `"all"` - `"tensorboard"` - `"wandb"` - `"comet_ml"` - `"mlflow"` - `"dvclive"` If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`. logging_dir (`str`, `os.PathLike`, *optional*): A path to a directory for storing logs of locally-compatible loggers.
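A hedged usage sketch, assuming the tensorboard and wandb packages are installed in the environment:

```python
# Sketch: resolve user input into LoggerType values, dropping trackers whose
# packages are missing and validating `logging_dir` where one is required.
loggers = filter_trackers(["tensorboard", "wandb"], logging_dir="./runs")
# -> [LoggerType.TENSORBOARD, LoggerType.WANDB] when both are installed

# TensorBoard writes local files, so omitting `logging_dir` raises:
# filter_trackers(["tensorboard"])  # ValueError: requires a `logging_dir`
```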
2,903
import importlib.metadata import subprocess import sys The provided code snippet includes necessary dependencies for implementing the `install_xla` function. Write a Python function `def install_xla(upgrade: bool = False)` to solve the following problem: Helper function to install appropriate xla wheels based on the `torch` version in Google Colaboratory. Args: upgrade (`bool`, *optional*, defaults to `False`): Whether to upgrade `torch` and install the latest `torch_xla` wheels. Example: ```python >>> from accelerate.utils import install_xla >>> install_xla(upgrade=True) ``` Here is the function: def install_xla(upgrade: bool = False): """ Helper function to install appropriate xla wheels based on the `torch` version in Google Colaboratory. Args: upgrade (`bool`, *optional*, defaults to `False`): Whether to upgrade `torch` and install the latest `torch_xla` wheels. Example: ```python >>> from accelerate.utils import install_xla >>> install_xla(upgrade=True) ``` """ in_colab = False if "IPython" in sys.modules: in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython()) if in_colab: if upgrade: torch_install_cmd = ["pip", "install", "-U", "torch"] subprocess.run(torch_install_cmd, check=True) # get the current version of torch torch_version = importlib.metadata.version("torch") torch_version_trunc = torch_version[: torch_version.rindex(".")] xla_wheel = f"https://storage.googleapis.com/tpu-pytorch/wheels/colab/torch_xla-{torch_version_trunc}-cp37-cp37m-linux_x86_64.whl" xla_install_cmd = ["pip", "install", xla_wheel] subprocess.run(xla_install_cmd, check=True) else: raise RuntimeError("`install_xla` utility works only on google colab.")
Helper function to install appropriate xla wheels based on the `torch` version in Google Colaboratory. Args: upgrade (`bool`, *optional*, defaults to `False`): Whether to upgrade `torch` and install the latest `torch_xla` wheels. Example: ```python >>> from accelerate.utils import install_xla >>> install_xla(upgrade=True) ```
2,904
import os import torch from ..logging import get_logger from .constants import FSDP_MODEL_NAME, FSDP_PYTORCH_VERSION, OPTIMIZER_NAME from .imports import is_torch_distributed_available from .modeling import is_peft_model from .versions import is_torch_version logger = get_logger(__name__) def _get_model_state_dict(model, adapter_only=False): if adapter_only and is_peft_model(model): from peft import get_peft_model_state_dict return get_peft_model_state_dict(model, adapter_name=model.active_adapter) else: return model.state_dict() FSDP_MODEL_NAME = "pytorch_model_fsdp" def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0, adapter_only=False): os.makedirs(output_dir, exist_ok=True) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT # so, only enable it when num_processes>1 is_multi_process = accelerator.num_processes > 1 fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process fsdp_plugin.state_dict_config.rank0_only = is_multi_process with FSDP.state_dict_type( model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): state_dict = _get_model_state_dict(model, adapter_only=adapter_only) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin" output_model_file = os.path.join(output_dir, weights_name) if accelerator.process_index == 0: logger.info(f"Saving model to {output_model_file}") torch.save(state_dict, output_model_file) logger.info(f"Model saved to {output_model_file}") elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: weights_name = ( f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" ) output_model_file = os.path.join(output_dir, weights_name) logger.info(f"Saving model to {output_model_file}") torch.save(state_dict, output_model_file) logger.info(f"Model saved to {output_model_file}") elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: ckpt_dir = os.path.join(output_dir, f"{FSDP_MODEL_NAME}_{model_index}") os.makedirs(ckpt_dir, exist_ok=True) logger.info(f"Saving model to {ckpt_dir}") state_dict = {"model": state_dict} dist_cp.save_state_dict( state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), ) logger.info(f"Model saved to {ckpt_dir}")
null
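A hedged sketch of a typical call; `accelerator`, `model`, and the checkpoint path are placeholders, and `model` is assumed to have gone through `accelerator.prepare(...)` under an FSDP plugin:

```python
# Sketch: the plugin's state_dict_type picks the on-disk layout -- one rank-0
# file (FULL_STATE_DICT), per-rank files (LOCAL_STATE_DICT), or a sharded
# checkpoint directory (SHARDED_STATE_DICT).
from accelerate.utils import save_fsdp_model

save_fsdp_model(
    accelerator.state.fsdp_plugin,  # carries state_dict_type and its configs
    accelerator,
    model,
    "checkpoints/step_1000",  # created if it does not exist
    model_index=0,
)
```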
2,905
import os import torch from ..logging import get_logger from .constants import FSDP_MODEL_NAME, FSDP_PYTORCH_VERSION, OPTIMIZER_NAME from .imports import is_torch_distributed_available from .modeling import is_peft_model from .versions import is_torch_version logger = get_logger(__name__) def _get_model_state_dict(model, adapter_only=False): if adapter_only and is_peft_model(model): from peft import get_peft_model_state_dict return get_peft_model_state_dict(model, adapter_name=model.active_adapter) else: return model.state_dict() def _set_model_state_dict(model, state_dict, adapter_only=False): if adapter_only and is_peft_model(model): from peft import set_peft_model_state_dict return set_peft_model_state_dict(model, state_dict, adapter_name=model.active_adapter) else: return model.load_state_dict(state_dict) FSDP_MODEL_NAME = "pytorch_model_fsdp" def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0, adapter_only=False): accelerator.wait_for_everyone() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT # so, only enable it when num_processes>1 is_multi_process = accelerator.num_processes > 1 fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process fsdp_plugin.state_dict_config.rank0_only = is_multi_process with FSDP.state_dict_type( model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(model) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin" input_model_file = os.path.join(input_dir, weights_name) logger.info(f"Loading model from {input_model_file}") state_dict = torch.load(input_model_file) logger.info(f"Model loaded from {input_model_file}") elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: weights_name = ( f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" ) input_model_file = os.path.join(input_dir, weights_name) logger.info(f"Loading model from {input_model_file}") state_dict = torch.load(input_model_file) logger.info(f"Model loaded from {input_model_file}") elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: ckpt_dir = ( os.path.join(input_dir, f"{FSDP_MODEL_NAME}_{model_index}") if f"{FSDP_MODEL_NAME}" not in input_dir else input_dir ) logger.info(f"Loading model from {ckpt_dir}") state_dict = {"model": _get_model_state_dict(model, adapter_only=adapter_only)} dist_cp.load_state_dict( state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir), planner=DefaultLoadPlanner(), ) state_dict = state_dict["model"] logger.info(f"Model loaded from {ckpt_dir}") load_result = _set_model_state_dict(model, state_dict, adapter_only=adapter_only) return load_result
null
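A hedged sketch mirroring `save_fsdp_model`, with the same placeholder names:

```python
# Sketch: under FULL_STATE_DICT only rank 0 reads the file, so the plugin
# needs sync_module_states=True for the weights to reach the other ranks.
from accelerate.utils import load_fsdp_model

load_fsdp_model(
    accelerator.state.fsdp_plugin,
    accelerator,
    model,  # the prepared, FSDP-wrapped model
    "checkpoints/step_1000",  # directory written by save_fsdp_model
)
```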
2,906
import os import torch from ..logging import get_logger from .constants import FSDP_MODEL_NAME, FSDP_PYTORCH_VERSION, OPTIMIZER_NAME from .imports import is_torch_distributed_available from .modeling import is_peft_model from .versions import is_torch_version logger = get_logger(__name__) OPTIMIZER_NAME = "optimizer" def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0): os.makedirs(output_dir, exist_ok=True) with FSDP.state_dict_type( model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): optim_state = FSDP.optim_state_dict(model, optimizer) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: optim_state_name = ( f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin" ) output_optimizer_file = os.path.join(output_dir, optim_state_name) logger.info(f"Saving Optimizer state to {output_optimizer_file}") torch.save(optim_state, output_optimizer_file) logger.info(f"Optimizer state saved in {output_optimizer_file}") else: ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}") os.makedirs(ckpt_dir, exist_ok=True) logger.info(f"Saving Optimizer state to {ckpt_dir}") dist_cp.save_state_dict( state_dict={"optimizer": optim_state}, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), ) logger.info(f"Optimizer state saved in {ckpt_dir}")
null
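A hedged sketch with the same placeholder names as above:

```python
# Sketch: FSDP.optim_state_dict first gathers the sharded optimizer state;
# `optimizer_index` keeps files apart when several optimizers were prepared.
from accelerate.utils import save_fsdp_optimizer

save_fsdp_optimizer(
    accelerator.state.fsdp_plugin,
    accelerator,
    optimizer,  # returned by accelerator.prepare(...)
    model,
    "checkpoints/step_1000",
    optimizer_index=0,
)
```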
2,907
import os import torch from ..logging import get_logger from .constants import FSDP_MODEL_NAME, FSDP_PYTORCH_VERSION, OPTIMIZER_NAME from .imports import is_torch_distributed_available from .modeling import is_peft_model from .versions import is_torch_version logger = get_logger(__name__) def _get_model_state_dict(model, adapter_only=False): if adapter_only and is_peft_model(model): from peft import get_peft_model_state_dict return get_peft_model_state_dict(model, adapter_name=model.active_adapter) else: return model.state_dict() OPTIMIZER_NAME = "optimizer" def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0, adapter_only=False): accelerator.wait_for_everyone() with FSDP.state_dict_type( model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: optim_state = None if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: optimizer_name = ( f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin" ) input_optimizer_file = os.path.join(input_dir, optimizer_name) logger.info(f"Loading Optimizer state from {input_optimizer_file}") optim_state = torch.load(input_optimizer_file) logger.info(f"Optimizer state loaded from {input_optimizer_file}") else: ckpt_dir = ( os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}") if f"{OPTIMIZER_NAME}" not in input_dir else input_dir ) logger.info(f"Loading Optimizer from {ckpt_dir}") optim_state = load_sharded_optimizer_state_dict( model_state_dict=_get_model_state_dict(model, adapter_only=adapter_only), optimizer_key="optimizer", storage_reader=dist_cp.FileSystemReader(ckpt_dir), ) optim_state = optim_state["optimizer"] logger.info(f"Optimizer loaded from {ckpt_dir}") flattened_osd = FSDP.optim_state_dict_to_load(model=model, optim=optimizer, optim_state_dict=optim_state) optimizer.load_state_dict(flattened_osd)
null
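A hedged sketch completing the round trip (placeholder names as above):

```python
# Sketch: the saved full or sharded state is re-flattened for this run's
# sharding via FSDP.optim_state_dict_to_load before load_state_dict is called.
from accelerate.utils import load_fsdp_optimizer

load_fsdp_optimizer(
    accelerator.state.fsdp_plugin,
    accelerator,
    optimizer,
    model,
    "checkpoints/step_1000",
    optimizer_index=0,
)
```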
2,908
import argparse import math from abc import ABC from functools import partial import torch import torch.nn.functional as F from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler from .imports import is_megatron_lm_available, is_transformers_available from .operations import recursively_apply, send_to_device def model_provider_func(pre_process=True, post_process=True, add_encoder=True, add_decoder=True): """Build the model.""" args = get_args() mode = "pre-training" if args.pretraining_flag else "fine-tuning" if args.rank == 0: print(f"Building {args.model_type_name} model in the {mode} mode.") print( "The Megatron LM model weights are initialized at random in `accelerator.prepare`. " "Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup." ) if args.model_type_name == "bert": if args.pretraining_flag: num_tokentypes = 2 if args.bert_binary_head else 0 model = BertModel( num_tokentypes=num_tokentypes, add_binary_head=args.bert_binary_head, parallel_output=True, pre_process=pre_process, post_process=post_process, ) else: model = Classification( num_classes=args.num_labels, num_tokentypes=2, pre_process=pre_process, post_process=post_process ) elif args.model_type_name == "gpt": model = GPTModel(num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process) elif args.model_type_name == "t5": model = T5Model( num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process, add_encoder=add_encoder, add_decoder=add_decoder, ) else: raise ValueError(f"Unsupported model type: {args.model_type_name}") return model def prepare_model(accelerator): accelerator.print("Preparing model") args = get_args() if accelerator.state.megatron_lm_plugin.custom_prepare_model_function is not None: if accelerator.state.megatron_lm_plugin.custom_model_provider_function is None: raise ValueError( "You must provide a `custom_model_provider_function` when using a `custom_prepare_model_function`." ) custom_model_provider_func = accelerator.state.megatron_lm_plugin.custom_model_provider_function model = accelerator.state.megatron_lm_plugin.custom_prepare_model_function(custom_model_provider_func) else: if args.model_type_name in ("bert", "gpt"): model_type = ModelType.encoder_or_decoder elif args.model_type_name == "t5": model_type = ModelType.encoder_and_decoder if args.pipeline_model_parallel_split_rank is None and args.pipeline_model_parallel_size > 1: args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2 model = get_model(model_provider_func, model_type) return model
null
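A hedged end-to-end sketch: `prepare_model` is not meant to be called directly; it runs inside `accelerator.prepare()` when Megatron-LM is enabled. The plugin arguments below are illustrative, and `model`/`optimizer`/`scheduler` are assumed to be defined elsewhere:

```python
# Sketch: a MegatronLMPlugin drives tensor/pipeline parallel degrees and the
# micro-batch schedule; prepare() then calls the helpers in this module.
from accelerate import Accelerator
from accelerate.utils import MegatronLMPlugin

plugin = MegatronLMPlugin(tp_degree=2, pp_degree=2, num_micro_batches=4)
accelerator = Accelerator(megatron_lm_plugin=plugin)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
```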
2,909
import argparse import math from abc import ABC from functools import partial import torch import torch.nn.functional as F from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler from .imports import is_megatron_lm_available, is_transformers_available from .operations import recursively_apply, send_to_device _PYTORCH_DATALOADER_KWARGS = { "batch_size": 1, "shuffle": False, "sampler": None, "batch_sampler": None, "num_workers": 0, "collate_fn": None, "pin_memory": False, "drop_last": False, "timeout": 0, "worker_init_fn": None, "multiprocessing_context": None, "generator": None, "prefetch_factor": 2, "persistent_workers": False, } def prepare_data_loader(accelerator, dataloader): accelerator.print("Preparing dataloader") args = get_args() if not args.megatron_dataset_flag: from ..data_loader import _PYTORCH_DATALOADER_KWARGS, prepare_data_loader args = get_args() micro_batch_size = args.micro_batch_size * args.num_micro_batches kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS} if kwargs["batch_size"] is None: if isinstance(kwargs["sampler"], torch.utils.data.BatchSampler): kwargs["sampler"].batch_size = micro_batch_size else: del kwargs["sampler"] del kwargs["shuffle"] del kwargs["batch_size"] kwargs["batch_sampler"].batch_size = micro_batch_size else: del kwargs["batch_sampler"] kwargs["batch_size"] = micro_batch_size dataloader = torch.utils.data.DataLoader(dataloader.dataset, **kwargs) return prepare_data_loader( dataloader, accelerator.device, num_processes=mpu.get_data_parallel_world_size(), process_index=mpu.get_data_parallel_rank(), split_batches=accelerator.split_batches, put_on_device=True, rng_types=accelerator.rng_types.copy(), dispatch_batches=accelerator.dispatch_batches, ) else: if args.consumed_samples is not None: ( args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples, ) = args.consumed_samples else: args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples = 0, 0, 0 ( train_data_iterator, valid_data_iterator, test_data_iterator, ) = dataloader.build_train_valid_test_data_iterators() return train_data_iterator, valid_data_iterator, test_data_iterator
null
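A hedged sketch of the non-Megatron-dataset path (`train_dataset` and `accelerator` are placeholders):

```python
# Sketch: a plain DataLoader is rebuilt so each data-parallel rank consumes
# micro_batch_size * num_micro_batches samples per step, then sharded across
# the data-parallel group.
import torch

loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
train_loader = accelerator.prepare(loader)  # routes through prepare_data_loader
```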
2,910
import argparse import math from abc import ABC from functools import partial import torch import torch.nn.functional as F from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler from .imports import is_megatron_lm_available, is_transformers_available from .operations import recursively_apply, send_to_device def prepare_optimizer(accelerator, model): accelerator.print("Preparing optimizer") args = get_args() optimizer = get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult) return optimizer
null
2,911
import argparse import math from abc import ABC from functools import partial import torch import torch.nn.functional as F from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler from .imports import is_megatron_lm_available, is_transformers_available from .operations import recursively_apply, send_to_device def prepare_scheduler(accelerator, optimizer, scheduler): accelerator.print("Preparing scheduler") scheduler = get_optimizer_param_scheduler(optimizer) return scheduler
null
2,912
import argparse import math from abc import ABC from functools import partial import torch import torch.nn.functional as F from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler from .imports import is_megatron_lm_available, is_transformers_available from .operations import recursively_apply, send_to_device def initialize(accelerator, extra_args_provider=None, args_defaults={}): accelerator.print("Initializing Megatron-LM") assert torch.cuda.is_available(), "Megatron requires CUDA." # Parse arguments args = parse_args(extra_args_provider, ignore_unknown_args=True) # Set defaults for key, value in args_defaults.items(): if getattr(args, key, None) is not None: if args.rank == 0: print( f"WARNING: overriding default arguments for " f"{key}:{getattr(args, key)} with {key}:{value}", flush=True, ) setattr(args, key, value) if args.use_checkpoint_args or args_defaults.get("use_checkpoint_args", False): assert args.load is not None, "--use-checkpoints-args requires --load argument" load_args_from_checkpoint(args) validate_args(args) # set global args, build tokenizer, and set adlr-autoresume, # tensorboard-writer, and timers. set_global_variables(args) # torch.distributed initialization def finish_mpu_init(): args = get_args() # Pytorch distributed. device_count = torch.cuda.device_count() args.rank = torch.distributed.get_rank() args.world_size = torch.distributed.get_world_size() if device_count > 0: device = args.rank % device_count if args.local_rank is not None: assert args.local_rank == device, "expected local-rank to be the same as rank % device-count." else: args.local_rank = device # Set the tensor model-parallel, pipeline model-parallel, and # data-parallel communicators. if mpu.model_parallel_is_initialized(): print("model parallel is already initialized") else: mpu.initialize_model_parallel( args.tensor_model_parallel_size, args.pipeline_model_parallel_size, args.virtual_pipeline_model_parallel_size, args.pipeline_model_parallel_split_rank, ) # Random seeds for reproducibility. if args.rank == 0: print(f"> setting random seeds to {args.seed} ...") _set_random_seed(args.seed, args.data_parallel_random_init) args = get_args() # Megatron's MPU is the master. Complete initialization right away. finish_mpu_init() # Autoresume. _init_autoresume() # Compile dependencies. _compile_dependencies() # Set pytorch JIT layer fusion options and warmup JIT functions. set_jit_fusion_options() args = get_args() args.padded_vocab_size = _vocab_size_with_padding(args.orig_vocab_size, args) if args.model_type_name == "bert" and args.pretraining_flag and args.num_labels == 2: args.bert_binary_head = True else: args.bert_binary_head = False args.iteration = 0
null
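A hedged sketch (`accelerator` is a placeholder; a Megatron-LM install and an initialized distributed environment are assumed):

```python
# Sketch: values passed via `args_defaults` overwrite whatever Megatron's CLI
# parser produced, with a rank-0 warning whenever an existing value is
# replaced. Keys below are illustrative.
initialize(accelerator, args_defaults={"micro_batch_size": 2, "seq_length": 1024})
```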
2,913
import argparse import math from abc import ABC from functools import partial import torch import torch.nn.functional as F from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler from .imports import is_megatron_lm_available, is_transformers_available from .operations import recursively_apply, send_to_device The provided code snippet includes necessary dependencies for implementing the `avg_losses_across_data_parallel_group` function. Write a Python function `def avg_losses_across_data_parallel_group(losses)` to solve the following problem: Average losses across data parallel group. Args: losses (List[Tensor]): List of losses to average across data parallel group. Here is the function: def avg_losses_across_data_parallel_group(losses): """ Average losses across data parallel group. Args: losses (List[Tensor]): List of losses to average across data parallel group. """ return average_losses_across_data_parallel_group(losses)
Average losses across data parallel group. Args: losses (List[Tensor]): List of losses to average across data parallel group.
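A hedged sketch of a Megatron-style loss function (`logits` and `labels` are placeholder tensors):

```python
# Sketch: averaging the per-rank scalar loss over the data-parallel group
# makes every rank report the same number for logging.
import torch.nn.functional as F

def loss_func(labels, logits):
    loss = F.cross_entropy(logits, labels)  # per-rank scalar
    averaged_loss = avg_losses_across_data_parallel_group([loss])
    return loss, {"lm loss": averaged_loss[0]}  # identical on every rank
```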
2,914
import argparse import math from abc import ABC from functools import partial import torch import torch.nn.functional as F from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler from .imports import is_megatron_lm_available, is_transformers_available from .operations import recursively_apply, send_to_device def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs): """ Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type. Args: func (`callable`): The function to recursively apply. data (nested list/tuple/dictionary of `main_type`): The data on which to apply `func` *args: Positional arguments that will be passed to `func` when applied on the unpacked data. main_type (`type`, *optional*, defaults to `torch.Tensor`): The base type of the objects to which apply `func`. error_on_other_type (`bool`, *optional*, defaults to `False`): Whether to return an error or not if after unpacking `data`, we get on an object that is not of type `main_type`. If `False`, the function will leave objects of types different than `main_type` unchanged. **kwargs: Keyword arguments that will be passed to `func` when applied on the unpacked data. Returns: The same data structure as `data` with `func` applied to every object of type `main_type`. """ if isinstance(data, (tuple, list)): return honor_type( data, ( recursively_apply( func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for o in data ), ) elif isinstance(data, Mapping): return type(data)( { k: recursively_apply( func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for k, v in data.items() } ) elif test_type(data): return func(data, *args, **kwargs) elif error_on_other_type: raise TypeError( f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of " f"objects that are valid for `{test_type.__name__}` should be passed." ) return data The provided code snippet includes necessary dependencies for implementing the `gather_across_data_parallel_groups` function. Write a Python function `def gather_across_data_parallel_groups(tensor)` to solve the following problem: Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather across data parallel ranks. Here is the function: def gather_across_data_parallel_groups(tensor): """ Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather across data parallel ranks. """ def _gpu_gather_one(tensor): if tensor.ndim == 0: tensor = tensor.clone()[None] output_tensors = [ torch.empty_like(tensor) for _ in range(torch.distributed.get_world_size(group=mpu.get_data_parallel_group())) ] torch.distributed.all_gather(output_tensors, tensor, group=mpu.get_data_parallel_group()) return torch.cat(output_tensors, dim=0) return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather across data parallel ranks.
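A hedged evaluation-time sketch (`logits` and `loss` are placeholder per-rank tensors):

```python
# Sketch: nested containers are walked by recursively_apply; each leaf tensor
# is all-gathered over the data-parallel group and concatenated on dim 0
# (0-dim tensors such as a scalar loss are first given a batch dim).
outputs = {"logits": logits, "loss": loss}
gathered = gather_across_data_parallel_groups(outputs)
# gathered["logits"].shape[0] == logits.shape[0] * data_parallel_world_size
```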
2,915
import logging
import os
from copy import deepcopy
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)

logger = logging.getLogger(__name__)

def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check that we don't have any quantized module on the CPU
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the
                        quantized model. If you want to dispatch the model on the CPU or the disk while keeping these
                        modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`.
                        Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be "
                        "converted to 8-bit."
                    )
        del device_map_without_some_modules
    return device_map

def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """
    A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bit` modules or by `bnb.nn.Linear4bit`
    modules from the `bitsandbytes` library. The function will be run recursively and replace `torch.nn.Linear`
    modules.

    Parameters:
        model (`torch.nn.Module`):
            Input model or `torch.nn.Module` as the function is run recursively.
modules_to_not_convert (`List[str]`): Names of the modules to not convert. In practice we keep the `lm_head` in full precision for numerical stability reasons. current_key_name (`List[str]`, *optional*): An array to track the current key of the recursion. This is used to check whether the current key (part of it) is not in the list of modules to not convert. """ if modules_to_not_convert is None: modules_to_not_convert = [] model, has_been_replaced = _replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert, current_key_name ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def get_keys_to_not_convert(model): r""" A utility function to get the key of the module to keep in full precision, if any. For example, for CausalLM modules we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in int8. Parameters: model (`torch.nn.Module`): Input model """ # Create a copy of the model with init_empty_weights(): tied_model = deepcopy(model) # this has 0 cost since it is done inside the `init_empty_weights` context manager tied_params = find_tied_parameters(tied_model) # For compatibility with Accelerate < 0.18 if isinstance(tied_params, dict): tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys()) else: tied_keys = sum(tied_params, []) has_tied_params = len(tied_keys) > 0 # Check if it is a base model is_base_model = False if hasattr(model, "base_model_prefix"): is_base_model = not hasattr(model, model.base_model_prefix) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head list_modules = list(model.named_children()) list_last_module = [list_modules[-1][0]] # add last module together with tied weights intersection = set(list_last_module) - set(tied_keys) list_untouched = list(set(tied_keys)) + list(intersection) # remove ".weight" and ".bias" from the keys names_to_remove = [".weight", ".bias"] filtered_module_names = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: name = name.replace(name_to_remove, "") filtered_module_names.append(name) return filtered_module_names def get_parameter_device(parameter: nn.Module): return next(parameter.parameters()).device def is_4bit_bnb_available(): package_exists = _is_package_available("bitsandbytes") if package_exists: bnb_version = version.parse(importlib.metadata.version("bitsandbytes")) return compare_versions(bnb_version, ">=", "0.39.0") return False def is_8bit_bnb_available(): package_exists = _is_package_available("bitsandbytes") if package_exists: bnb_version = version.parse(importlib.metadata.version("bitsandbytes")) return compare_versions(bnb_version, ">=", "0.37.2") return False def init_empty_weights(include_buffers: bool = None): """ A context manager under which models are initialized with all parameters on the meta device, therefore creating an empty model. Useful when just initializing the model would blow the available RAM.
Args: include_buffers (`bool`, *optional*): Whether or not to also put all buffers on the meta device while initializing. Example: ```python import torch.nn as nn from accelerate import init_empty_weights # Initialize a model with 100 billion parameters in no time and without using any RAM. with init_empty_weights(): tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) ``` <Tip warning={true}> Any model created under this context manager has no weights. As such you can't do something like `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`]. Make sure to overwrite the default device_map param for [`load_checkpoint_and_dispatch`], otherwise dispatch is not called. </Tip> """ if include_buffers is None: include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False) with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f: yield f def dispatch_model( model: nn.Module, device_map: Dict[str, Union[str, int, torch.device]], main_device: Optional[torch.device] = None, state_dict: Optional[Dict[str, torch.Tensor]] = None, offload_dir: Optional[Union[str, os.PathLike]] = None, offload_index: Optional[Dict[str, str]] = None, offload_buffers: bool = False, skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, force_hooks: bool = False, ): """ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on the CPU or even the disk. Args: model (`torch.nn.Module`): The model to dispatch. device_map (`Dict[str, Union[str, int, torch.device]]`): A dictionary mapping module names in the model's `state_dict` to the device they should go to. Note that `"disk"` is accepted even if it's not a proper value for `torch.device`. main_device (`str`, `int` or `torch.device`, *optional*): The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or `"disk"`. state_dict (`Dict[str, torch.Tensor]`, *optional*): The state dict of the part of the model that will be kept on CPU. offload_dir (`str` or `os.PathLike`): The folder in which to offload the model weights (or where the model weights are already offloaded). offload_index (`Dict`, *optional*): A dictionary from weight names to their information (`dtype`/ `shape` or safetensors filename). Will default to the index saved in `save_folder`. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to offload the buffers with the model parameters. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. force_hooks (`bool`, *optional*, defaults to `False`): Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a single device. """ # Error early if the device map is incomplete.
check_device_map(model, device_map) # for backward compatibility is_bnb_quantized = ( getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False) ) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes" # We attach hooks if the device_map has at least 2 different devices or if # force_hooks is set to `True`. Otherwise, the model is already loaded # in the unique device and the user can decide where to dispatch the model. # If the model is quantized, we always force-dispatch the model if (len(set(device_map.values())) > 1) or is_bnb_quantized or force_hooks: if main_device is None: if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}: main_device = "cpu" else: main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0] if main_device != "cpu": cpu_modules = [name for name, device in device_map.items() if device == "cpu"] if state_dict is None and len(cpu_modules) > 0: state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules) disk_modules = [name for name, device in device_map.items() if device == "disk"] if offload_dir is None and offload_index is None and len(disk_modules) > 0: raise ValueError( "We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules " f"need to be offloaded: {', '.join(disk_modules)}." ) if ( len(disk_modules) > 0 and offload_index is None and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json"))) ): disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules) offload_state_dict(offload_dir, disk_state_dict) execution_device = { name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items() } execution_device[""] = main_device offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"] offload = {name: device in offloaded_devices for name, device in device_map.items()} save_folder = offload_dir if len(disk_modules) > 0 else None if state_dict is not None or save_folder is not None or offload_index is not None: device = main_device if offload_index is not None else None weights_map = OffloadedWeightsLoader( state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device ) else: weights_map = None # When dispatching the model's parameters to the devices specified in device_map, we want to avoid allocating memory several times for the # tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its # original pointer) on each device. tied_params = find_tied_parameters(model) tied_params_map = {} for group in tied_params: for param_name in group: # data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need # to care about views of tensors through storage_offset. data_ptr = recursive_getattr(model, param_name).data_ptr() tied_params_map[data_ptr] = {} # Note: To handle the disk offloading case, we cannot simply use weights_map[param_name].data_ptr() as the reference pointer, # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
attach_align_device_hook_on_blocks( model, execution_device=execution_device, offload=offload, offload_buffers=offload_buffers, weights_map=weights_map, skip_keys=skip_keys, preload_module_classes=preload_module_classes, tied_params_map=tied_params_map, ) # warn if there are any params on the meta device offloaded_devices_str = " and ".join( [device for device in set(device_map.values()) if device in ("cpu", "disk")] ) if len(offloaded_devices_str) > 0: logging.warning( f"Some parameters are on the meta device because they were offloaded to the {offloaded_devices_str}." ) # Attaching the hook may break tied weights, so we retie them retie_parameters(model, tied_params) # add a warning to the `.to` and device-cast methods so users don't move a dispatched model def add_warning(fn, model): def wrapper(*args, **kwargs): warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks." if str(fn.__name__) == "to": to_device = torch._C._nn._parse_to(*args, **kwargs)[0] if to_device is not None: logger.warning(warning_msg) else: logger.warning(warning_msg) for param in model.parameters(): if param.device == torch.device("meta"): raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.") return fn(*args, **kwargs) return wrapper model.to = add_warning(model.to, model) if is_npu_available(): model.npu = add_warning(model.npu, model) elif is_xpu_available(): model.xpu = add_warning(model.xpu, model) else: model.cuda = add_warning(model.cuda, model) else: device = list(device_map.values())[0] # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). if is_npu_available() and isinstance(device, int): device = f"npu:{device}" elif is_xpu_available() and isinstance(device, int): device = f"xpu:{device}" if device != "disk": model.to(device) else: raise ValueError( "You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead." ) # Convert OrderedDict back to dict for easier usage model.hf_device_map = dict(device_map) return model class BnbQuantizationConfig: """ A plugin to enable BitsAndBytes 4bit and 8bit quantization """ load_in_8bit: bool = field(default=False, metadata={"help": "enable 8bit quantization."}) llm_int8_threshold: float = field( default=6.0, metadata={"help": "value of the outlier threshold. only relevant when load_in_8bit=True"} ) load_in_4bit: bool = field(default=False, metadata={"help": "enable 4bit quantization."}) bnb_4bit_quant_type: str = field( default="fp4", metadata={ "help": "set the quantization data type in the `bnb.nn.Linear4bit` layers. Options are {'fp4','nf4'}." }, ) bnb_4bit_use_double_quant: bool = field( default=False, metadata={ "help": "enable nested quantization where the quantization constants from the first quantization are quantized again." }, ) bnb_4bit_compute_dtype: str = field( default="fp16", metadata={ "help": "This sets the computational type which might be different than the input type. For example, inputs might be " "fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}." }, ) torch_dtype: torch.dtype = field( default=None, metadata={ "help": "this sets the dtype of the remaining non quantized layers. The `bitsandbytes` library suggests setting the value " "to `torch.float16` for 8-bit models and to the same dtype as the compute dtype for 4-bit models. " }, ) skip_modules: List[str] = field( default=None, metadata={ "help": "an explicit list of the modules that we don't quantize.
The dtype of these modules will be `torch_dtype`." }, ) keep_in_fp32_modules: List[str] = field( default=None, metadata={"help": "an explicit list of the modules that we don't quantize. We keep them in `torch.float32`."}, ) def __post_init__(self): """ Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. """ if not isinstance(self.load_in_8bit, bool): raise ValueError("load_in_8bit must be a boolean") if not isinstance(self.load_in_4bit, bool): raise ValueError("load_in_4bit must be a boolean") if self.load_in_4bit and self.load_in_8bit: raise ValueError("load_in_4bit and load_in_8bit can't be both True") if not self.load_in_4bit and not self.load_in_8bit: raise ValueError("load_in_4bit and load_in_8bit can't be both False") if not isinstance(self.llm_int8_threshold, (int, float)): raise ValueError("llm_int8_threshold must be a float or an int") if not isinstance(self.bnb_4bit_quant_type, str): raise ValueError("bnb_4bit_quant_type must be a string") elif self.bnb_4bit_quant_type not in ["fp4", "nf4"]: raise ValueError(f"bnb_4bit_quant_type must be in ['fp4','nf4'] but found {self.bnb_4bit_quant_type}") if not isinstance(self.bnb_4bit_use_double_quant, bool): raise ValueError("bnb_4bit_use_double_quant must be a boolean") if isinstance(self.bnb_4bit_compute_dtype, str): if self.bnb_4bit_compute_dtype == "fp32": self.bnb_4bit_compute_dtype = torch.float32 elif self.bnb_4bit_compute_dtype == "fp16": self.bnb_4bit_compute_dtype = torch.float16 elif self.bnb_4bit_compute_dtype == "bf16": self.bnb_4bit_compute_dtype = torch.bfloat16 else: raise ValueError( f"bnb_4bit_compute_dtype must be in ['fp32','fp16','bf16'] but found {self.bnb_4bit_compute_dtype}" ) elif not isinstance(self.bnb_4bit_compute_dtype, torch.dtype): raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype") if self.skip_modules is not None and not isinstance(self.skip_modules, list): raise ValueError("skip_modules must be a list of strings") if self.keep_in_fp32_modules is not None and not isinstance(self.keep_in_fp32_modules, list): raise ValueError("keep_in_fp32_modules must be a list of strings") if self.load_in_4bit: self.target_dtype = CustomDtype.INT4 if self.load_in_8bit: self.target_dtype = torch.int8 if self.load_in_4bit and self.llm_int8_threshold != 6.0: warnings.warn("llm_int8_threshold can only be used for models loaded in 8bit") if isinstance(self.torch_dtype, str): if self.torch_dtype == "fp32": self.torch_dtype = torch.float32 elif self.torch_dtype == "fp16": self.torch_dtype = torch.float16 elif self.torch_dtype == "bf16": self.torch_dtype = torch.bfloat16 else: raise ValueError(f"torch_dtype must be in ['fp32','fp16','bf16'] but found {self.torch_dtype}") if self.load_in_8bit and self.torch_dtype is None: self.torch_dtype = torch.float16 if self.load_in_4bit and self.torch_dtype is None: self.torch_dtype = self.bnb_4bit_compute_dtype if not isinstance(self.torch_dtype, torch.dtype): raise ValueError("torch_dtype must be a torch.dtype") def load_checkpoint_in_model( model: nn.Module, checkpoint: Union[str, os.PathLike], device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, dtype: Optional[Union[str, torch.dtype]] = None, offload_state_dict: bool = False, offload_buffers: bool = False, keep_in_fp32_modules: List[str] = None, offload_8bit_bnb: bool = False, ): """ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as
they are loaded. <Tip warning={true}> Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`]. </Tip> Args: model (`torch.nn.Module`): The model in which we want to load a checkpoint. checkpoint (`str` or `os.PathLike`): The folder checkpoint to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. - a path to a folder containing a unique pytorch_model.bin or a model.safetensors file. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. offload_state_dict (`bool`, *optional*, defaults to `False`): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the buffers in the weights offloaded to disk. keep_in_fp32_modules (`List[str]`, *optional*): A list of the modules that we keep in `torch.float32` dtype. offload_8bit_bnb (`bool`, *optional*): Whether or not to enable offload of 8-bit modules on cpu/disk. """ if offload_8bit_bnb: from .bnb import quantize_and_offload_8bit tied_params = find_tied_parameters(model) if check_tied_parameters_in_config(model) and len(tied_params) == 0: logger.warn( "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device_map` function." ) if device_map is not None: check_tied_parameters_on_same_device(tied_params, device_map) if offload_folder is None and device_map is not None and "disk" in device_map.values(): raise ValueError( "At least one of the model's submodules will be offloaded to disk, please pass along an `offload_folder`."
) elif offload_folder is not None and device_map is not None and "disk" in device_map.values(): os.makedirs(offload_folder, exist_ok=True) if isinstance(dtype, str): # We accept "torch.float16" or just "float16" dtype = dtype.replace("torch.", "") dtype = getattr(torch, dtype) checkpoint_files = None index_filename = None if os.path.isfile(checkpoint): if str(checkpoint).endswith(".json"): index_filename = checkpoint else: checkpoint_files = [checkpoint] elif os.path.isdir(checkpoint): # check if the whole state dict is present potential_state_bin = [f for f in os.listdir(checkpoint) if f == WEIGHTS_NAME] potential_state_safetensor = [f for f in os.listdir(checkpoint) if f == SAFE_WEIGHTS_NAME] if len(potential_state_bin) == 1: checkpoint_files = [os.path.join(checkpoint, potential_state_bin[0])] elif len(potential_state_safetensor) == 1: checkpoint_files = [os.path.join(checkpoint, potential_state_safetensor[0])] else: # otherwise check for sharded checkpoints potential_index = [f for f in os.listdir(checkpoint) if f.endswith(".index.json")] if len(potential_index) == 0: raise ValueError( f"{checkpoint} is not a folder containing a `.index.json` file or a {WEIGHTS_NAME} or a {SAFE_WEIGHTS_NAME} file" ) elif len(potential_index) == 1: index_filename = os.path.join(checkpoint, potential_index[0]) else: raise ValueError( f"{checkpoint} contains more than one `.index.json` file, delete the irrelevant ones." ) else: raise ValueError( "`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded " f"checkpoint, or a folder containing a sharded checkpoint or the whole state dict, but got {checkpoint}." ) if index_filename is not None: checkpoint_folder = os.path.split(index_filename)[0] with open(index_filename) as f: index = json.loads(f.read()) if "weight_map" in index: index = index["weight_map"] checkpoint_files = sorted(list(set(index.values()))) checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files] # Logic for missing/unexpected keys goes here. offload_index = {} if offload_state_dict: state_dict_folder = tempfile.mkdtemp() state_dict_index = {} buffer_names = [name for name, _ in model.named_buffers()] for checkpoint_file in checkpoint_files: checkpoint = load_state_dict(checkpoint_file, device_map=device_map) if device_map is None: model.load_state_dict(checkpoint, strict=False) else: for param_name, param in checkpoint.items(): # skip SCB parameter (for 8-bit serialization) if "SCB" in param_name: continue module_name = param_name while len(module_name) > 0 and module_name not in device_map: module_name = ".".join(module_name.split(".")[:-1]) if module_name == "" and "" not in device_map: # TODO: group all errors and raise at the end. raise ValueError(f"{param_name} doesn't have any device set.") param_device = device_map[module_name] new_dtype = dtype if dtype is not None and torch.is_floating_point(param): if keep_in_fp32_modules is not None and dtype == torch.float16: proceed = False for key in keep_in_fp32_modules: if ((key in param_name) and (key + "."
in param_name)) or key == param_name: proceed = True break if proceed: new_dtype = torch.float32 if "weight" in param_name and param_name.replace("weight", "SCB") in checkpoint.keys(): if param.dtype == torch.int8: fp16_statistics = checkpoint[param_name.replace("weight", "SCB")] else: fp16_statistics = None if param_device == "disk": if offload_buffers or param_name not in buffer_names: if new_dtype is None: new_dtype = param.dtype if offload_8bit_bnb: quantize_and_offload_8bit( model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics ) continue else: set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype) offload_weight(param, param_name, offload_folder, index=offload_index) elif param_device == "cpu" and offload_state_dict: if new_dtype is None: new_dtype = param.dtype if offload_8bit_bnb: quantize_and_offload_8bit( model, param, param_name, new_dtype, state_dict_folder, state_dict_index, fp16_statistics ) else: set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype) offload_weight(param, param_name, state_dict_folder, index=state_dict_index) else: set_module_tensor_to_device( model, param_name, param_device, value=param, dtype=new_dtype, fp16_statistics=fp16_statistics, ) # Force Python to clean up. del checkpoint gc.collect() save_offload_index(offload_index, offload_folder) # Load back offloaded state dict on CPU if offload_state_dict: load_offloaded_weights(model, state_dict_index, state_dict_folder) shutil.rmtree(state_dict_folder) retie_parameters(model, tied_params) The provided code snippet includes necessary dependencies for implementing the `load_and_quantize_model` function. Write a Python function `def load_and_quantize_model( model: torch.nn.Module, bnb_quantization_config: BnbQuantizationConfig, weights_location: Union[str, os.PathLike] = None, device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, no_split_module_classes: Optional[List[str]] = None, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, offload_state_dict: bool = False, )` to solve the following problem: This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the model is in the meta device, we will load and dispatch the weights according to the `device_map` passed. If the model is already loaded, we will quantize the model and put the model on the GPU. Args: model (`torch.nn.Module`): Input model. The model can be already loaded or on the meta device bnb_quantization_config (`BnbQuantizationConfig`): The bitsandbytes quantization parameters weights_location (`str` or `os.PathLike`): The folder weights_location to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. - a path to a folder containing a unique pytorch_model.bin file. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across devices (for instance any layer that has a residual connection).
max_memory (`Dict`, *optional*): A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available if unset. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_state_dict (`bool`, *optional*, defaults to `False`): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. Returns: `torch.nn.Module`: The quantized model Here is the function: def load_and_quantize_model( model: torch.nn.Module, bnb_quantization_config: BnbQuantizationConfig, weights_location: Union[str, os.PathLike] = None, device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, no_split_module_classes: Optional[List[str]] = None, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, offload_state_dict: bool = False, ): """ This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the model is in the meta device, we will load and dispatch the weights according to the `device_map` passed. If the model is already loaded, we will quantize the model and put the model on the GPU. Args: model (`torch.nn.Module`): Input model. The model can be already loaded or on the meta device bnb_quantization_config (`BnbQuantizationConfig`): The bitsandbytes quantization parameters weights_location (`str` or `os.PathLike`): The folder weights_location to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. - a path to a folder containing a unique pytorch_model.bin file. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across devices (for instance any layer that has a residual connection). max_memory (`Dict`, *optional*): A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available if unset. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_state_dict (`bool`, *optional*, defaults to `False`): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. Returns: `torch.nn.Module`: The quantized model """ load_in_4bit = bnb_quantization_config.load_in_4bit load_in_8bit = bnb_quantization_config.load_in_8bit if load_in_8bit and not is_8bit_bnb_available(): raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_4bit and not is_4bit_bnb_available(): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," " make sure you have the latest version of `bitsandbytes` installed."
) modules_on_cpu = [] # custom device map if isinstance(device_map, dict) and len(device_map.keys()) > 1: modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: bnb_quantization_config.skip_modules = get_keys_to_not_convert(model) # add cpu modules to skip modules only for 4-bit modules if load_in_4bit: bnb_quantization_config.skip_modules.extend(modules_on_cpu) modules_to_not_convert = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fp32_modules is None: bnb_quantization_config.keep_in_fp32_modules = [] keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules modules_to_not_convert.extend(keep_in_fp32_modules) # compatibility with peft model.is_loaded_in_4bit = load_in_4bit model.is_loaded_in_8bit = load_in_8bit model_device = get_parameter_device(model) if model_device.type != "meta": # quantization of an already loaded model logger.warning( "It is not recommended to quantize a loaded model. " "The model should be instantiated under the `init_empty_weights` context manager." ) model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert) # convert param to the right dtype dtype = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules): param.to(torch.float32) if param.dtype != torch.float32: name = name.replace(".weight", "").replace(".bias", "") param = getattr(model, name, None) if param is not None: param.to(torch.float32) elif torch.is_floating_point(param): param.to(dtype) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device()) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device()) else: raise RuntimeError("No GPU found. A GPU is needed for quantization.") logger.info( f"The model device type is {model_device.type}. However, cuda is needed for quantization. " "We move the model to cuda." ) return model elif weights_location is None: raise RuntimeError( f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " ) else: with init_empty_weights(): model = replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert ) device_map = get_quantized_model_device_map( model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes, ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): offload_state_dict = True offload = any(x in list(device_map.values()) for x in ["cpu", "disk"]) load_checkpoint_in_model( model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload, ) return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the model is in the meta device, we will load and dispatch the weights according to the `device_map` passed. If the model is already loaded, we will quantize the model and put the model on the GPU. Args: model (`torch.nn.Module`): Input model. The model can be already loaded or on the meta device bnb_quantization_config (`BnbQuantizationConfig`): The bitsandbytes quantization parameters weights_location (`str` or `os.PathLike`): The folder weights_location to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. - a path to a folder containing a unique pytorch_model.bin file. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across devices (for instance any layer that has a residual connection). max_memory (`Dict`, *optional*): A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available if unset. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_state_dict (`bool`, *optional*, defaults to `False`): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. Returns: `torch.nn.Module`: The quantized model
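A hedged usage sketch for the record above, showing the intended meta-device workflow; the Transformers imports, model id, and checkpoint folder are illustrative assumptions, not values from the record:

```python
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM  # assumed model source

# Instantiate an empty shell of the model so no weight memory is allocated yet.
config = AutoConfig.from_pretrained("facebook/opt-350m")  # placeholder model id
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)

# Stream the real weights from disk and quantize linear layers to 8-bit.
bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
model = load_and_quantize_model(
    model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint_folder",  # placeholder path
    device_map="auto",
)
```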
2,916
import logging import os from copy import deepcopy from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_4bit_bnb_available, is_8bit_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) The provided code snippet includes necessary dependencies for implementing the `has_4bit_bnb_layers` function. Write a Python function `def has_4bit_bnb_layers(model)` to solve the following problem: Check if we have `bnb.nn.Linear4bit` layers inside our model Here is the function: def has_4bit_bnb_layers(model): """Check if we have `bnb.nn.Linear4bit` layers inside our model""" # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily import bitsandbytes as bnb for m in model.modules(): if isinstance(m, bnb.nn.Linear4bit): return True return False
Check if we have `bnb.nn.Linear4bit` layers inside our model
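A small sketch of how such a check might be used, assuming the record's `has_4bit_bnb_layers` and a loaded `model` are in scope; guarding offload with this check is an illustrative assumption:

```python
# 4-bit bitsandbytes layers cannot be offloaded/serialized like plain
# fp16 weights, so refuse that code path explicitly.
if has_4bit_bnb_layers(model):
    raise ValueError("Disk offload is not supported for 4-bit quantized models.")
```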
2,917
import os import platform import subprocess import sys from shutil import which from typing import List import torch from packaging.version import parse The provided code snippet includes necessary dependencies for implementing the `get_int_from_env` function. Write a Python function `def get_int_from_env(env_keys, default)` to solve the following problem: Returns the first non-negative env value found in the `env_keys` list or the default. Here is the function: def get_int_from_env(env_keys, default): """Returns the first non-negative env value found in the `env_keys` list or the default.""" for e in env_keys: val = int(os.environ.get(e, -1)) if val >= 0: return val return default
Returns the first non-negative env value found in the `env_keys` list or the default.
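A quick sketch of the lookup order, assuming the record's `get_int_from_env` is in scope (the variable names are just examples):

```python
import os

os.environ["OMPI_COMM_WORLD_SIZE"] = "4"  # e.g. set by an MPI launcher

# "WORLD_SIZE" is unset (read as -1 and skipped), so the scan falls through
# to the first key with a non-negative value and returns 4.
world_size = get_int_from_env(["WORLD_SIZE", "OMPI_COMM_WORLD_SIZE"], 1)
assert world_size == 4
```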
2,918
import os import platform import subprocess import sys from shutil import which from typing import List import torch from packaging.version import parse def parse_choice_from_env(key, default="no"): # Return the string value of `key` from the environment, falling back to the stringified `default` if unset. value = os.environ.get(key, str(default)) return value
null
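No docstring ships with this record; a short sketch of the behavior, with the env var name as an example only (and assuming it is not already set when the snippet starts):

```python
import os

# Unset keys fall back to the stringified default ("no" here).
assert parse_choice_from_env("ACCELERATE_MIXED_PRECISION") == "no"

os.environ["ACCELERATE_MIXED_PRECISION"] = "bf16"
assert parse_choice_from_env("ACCELERATE_MIXED_PRECISION") == "bf16"
```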
2,919
import os import platform import subprocess import sys from shutil import which from typing import List import torch from packaging.version import parse The provided code snippet includes necessary dependencies for implementing the `are_libraries_initialized` function. Write a Python function `def are_libraries_initialized(*library_names: str) -> List[str]` to solve the following problem: Checks if any of `library_names` are imported in the environment. Will return any names that are. Here is the function: def are_libraries_initialized(*library_names: str) -> List[str]: """ Checks if any of `library_names` are imported in the environment. Will return any names that are. """ return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()]
Checks if any of `library_names` are imported in the environment. Will return any names that are.
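A short sketch, assuming the record's `are_libraries_initialized` is in scope; which names come back depends entirely on what the running process has already imported:

```python
import numpy  # noqa: F401  -- force one probed library into sys.modules

# Only already-imported names are returned; "bitsandbytes" is absent unless
# something imported it earlier in this process.
print(are_libraries_initialized("numpy", "bitsandbytes"))  # e.g. ['numpy']
```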
2,920
import os import platform import subprocess import sys from shutil import which from typing import List import torch from packaging.version import parse def get_gpu_info(): """ Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA. Largely based on the `gputil` library. """ # Returns a list of `n` GPUs and their names output = subprocess.check_output( [_nvidia_smi(), "--query-gpu=count,name", "--format=csv,noheader"], universal_newlines=True ) output = output.strip() gpus = output.split(os.linesep) # Get names from output gpu_count = len(gpus) gpu_names = [gpu.split(",")[1].strip() for gpu in gpus] return gpu_names, gpu_count def get_driver_version(): """ Returns the driver version. In the case of multiple GPUs, will return the first. """ output = subprocess.check_output( [_nvidia_smi(), "--query-gpu=driver_version", "--format=csv,noheader"], universal_newlines=True ) output = output.strip() return output.split(os.linesep)[0] The provided code snippet includes necessary dependencies for implementing the `check_cuda_p2p_ib_support` function. Write a Python function `def check_cuda_p2p_ib_support()` to solve the following problem: Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after the 3090. Notably uses `nvidia-smi` instead of torch to not initialize CUDA. Here is the function: def check_cuda_p2p_ib_support(): """ Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after the 3090. Notably uses `nvidia-smi` instead of torch to not initialize CUDA. """ try: device_names, device_count = get_gpu_info() # As new consumer GPUs get released, add them to `unsupported_devices` unsupported_devices = {"RTX 40"} if device_count > 1: if any( unsupported_device in device_name for device_name in device_names for unsupported_device in unsupported_devices ): # Check if they have the right driver version acceptable_driver_version = "550.40.07" current_driver_version = get_driver_version() if parse(current_driver_version) < parse(acceptable_driver_version): return False return True except Exception: pass return True
Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after the 3090. Notably uses `nvidia-smi` instead of torch to not initialize CUDA.
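A sketch of the launcher-side use this docstring alludes to (`accelerate launch` setting the NCCL flags automatically); doing it by hand, as below, is an assumed alternative:

```python
import os

# On affected consumer GPUs (e.g. multiple RTX 4090s on an old driver),
# disable the unsupported NCCL transports before initializing the process group.
if not check_cuda_p2p_ib_support():
    os.environ["NCCL_P2P_DISABLE"] = "1"
    os.environ["NCCL_IB_DISABLE"] = "1"
```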
2,921
import os import platform import subprocess import sys from shutil import which from typing import List import torch from packaging.version import parse The provided code snippet includes necessary dependencies for implementing the `check_fp8_capability` function. Write a Python function `def check_fp8_capability()` to solve the following problem: Checks if all the current GPUs available support FP8. Notably must initialize `torch.cuda` to check. Here is the function: def check_fp8_capability(): """ Checks if all the current GPUs available support FP8. Notably must initialize `torch.cuda` to check. """ cuda_device_capacity = torch.cuda.get_device_capability() return cuda_device_capacity >= (8, 9)
Checks if all the current GPUs available support FP8. Notably must initialize `torch.cuda` to check.
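A minimal sketch of a dispatch decision built on this check, assuming a CUDA device is available (the check calls `torch.cuda.get_device_capability()`, which initializes CUDA); the bf16 fallback is an illustrative choice:

```python
# FP8 kernels require compute capability >= (8, 9) (Ada Lovelace / Hopper).
mixed_precision = "fp8" if check_fp8_capability() else "bf16"
print(f"Selected mixed precision: {mixed_precision}")
```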
2,922
import collections import os import platform import re import socket from contextlib import contextmanager from functools import partial, reduce from types import MethodType from typing import OrderedDict import torch from packaging.version import Version from safetensors.torch import save_file as safe_save_file from ..commands.config.default import write_basic_config from ..logging import get_logger from ..state import PartialState from .constants import FSDP_PYTORCH_VERSION from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_torch_distributed_available, is_torch_xla_available from .modeling import id_tensor_storage from .transformer_engine import convert_model from .versions import is_torch_version class PartialState: """ Singleton class that has information about the current training environment and functions to help with process control. Designed to be used when only process control and device execution states are needed. Does *not* need to be initialized from `Accelerator`. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **local_process_index** (`int`) -- The index of the current process on the current server. - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no', 'fp16', 'bf16' or 'fp8'.) - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
""" _shared_state = SharedDict() def __init__(self, cpu: bool = False, **kwargs): self.__dict__ = self._shared_state if not self.initialized: self._cpu = cpu self.backend = None env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None) self.device = torch.device(env_device) if env_device is not None else None self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE") use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None) if use_sagemaker_dp is None: use_sagemaker_dp = ( os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true" and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO ) if use_sagemaker_dp and not cpu: if ( os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL ) or use_sagemaker_dp: self.distributed_type = DistributedType.MULTI_GPU import smdistributed.dataparallel.torch.torch_smddp # noqa if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="smddp") self.backend = "smddp" self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_torch_xla_available() and not cpu: self.distributed_type = DistributedType.XLA self.device = xm.xla_device() xm.set_replication(self.device, xm.get_xla_supported_devices()) self.num_processes = xm.xrt_world_size() self.process_index = xm.get_ordinal() if is_torch_xla_available(check_is_tpu=True): self.local_process_index = xm.get_local_ordinal() else: self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) elif ( os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu ): assert ( is_deepspeed_available() ), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source" self.distributed_type = DistributedType.DEEPSPEED if not torch.distributed.is_initialized(): from deepspeed import comm as dist # DeepSpeed always uses nccl kwargs.pop("backend", None) if is_xpu_available and is_ccl_available(): # Set DeepSpeed backend to ccl for xpu self.backend = "ccl" os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1") os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0") elif is_npu_available(): self.backend = "hccl" else: self.backend = "nccl" dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: if is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) if self.device is not None: torch.xpu.set_device(self.device) elif is_npu_available(): self.device = torch.device("npu", self.local_process_index) if self.device is not None: torch.npu.set_device(self.device) else: self.device = torch.device("cuda", self.local_process_index) if self.device is not None: torch.cuda.set_device(self.device) if self.device.type == "cuda" and not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. 
" 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self._mixed_precision = "no" # deepspeed handles mixed_precision using deepspeed_config elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu and torch.cuda.is_available(): self.distributed_type = DistributedType.MULTI_GPU if not torch.distributed.is_initialized(): self.backend = kwargs.pop("backend", "nccl") # Special case for `TrainingArguments`, where `backend` will be `None` if self.backend is None: self.backend = "nccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) if not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. " 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_npu_available() and not cpu and int(os.environ.get("LOCAL_RANK", -1)) != -1: self.distributed_type = DistributedType.MULTI_NPU if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = "hccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("npu", self.local_process_index) torch.npu.set_device(self.device) elif ( get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1 or int(os.environ.get("LOCAL_RANK", -1)) != -1 ): if not cpu and is_xpu_available(): self.distributed_type = DistributedType.MULTI_XPU else: self.distributed_type = DistributedType.MULTI_CPU # Actually, CCL_WORKER_COUNT is a CPU only env var in CCL, no need to set it for XPU. 
if is_ccl_available() and ( get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or self.distributed_type == DistributedType.MULTI_XPU ): if get_ccl_version() >= "1.12": import oneccl_bindings_for_pytorch # noqa: F401 else: import torch_ccl # noqa: F401 backend = "ccl" elif torch.distributed.is_mpi_available(): backend = "mpi" else: backend = "gloo" # Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH rank = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1) local_rank = get_int_from_env( ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 ) local_size = get_int_from_env( ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1, ) self.local_process_index = local_rank os.environ["RANK"] = str(rank) os.environ["WORLD_SIZE"] = str(size) os.environ["LOCAL_RANK"] = str(local_rank) os.environ["LOCAL_WORLD_SIZE"] = str(local_size) if backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU: os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = str(local_size) os.environ["CCL_LOCAL_RANK"] = str(local_rank) if not os.environ.get("MASTER_PORT", None): os.environ["MASTER_PORT"] = "29500" if not os.environ.get("MASTER_ADDR", None): if local_size != size and backend != "mpi": raise ValueError( "Looks like distributed multinode run but MASTER_ADDR env not set, " "please try exporting rank 0's hostname as MASTER_ADDR" ) if ( self.distributed_type == DistributedType.MULTI_CPU and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0 ): import psutil num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size) if num_cpu_threads_per_process == 0: num_cpu_threads_per_process = 1 torch.set_num_threads(num_cpu_threads_per_process) warnings.warn( f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob" " performance." 
) if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = backend torch.distributed.init_process_group(self.backend, rank=rank, world_size=size, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() if cpu: self.device = torch.device("cpu") elif is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) torch.xpu.set_device(self.device) else: self.device = self.default_device else: self.distributed_type = ( DistributedType.NO if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "false" else DistributedType.DEEPSPEED ) self.num_processes = 1 self.process_index = self.local_process_index = 0 if self.device is None: self.device = torch.device("cpu") if cpu else self.default_device self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0) def __repr__(self) -> str: return ( f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n" f"Num processes: {self.num_processes}\n" f"Process index: {self.process_index}\n" f"Local process index: {self.local_process_index}\n" f"Device: {self.device}\n" ) def _reset_state(): "Resets `_shared_state`, is used internally and should not be called" PartialState._shared_state.clear() def initialized(self) -> bool: "Returns whether the `PartialState` has been initialized" return self._shared_state != {} def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return self.distributed_type != DistributedType.NO and self.num_processes > 1 def is_last_process(self) -> bool: "Returns whether the current process is the last one" return self.process_index == self.num_processes - 1 def is_main_process(self) -> bool: "Returns whether the current process is the main process" return ( self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return ( self.local_process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def wait_for_everyone(self): """ Will stop the execution of the current process until every other process has reached that point (so this does nothing when the script is only run in one process). Useful to do before saving a model. Example: ```python >>> # Assuming two GPU processes >>> import time >>> from accelerate.state import PartialState >>> state = PartialState() >>> if state.is_main_process: ... time.sleep(2) >>> else: ... print("I'm waiting for the main process to finish its sleep...") >>> state.wait_for_everyone() >>> # Should print on every process at the same time >>> print("Everyone is here") ``` """ if self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, DistributedType.DEEPSPEED, DistributedType.FSDP, ): torch.distributed.barrier() elif self.distributed_type == DistributedType.XLA: xm.rendezvous("accelerate.utils.wait_for_everyone") def _goes_first(self, is_main: bool): if not is_main: self.wait_for_everyone() yield if is_main: self.wait_for_everyone() def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. 
Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate import PartialState state = PartialState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ if self.num_processes == 1: yield inputs return length = len(inputs) # Nested dictionary of any types if isinstance(inputs, dict): length = len(inputs[list(inputs.keys())[0]]) if not all(len(v) == length for v in inputs.values()): raise ValueError("All values in the dictionary must have the same length") num_samples_per_process = math.ceil(length / self.num_processes) start_index = self.process_index * num_samples_per_process end_index = start_index + num_samples_per_process if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1): end_index = length def _split_values(inputs, start_index, end_index): if isinstance(inputs, (list, tuple, torch.Tensor)): if start_index >= len(inputs): result = inputs[-1:] else: result = inputs[start_index:end_index] if apply_padding: if isinstance(result, torch.Tensor): from accelerate.utils import pad_across_processes, send_to_device # The tensor needs to be on the device before we can pad it tensorized_result = send_to_device(result, self.device) result = pad_across_processes(tensorized_result, pad_index=inputs[-1]) else: result += [result[-1]] * (num_samples_per_process - len(result)) return result elif isinstance(inputs, dict): for key in inputs.keys(): inputs[key] = _split_values(inputs[key], start_index, end_index) return inputs else: if is_datasets_available(): from datasets import Dataset if isinstance(inputs, Dataset): if start_index >= len(inputs): start_index = len(inputs) - 1 if end_index > len(inputs): end_index = len(inputs) result_idcs = list(range(start_index, end_index)) if apply_padding: result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs)) return inputs.select(result_idcs) return inputs yield _split_values(inputs, start_index, end_index) def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> with accelerator.main_process_first(): ... # This will be printed first by process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {accelerator.process_index}") ``` """ yield from self._goes_first(self.is_main_process) def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. 
Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> with state.local_main_process_first(): ... # This will be printed first by local process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {state.local_process_index}") ``` """ yield from self._goes_first(self.is_local_main_process) def on_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the main process. Args: function (`Callable`): The function to decorate. Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> @state.on_main_process ... def print_something(): ... print("This will be printed by process 0 only.") >>> print_something() "This will be printed by process 0 only" ``` """ if not self.initialized: raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.") if self.is_main_process or not self.use_distributed: return function return do_nothing def on_local_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the local main process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 2 servers with 4 processes each. from accelerate.state import PartialState state = PartialState() def print_something(): print("This will be printed by process 0 only on each server.") print_something() # On server 1: "This will be printed by process 0 only" # On server 2: "This will be printed by process 0 only" ``` """ if self.is_local_main_process or not self.use_distributed: return function return do_nothing def on_last_process(self, function: Callable[..., Any]): """ Decorator that only runs the decorated function on the last process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 3" ``` """ if self.is_last_process or not self.use_distributed: return function return do_nothing def on_process(self, function: Callable[..., Any] = None, process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index. Args: function (`Callable`, `optional`): The function to decorate. process_index (`int`, `optional`): The index of the process on which to run the function. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 2" ``` """ if function is None: return partial(self.on_process, process_index=process_index) if (self.process_index == process_index) or (not self.use_distributed): return function return do_nothing def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index on the current node. Args: function (`Callable`, *optional*): The function to decorate. local_process_index (`int`, *optional*): The index of the local process on which to run the function. Example: ```python # Assume we have 2 servers with 4 processes each. 
from accelerate import Accelerator accelerator = Accelerator() def print_something(): print(f"Printed on process {accelerator.local_process_index}") print_something() # On server 1: "Printed on process 2" # On server 2: "Printed on process 2" ``` """ if function is None: return partial(self.on_local_process, local_process_index=local_process_index) if (self.local_process_index == local_process_index) or (not self.use_distributed): return function return do_nothing def print(self, *args, **kwargs): if self.is_local_main_process: print(*args, **kwargs) def default_device(self) -> torch.device: """ Returns the default device which is: - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True. - CUDA if `torch.cuda.is_available()` - NPU if `is_npu_available()` - CPU otherwise """ if is_mps_available(): os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" return torch.device("mps") elif torch.cuda.is_available(): return torch.device("cuda") elif is_xpu_available(): return torch.device("xpu:0") elif is_npu_available(): return torch.device("npu") else: return torch.device("cpu") The provided code snippet includes necessary dependencies for implementing the `wait_for_everyone` function. Write a Python function `def wait_for_everyone()` to solve the following problem: Introduces a blocking point in the script, making sure all processes have reached this point before continuing. <Tip warning={true}> Make sure all processes will reach this instruction otherwise one of your processes will hang forever. </Tip> Here is the function: def wait_for_everyone(): """ Introduces a blocking point in the script, making sure all processes have reached this point before continuing. <Tip warning={true}> Make sure all processes will reach this instruction otherwise one of your processes will hang forever. </Tip> """ PartialState().wait_for_everyone()
Introduces a blocking point in the script, making sure all processes have reached this point before continuing. <Tip warning={true}> Make sure all processes will reach this instruction otherwise one of your processes will hang forever. </Tip>
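A short, hedged usage sketch (not part of the record above): it assumes a script launched with `accelerate launch` on two or more processes, and the checkpoint file name is illustrative.

```python
# Hedged usage sketch: run under `accelerate launch` with >= 2 processes.
# The checkpoint path "checkpoint.pt" is illustrative, not from the source.
import torch

from accelerate.state import PartialState
from accelerate.utils import wait_for_everyone

state = PartialState()

if state.is_main_process:
    torch.save({"step": 0}, "checkpoint.pt")  # only rank 0 writes

wait_for_everyone()  # every rank blocks here until rank 0 has finished

ckpt = torch.load("checkpoint.pt")  # now safe to read on all ranks
```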
2,923
import collections import os import platform import re import socket from contextlib import contextmanager from functools import partial, reduce from types import MethodType from typing import OrderedDict import torch from packaging.version import Version from safetensors.torch import save_file as safe_save_file from ..commands.config.default import write_basic_config from ..logging import get_logger from ..state import PartialState from .constants import FSDP_PYTORCH_VERSION from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_torch_distributed_available, is_torch_xla_available from .modeling import id_tensor_storage from .transformer_engine import convert_model from .versions import is_torch_version def clean_state_dict_for_safetensors(state_dict: dict): """ Cleans the state dictionary from a model and removes tensor aliasing if present. Args: state_dict (`dict`): The state dictionary from a model """ ptrs = collections.defaultdict(list) # When bnb serialization is used, weights in state dict can be strings for name, tensor in state_dict.items(): if not isinstance(tensor, str): ptrs[id_tensor_storage(tensor)].append(name) # These are all pointers of tensors with shared memory shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} warn_names = set() for names in shared_ptrs.values(): # When not all duplicates have been cleaned, we still remove those keys but put a clear warning. # If the link between tensors was done at runtime then `from_pretrained` will not get # the key back leading to random tensor. A proper warning will be shown # during reload (if applicable), but since the file is not necessarily compatible with # the config, better show a proper warning. found_names = [name for name in names if name in state_dict] warn_names.update(found_names[1:]) for name in found_names[1:]: del state_dict[name] if len(warn_names) > 0: logger.warning( f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading", ) state_dict = {k: v.contiguous() if isinstance(v, torch.Tensor) else v for k, v in state_dict.items()} return state_dict class PartialState: """ Singleton class that has information about the current training environment and functions to help with process control. Designed to be used when only process control and device execution states are needed. Does *not* need to be initialized from `Accelerator`. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **local_process_index** (`int`) -- The index of the current process on the current server. - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. 
""" _shared_state = SharedDict() def __init__(self, cpu: bool = False, **kwargs): self.__dict__ = self._shared_state if not self.initialized: self._cpu = cpu self.backend = None env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None) self.device = torch.device(env_device) if env_device is not None else None self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE") use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None) if use_sagemaker_dp is None: use_sagemaker_dp = ( os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true" and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO ) if use_sagemaker_dp and not cpu: if ( os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL ) or use_sagemaker_dp: self.distributed_type = DistributedType.MULTI_GPU import smdistributed.dataparallel.torch.torch_smddp # noqa if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="smddp") self.backend = "smddp" self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_torch_xla_available() and not cpu: self.distributed_type = DistributedType.XLA self.device = xm.xla_device() xm.set_replication(self.device, xm.get_xla_supported_devices()) self.num_processes = xm.xrt_world_size() self.process_index = xm.get_ordinal() if is_torch_xla_available(check_is_tpu=True): self.local_process_index = xm.get_local_ordinal() else: self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) elif ( os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu ): assert ( is_deepspeed_available() ), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source" self.distributed_type = DistributedType.DEEPSPEED if not torch.distributed.is_initialized(): from deepspeed import comm as dist # DeepSpeed always uses nccl kwargs.pop("backend", None) if is_xpu_available and is_ccl_available(): # Set DeepSpeed backend to ccl for xpu self.backend = "ccl" os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1") os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0") elif is_npu_available(): self.backend = "hccl" else: self.backend = "nccl" dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: if is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) if self.device is not None: torch.xpu.set_device(self.device) elif is_npu_available(): self.device = torch.device("npu", self.local_process_index) if self.device is not None: torch.npu.set_device(self.device) else: self.device = torch.device("cuda", self.local_process_index) if self.device is not None: torch.cuda.set_device(self.device) if self.device.type == "cuda" and not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. 
" 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self._mixed_precision = "no" # deepspeed handles mixed_precision using deepspeed_config elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu and torch.cuda.is_available(): self.distributed_type = DistributedType.MULTI_GPU if not torch.distributed.is_initialized(): self.backend = kwargs.pop("backend", "nccl") # Special case for `TrainingArguments`, where `backend` will be `None` if self.backend is None: self.backend = "nccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) if not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. " 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_npu_available() and not cpu and int(os.environ.get("LOCAL_RANK", -1)) != -1: self.distributed_type = DistributedType.MULTI_NPU if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = "hccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("npu", self.local_process_index) torch.npu.set_device(self.device) elif ( get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1 or int(os.environ.get("LOCAL_RANK", -1)) != -1 ): if not cpu and is_xpu_available(): self.distributed_type = DistributedType.MULTI_XPU else: self.distributed_type = DistributedType.MULTI_CPU # Actually, CCL_WORKER_COUNT is a CPU only env var in CCL, no need to set it for XPU. 
if is_ccl_available() and ( get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or self.distributed_type == DistributedType.MULTI_XPU ): if get_ccl_version() >= "1.12": import oneccl_bindings_for_pytorch # noqa: F401 else: import torch_ccl # noqa: F401 backend = "ccl" elif torch.distributed.is_mpi_available(): backend = "mpi" else: backend = "gloo" # Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH rank = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1) local_rank = get_int_from_env( ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 ) local_size = get_int_from_env( ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1, ) self.local_process_index = local_rank os.environ["RANK"] = str(rank) os.environ["WORLD_SIZE"] = str(size) os.environ["LOCAL_RANK"] = str(local_rank) os.environ["LOCAL_WORLD_SIZE"] = str(local_size) if backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU: os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = str(local_size) os.environ["CCL_LOCAL_RANK"] = str(local_rank) if not os.environ.get("MASTER_PORT", None): os.environ["MASTER_PORT"] = "29500" if not os.environ.get("MASTER_ADDR", None): if local_size != size and backend != "mpi": raise ValueError( "Looks like distributed multinode run but MASTER_ADDR env not set, " "please try exporting rank 0's hostname as MASTER_ADDR" ) if ( self.distributed_type == DistributedType.MULTI_CPU and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0 ): import psutil num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size) if num_cpu_threads_per_process == 0: num_cpu_threads_per_process = 1 torch.set_num_threads(num_cpu_threads_per_process) warnings.warn( f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob" " performance." 
) if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = backend torch.distributed.init_process_group(self.backend, rank=rank, world_size=size, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() if cpu: self.device = torch.device("cpu") elif is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) torch.xpu.set_device(self.device) else: self.device = self.default_device else: self.distributed_type = ( DistributedType.NO if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "false" else DistributedType.DEEPSPEED ) self.num_processes = 1 self.process_index = self.local_process_index = 0 if self.device is None: self.device = torch.device("cpu") if cpu else self.default_device self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0) def __repr__(self) -> str: return ( f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n" f"Num processes: {self.num_processes}\n" f"Process index: {self.process_index}\n" f"Local process index: {self.local_process_index}\n" f"Device: {self.device}\n" ) def _reset_state(): "Resets `_shared_state`, is used internally and should not be called" PartialState._shared_state.clear() def initialized(self) -> bool: "Returns whether the `PartialState` has been initialized" return self._shared_state != {} def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return self.distributed_type != DistributedType.NO and self.num_processes > 1 def is_last_process(self) -> bool: "Returns whether the current process is the last one" return self.process_index == self.num_processes - 1 def is_main_process(self) -> bool: "Returns whether the current process is the main process" return ( self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return ( self.local_process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def wait_for_everyone(self): """ Will stop the execution of the current process until every other process has reached that point (so this does nothing when the script is only run in one process). Useful to do before saving a model. Example: ```python >>> # Assuming two GPU processes >>> import time >>> from accelerate.state import PartialState >>> state = PartialState() >>> if state.is_main_process: ... time.sleep(2) >>> else: ... print("I'm waiting for the main process to finish its sleep...") >>> state.wait_for_everyone() >>> # Should print on every process at the same time >>> print("Everyone is here") ``` """ if self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, DistributedType.DEEPSPEED, DistributedType.FSDP, ): torch.distributed.barrier() elif self.distributed_type == DistributedType.XLA: xm.rendezvous("accelerate.utils.wait_for_everyone") def _goes_first(self, is_main: bool): if not is_main: self.wait_for_everyone() yield if is_main: self.wait_for_everyone() def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. 
Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate import PartialState state = PartialState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ if self.num_processes == 1: yield inputs return length = len(inputs) # Nested dictionary of any types if isinstance(inputs, dict): length = len(inputs[list(inputs.keys())[0]]) if not all(len(v) == length for v in inputs.values()): raise ValueError("All values in the dictionary must have the same length") num_samples_per_process = math.ceil(length / self.num_processes) start_index = self.process_index * num_samples_per_process end_index = start_index + num_samples_per_process if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1): end_index = length def _split_values(inputs, start_index, end_index): if isinstance(inputs, (list, tuple, torch.Tensor)): if start_index >= len(inputs): result = inputs[-1:] else: result = inputs[start_index:end_index] if apply_padding: if isinstance(result, torch.Tensor): from accelerate.utils import pad_across_processes, send_to_device # The tensor needs to be on the device before we can pad it tensorized_result = send_to_device(result, self.device) result = pad_across_processes(tensorized_result, pad_index=inputs[-1]) else: result += [result[-1]] * (num_samples_per_process - len(result)) return result elif isinstance(inputs, dict): for key in inputs.keys(): inputs[key] = _split_values(inputs[key], start_index, end_index) return inputs else: if is_datasets_available(): from datasets import Dataset if isinstance(inputs, Dataset): if start_index >= len(inputs): start_index = len(inputs) - 1 if end_index > len(inputs): end_index = len(inputs) result_idcs = list(range(start_index, end_index)) if apply_padding: result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs)) return inputs.select(result_idcs) return inputs yield _split_values(inputs, start_index, end_index) def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> with accelerator.main_process_first(): ... # This will be printed first by process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {accelerator.process_index}") ``` """ yield from self._goes_first(self.is_main_process) def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. 
Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> with state.local_main_process_first(): ... # This will be printed first by local process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {state.local_process_index}") ``` """ yield from self._goes_first(self.is_local_main_process) def on_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the main process. Args: function (`Callable`): The function to decorate. Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> @state.on_main_process ... def print_something(): ... print("This will be printed by process 0 only.") >>> print_something() "This will be printed by process 0 only" ``` """ if not self.initialized: raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.") if self.is_main_process or not self.use_distributed: return function return do_nothing def on_local_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the local main process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 2 servers with 4 processes each. from accelerate.state import PartialState state = PartialState() def print_something(): print("This will be printed by process 0 only on each server.") print_something() # On server 1: "This will be printed by process 0 only" # On server 2: "This will be printed by process 0 only" ``` """ if self.is_local_main_process or not self.use_distributed: return function return do_nothing def on_last_process(self, function: Callable[..., Any]): """ Decorator that only runs the decorated function on the last process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 3" ``` """ if self.is_last_process or not self.use_distributed: return function return do_nothing def on_process(self, function: Callable[..., Any] = None, process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index. Args: function (`Callable`, `optional`): The function to decorate. process_index (`int`, `optional`): The index of the process on which to run the function. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 2" ``` """ if function is None: return partial(self.on_process, process_index=process_index) if (self.process_index == process_index) or (not self.use_distributed): return function return do_nothing def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index on the current node. Args: function (`Callable`, *optional*): The function to decorate. local_process_index (`int`, *optional*): The index of the local process on which to run the function. Example: ```python # Assume we have 2 servers with 4 processes each. 
from accelerate import Accelerator accelerator = Accelerator() def print_something(): print(f"Printed on process {accelerator.local_process_index}") print_something() # On server 1: "Printed on process 2" # On server 2: "Printed on process 2" ``` """ if function is None: return partial(self.on_local_process, local_process_index=local_process_index) if (self.local_process_index == local_process_index) or (not self.use_distributed): return function return do_nothing def print(self, *args, **kwargs): if self.is_local_main_process: print(*args, **kwargs) def default_device(self) -> torch.device: """ Returns the default device which is: - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True. - CUDA if `torch.cuda.is_available()` - NPU if `is_npu_available()` - CPU otherwise """ if is_mps_available(): os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" return torch.device("mps") elif torch.cuda.is_available(): return torch.device("cuda") elif is_xpu_available(): return torch.device("xpu:0") elif is_npu_available(): return torch.device("npu") else: return torch.device("cpu") class DistributedType(str, enum.Enum): """ Represents a type of distributed environment. Values: - **NO** -- Not a distributed environment, just a single process. - **MULTI_CPU** -- Distributed on multiple CPU nodes. - **MULTI_GPU** -- Distributed on multiple GPUs. - **MULTI_NPU** -- Distributed on multiple NPUs. - **MULTI_XPU** -- Distributed on multiple XPUs. - **DEEPSPEED** -- Using DeepSpeed. - **XLA** -- Using TorchXLA. - **TPU** -- This field will be deprecated in v0.27.0. Use XLA instead. """ # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box. NO = "NO" MULTI_CPU = "MULTI_CPU" MULTI_GPU = "MULTI_GPU" MULTI_NPU = "MULTI_NPU" MULTI_XPU = "MULTI_XPU" DEEPSPEED = "DEEPSPEED" FSDP = "FSDP" XLA = "XLA" MEGATRON_LM = "MEGATRON_LM" TPU = DeprecatedFieldDescriptor("TPU", "XLA") The provided code snippet includes necessary dependencies for implementing the `save` function. Write a Python function `def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = False)` to solve the following problem: Save the data to disk. Use in place of `torch.save()`. Args: obj: The data to save f: The file (or file-like object) to use to save the data save_on_each_node (`bool`, *optional*, defaults to `False`): Whether to only save on the global main process safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors` or the traditional PyTorch way (that uses `pickle`). Here is the function: def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = False): """ Save the data to disk. Use in place of `torch.save()`. Args: obj: The data to save f: The file (or file-like object) to use to save the data save_on_each_node (`bool`, *optional*, defaults to `False`): Whether to only save on the global main process safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors` or the traditional PyTorch way (that uses `pickle`). """ # When TorchXLA is enabled, it's necessary to transfer all data to the CPU before saving. # Another issue arises with `id_tensor_storage`, which treats all XLA tensors as identical. # If tensors remain on XLA, calling `clean_state_dict_for_safetensors` will result in only # one XLA tensor remaining. 
if PartialState().distributed_type == DistributedType.XLA: obj = xm._maybe_convert_to_cpu(obj) # Check if it's a model and remove duplicates if safe_serialization: save_func = partial(safe_save_file, metadata={"format": "pt"}) if isinstance(obj, OrderedDict): obj = clean_state_dict_for_safetensors(obj) else: save_func = torch.save if PartialState().is_main_process and not save_on_each_node: save_func(obj, f) elif PartialState().is_local_main_process and save_on_each_node: save_func(obj, f)
Save the data to disk. Use in place of `torch.save()`. Args: obj: The data to save f: The file (or file-like object) to use to save the data save_on_each_node (`bool`, *optional*, defaults to `False`): Whether to only save on the global main process safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors` or the traditional PyTorch way (that uses `pickle`).
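A minimal sketch of how `save` is typically called, assuming it is importable from `accelerate.utils` as in recent releases; the model and file names are illustrative.

```python
# Hedged usage sketch; model and file names are illustrative.
import torch

from accelerate.utils import save

model = torch.nn.Linear(4, 2)

# Pickle-based save, written once by the global main process.
save(model.state_dict(), "weights.bin")

# Safetensors save; shared-tensor aliases are stripped from the state dict first.
save(model.state_dict(), "weights.safetensors", safe_serialization=True)
```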
2,924
import collections import os import platform import re import socket from contextlib import contextmanager from functools import partial, reduce from types import MethodType from typing import OrderedDict import torch from packaging.version import Version from safetensors.torch import save_file as safe_save_file from ..commands.config.default import write_basic_config from ..logging import get_logger from ..state import PartialState from .constants import FSDP_PYTORCH_VERSION from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_torch_distributed_available, is_torch_xla_available from .modeling import id_tensor_storage from .transformer_engine import convert_model from .versions import is_torch_version The provided code snippet includes necessary dependencies for implementing the `clear_environment` function. Write a Python function `def clear_environment()` to solve the following problem: A context manager that will temporarily clear environment variables. When this context exits, the previous environment variables will be back. Example: ```python >>> import os >>> from accelerate.utils import clear_environment >>> os.environ["FOO"] = "bar" >>> with clear_environment(): ... print(os.environ) ... os.environ["FOO"] = "new_bar" ... print(os.environ["FOO"]) {} new_bar >>> print(os.environ["FOO"]) bar ``` Here is the function: def clear_environment(): """ A context manager that will temporarily clear environment variables. When this context exits, the previous environment variables will be back. Example: ```python >>> import os >>> from accelerate.utils import clear_environment >>> os.environ["FOO"] = "bar" >>> with clear_environment(): ... print(os.environ) ... os.environ["FOO"] = "new_bar" ... print(os.environ["FOO"]) {} new_bar >>> print(os.environ["FOO"]) bar ``` """ _old_os_environ = os.environ.copy() os.environ.clear() try: yield finally: os.environ.clear() # clear any added keys, os.environ.update(_old_os_environ) # then restore previous environment
A context manager that will temporarily clear environment variables. When this context exits, the previous environment variables will be back. Example: ```python >>> import os >>> from accelerate.utils import clear_environment >>> os.environ["FOO"] = "bar" >>> with clear_environment(): ... print(os.environ) ... os.environ["FOO"] = "new_bar" ... print(os.environ["FOO"]) {} new_bar >>> print(os.environ["FOO"]) bar ```
2,925
import collections import os import platform import re import socket from contextlib import contextmanager from functools import partial, reduce from types import MethodType from typing import OrderedDict import torch from packaging.version import Version from safetensors.torch import save_file as safe_save_file from ..commands.config.default import write_basic_config from ..logging import get_logger from ..state import PartialState from .constants import FSDP_PYTORCH_VERSION from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_torch_distributed_available, is_torch_xla_available from .modeling import id_tensor_storage from .transformer_engine import convert_model from .versions import is_torch_version The provided code snippet includes necessary dependencies for implementing the `patch_environment` function. Write a Python function `def patch_environment(**kwargs)` to solve the following problem: A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting. Will convert the values in `kwargs` to strings and upper-case all the keys. Example: ```python >>> import os >>> from accelerate.utils import patch_environment >>> with patch_environment(FOO="bar"): ... print(os.environ["FOO"]) # prints "bar" >>> print(os.environ["FOO"]) # raises KeyError ``` Here is the function: def patch_environment(**kwargs): """ A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting. Will convert the values in `kwargs` to strings and upper-case all the keys. Example: ```python >>> import os >>> from accelerate.utils import patch_environment >>> with patch_environment(FOO="bar"): ... print(os.environ["FOO"]) # prints "bar" >>> print(os.environ["FOO"]) # raises KeyError ``` """ existing_vars = {} for key, value in kwargs.items(): key = key.upper() if key in os.environ: existing_vars[key] = os.environ[key] os.environ[key] = str(value) try: yield finally: for key in kwargs: key = key.upper() if key in existing_vars: # restore previous value os.environ[key] = existing_vars[key] else: os.environ.pop(key, None)
A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting. Will convert the values in `kwargs` to strings and upper-case all the keys. Example: ```python >>> import os >>> from accelerate.utils import patch_environment >>> with patch_environment(FOO="bar"): ... print(os.environ["FOO"]) # prints "bar" >>> print(os.environ["FOO"]) # raises KeyError ```
2,926
import collections import os import platform import re import socket from contextlib import contextmanager from functools import partial, reduce from types import MethodType from typing import OrderedDict import torch from packaging.version import Version from safetensors.torch import save_file as safe_save_file from ..commands.config.default import write_basic_config from ..logging import get_logger from ..state import PartialState from .constants import FSDP_PYTORCH_VERSION from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_torch_distributed_available, is_torch_xla_available from .modeling import id_tensor_storage from .transformer_engine import convert_model from .versions import is_torch_version The provided code snippet includes necessary dependencies for implementing the `get_pretty_name` function. Write a Python function `def get_pretty_name(obj)` to solve the following problem: Gets a pretty name from `obj`. Here is the function: def get_pretty_name(obj): """ Gets a pretty name from `obj`. """ if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"): obj = getattr(obj, "__class__", obj) if hasattr(obj, "__qualname__"): return obj.__qualname__ if hasattr(obj, "__name__"): return obj.__name__ return str(obj)
Gets a pretty name from `obj`.
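A quick illustration of the fallback chain (`__qualname__`, then `__name__`, then the object's class, then `str`), assuming `get_pretty_name` is importable from `accelerate.utils`:

```python
# Hedged sketch of the fallback chain in get_pretty_name.
import torch

from accelerate.utils import get_pretty_name

print(get_pretty_name(torch.optim.AdamW))      # "AdamW"  (class __qualname__)
print(get_pretty_name(torch.nn.Linear(2, 2)))  # "Linear" (instance -> __class__)
print(get_pretty_name(42))                     # "int"    (same __class__ fallback)
```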
2,927
import collections import os import platform import re import socket from contextlib import contextmanager from functools import partial, reduce from types import MethodType from typing import OrderedDict import torch from packaging.version import Version from safetensors.torch import save_file as safe_save_file from ..commands.config.default import write_basic_config from ..logging import get_logger from ..state import PartialState from .constants import FSDP_PYTORCH_VERSION from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_torch_distributed_available, is_torch_xla_available from .modeling import id_tensor_storage from .transformer_engine import convert_model from .versions import is_torch_version The provided code snippet includes necessary dependencies for implementing the `convert_bytes` function. Write a Python function `def convert_bytes(size)` to solve the following problem: Converts `size` from bytes to the largest possible unit Here is the function: def convert_bytes(size): "Converts `size` from bytes to the largest possible unit" for x in ["bytes", "KB", "MB", "GB", "TB"]: if size < 1024.0: return f"{round(size, 2)} {x}" size /= 1024.0 return f"{round(size, 2)} PB"
Converts `size` from bytes to the largest possible unit
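A quick sanity check of the unit walk, assuming `convert_bytes` is importable from `accelerate.utils`: each step divides by 1024 until the value drops below one unit.

```python
# Hedged sketch: repeated division by 1024 picks the largest fitting unit.
from accelerate.utils import convert_bytes

print(convert_bytes(512))          # "512 bytes"
print(convert_bytes(2048))         # "2.0 KB"
print(convert_bytes(5 * 1024**3))  # "5.0 GB"
```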
2,928
import collections import os import platform import re import socket from contextlib import contextmanager from functools import partial, reduce from types import MethodType from typing import OrderedDict import torch from packaging.version import Version from safetensors.torch import save_file as safe_save_file from ..commands.config.default import write_basic_config from ..logging import get_logger from ..state import PartialState from .constants import FSDP_PYTORCH_VERSION from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_torch_distributed_available, is_torch_xla_available from .modeling import id_tensor_storage from .transformer_engine import convert_model from .versions import is_torch_version logger = get_logger(__name__) The provided code snippet includes necessary dependencies for implementing the `check_os_kernel` function. Write a Python function `def check_os_kernel()` to solve the following problem: Warns if the kernel version is below the recommended minimum on Linux. Here is the function: def check_os_kernel(): """Warns if the kernel version is below the recommended minimum on Linux.""" # see issue #1929 info = platform.uname() system = info.system if system != "Linux": return _, version, *_ = re.split(r"(\d+\.\d+\.\d+)", info.release) min_version = "5.5.0" if Version(version) < Version(min_version): msg = ( f"Detected kernel version {version}, which is below the recommended minimum of {min_version}; this can " "cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher." ) logger.warning(msg, main_process_only=True)
Warns if the kernel version is below the recommended minimum on Linux.
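The core of the check is the regex split on the kernel release string; a self-contained sketch with an example release value (not taken from the source):

```python
# Hedged sketch of the version extraction inside check_os_kernel.
# "5.4.0-167-generic" is an illustrative `platform.uname().release` value.
import re

from packaging.version import Version

release = "5.4.0-167-generic"
_, version, *_ = re.split(r"(\d+\.\d+\.\d+)", release)
print(version)                               # "5.4.0"
print(Version(version) < Version("5.5.0"))   # True -> a warning would be logged
```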
2,929
import pickle
import warnings
from functools import update_wrapper, wraps
from typing import Any, Mapping
import torch
from ..state import PartialState
from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES
from .dataclasses import DistributedType, TensorInformation
from .imports import (
    is_npu_available,
    is_torch_distributed_available,
    is_torch_version,
    is_torch_xla_available,
    is_xpu_available,
)
def is_torch_xpu_tensor(tensor):
    # `isinstance` takes a single class-or-tuple argument after the object,
    # so the candidate XPU tensor types must be grouped in one tuple.
    return isinstance(
        tensor,
        (
            torch.xpu.FloatTensor,
            torch.xpu.ByteTensor,
            torch.xpu.IntTensor,
            torch.xpu.LongTensor,
            torch.xpu.HalfTensor,
            torch.xpu.DoubleTensor,
            torch.xpu.BFloat16Tensor,
        ),
    )
null
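Worth noting for the snippet above: `isinstance` accepts exactly one class-or-tuple argument after the object, which is why the XPU tensor types are grouped in a tuple. A self-contained illustration of that contract:

```python
# isinstance contract: candidate classes must be grouped in a single tuple.
print(isinstance(1.0, (int, float)))  # True
try:
    isinstance(1.0, int, float)  # passing classes positionally is an error
except TypeError as err:
    print(err)  # e.g. "isinstance expected 2 arguments, got 3" on CPython
```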
2,930
import pickle import warnings from functools import update_wrapper, wraps from typing import Any, Mapping import torch from ..state import PartialState from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES from .dataclasses import DistributedType, TensorInformation from .imports import ( is_npu_available, is_torch_distributed_available, is_torch_version, is_torch_xla_available, is_xpu_available, ) def is_torch_tensor(tensor): return isinstance(tensor, torch.Tensor) def honor_type(obj, generator): """ Cast a generator to the same type as obj (list, tuple, or namedtuple) """ # Some objects may not be able to instantiate from a generator directly if is_namedtuple(obj): return type(obj)(*list(generator)) else: return type(obj)(generator) def is_npu_available(check_device=False): "Checks if `torch_npu` is installed and potentially if a NPU is in the environment" if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None: return False import torch import torch_npu # noqa: F401 if check_device: try: # Will raise a RuntimeError if no NPU is found _ = torch.npu.device_count() return torch.npu.is_available() except RuntimeError: return False return hasattr(torch, "npu") and torch.npu.is_available() def is_xpu_available(check_device=False): "check if user disables it explicitly" if not parse_flag_from_env("ACCELERATE_USE_XPU", default=True): return False "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment" if is_ipex_available(): import torch if is_torch_version("<=", "1.12"): return False else: return False import intel_extension_for_pytorch # noqa: F401 if check_device: try: # Will raise a RuntimeError if no XPU is found _ = torch.xpu.device_count() return torch.xpu.is_available() except RuntimeError: return False return hasattr(torch, "xpu") and torch.xpu.is_available() The provided code snippet includes necessary dependencies for implementing the `send_to_device` function. Write a Python function `def send_to_device(tensor, device, non_blocking=False, skip_keys=None)` to solve the following problem: Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to send to a given device. device (`torch.device`): The device to send the data to. Returns: The same data structure as `tensor` with all tensors sent to the proper device. Here is the function: def send_to_device(tensor, device, non_blocking=False, skip_keys=None): """ Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to send to a given device. device (`torch.device`): The device to send the data to. Returns: The same data structure as `tensor` with all tensors sent to the proper device. """ if is_torch_tensor(tensor) or hasattr(tensor, "to"): # `torch.Tensor.to("npu")` could not find context when called for the first time (see this [issue](https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue)). if device == "npu": device = "npu:0" if device == "xpu": device = "xpu:0" try: return tensor.to(device, non_blocking=non_blocking) except TypeError: # .to() doesn't accept non_blocking as kwarg return tensor.to(device) except AssertionError as error: # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). 
# This call is inside the try-block since is_npu_available is not supported by torch.compile. if is_npu_available(): if isinstance(device, int): device = f"npu:{device}" else: raise error except Exception as error: if is_xpu_available(): if isinstance(device, int): device = f"xpu:{device}" else: raise error try: return tensor.to(device, non_blocking=non_blocking) except TypeError: # .to() doesn't accept non_blocking as kwarg return tensor.to(device) elif isinstance(tensor, (tuple, list)): return honor_type( tensor, (send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for t in tensor) ) elif isinstance(tensor, Mapping): if isinstance(skip_keys, str): skip_keys = [skip_keys] elif skip_keys is None: skip_keys = [] return type(tensor)( { k: t if k in skip_keys else send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for k, t in tensor.items() } ) else: return tensor
Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to send to a given device. device (`torch.device`): The device to send the data to. Returns: The same data structure as `tensor` with all tensors sent to the proper device.
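A minimal usage sketch for `send_to_device` (exported from `accelerate.utils`): nested containers are traversed recursively, and keys listed in `skip_keys` keep their original device. The batch contents here are illustrative.

```python
# Hedged usage sketch; batch contents are illustrative.
import torch

from accelerate.utils import send_to_device

batch = {"input_ids": torch.ones(2, 4, dtype=torch.long), "meta": torch.zeros(2)}
device = "cuda" if torch.cuda.is_available() else "cpu"

moved = send_to_device(batch, device, skip_keys=["meta"])
print(moved["input_ids"].device)  # the target device
print(moved["meta"].device)       # left untouched: cpu
```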
2,931
import pickle import warnings from functools import update_wrapper, wraps from typing import Any, Mapping import torch from ..state import PartialState from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES from .dataclasses import DistributedType, TensorInformation from .imports import ( is_npu_available, is_torch_distributed_available, is_torch_version, is_torch_xla_available, is_xpu_available, ) def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs): """ Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type. Args: func (`callable`): The function to recursively apply. data (nested list/tuple/dictionary of `main_type`): The data on which to apply `func` *args: Positional arguments that will be passed to `func` when applied on the unpacked data. main_type (`type`, *optional*, defaults to `torch.Tensor`): The base type of the objects to which apply `func`. error_on_other_type (`bool`, *optional*, defaults to `False`): Whether to return an error or not if after unpacking `data`, we get on an object that is not of type `main_type`. If `False`, the function will leave objects of types different than `main_type` unchanged. **kwargs: Keyword arguments that will be passed to `func` when applied on the unpacked data. Returns: The same data structure as `data` with `func` applied to every object of type `main_type`. """ if isinstance(data, (tuple, list)): return honor_type( data, ( recursively_apply( func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for o in data ), ) elif isinstance(data, Mapping): return type(data)( { k: recursively_apply( func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for k, v in data.items() } ) elif test_type(data): return func(data, *args, **kwargs) elif error_on_other_type: raise TypeError( f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of " f"objects that are valid for `{test_type.__name__}` should be passed." ) return data class TensorInformation: shape: torch.Size dtype: torch.dtype The provided code snippet includes necessary dependencies for implementing the `get_data_structure` function. Write a Python function `def get_data_structure(data)` to solve the following problem: Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data to send to analyze. Returns: The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors. Here is the function: def get_data_structure(data): """ Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data to send to analyze. Returns: The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors. """ def _get_data_structure(tensor): return TensorInformation(shape=tensor.shape, dtype=tensor.dtype) return recursively_apply(_get_data_structure, data)
Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data to send to analyze. Returns: The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.
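A small sketch, assuming `get_data_structure` is importable from `accelerate.utils`: every tensor is swapped for a lightweight `TensorInformation(shape, dtype)` record while the container layout is preserved.

```python
# Hedged sketch: tensors become shape/dtype records, containers are kept.
import torch

from accelerate.utils import get_data_structure

data = {"logits": torch.zeros(2, 3), "labels": [torch.ones(2, dtype=torch.long)]}
info = get_data_structure(data)
print(info["logits"].shape, info["logits"].dtype)  # torch.Size([2, 3]) torch.float32
```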
2,932
import pickle import warnings from functools import update_wrapper, wraps from typing import Any, Mapping import torch from ..state import PartialState from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES from .dataclasses import DistributedType, TensorInformation from .imports import ( is_npu_available, is_torch_distributed_available, is_torch_version, is_torch_xla_available, is_xpu_available, ) def is_tensor_information(tensor_info): return isinstance(tensor_info, TensorInformation) def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs): """ Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type. Args: func (`callable`): The function to recursively apply. data (nested list/tuple/dictionary of `main_type`): The data on which to apply `func` *args: Positional arguments that will be passed to `func` when applied on the unpacked data. main_type (`type`, *optional*, defaults to `torch.Tensor`): The base type of the objects to which apply `func`. error_on_other_type (`bool`, *optional*, defaults to `False`): Whether to return an error or not if after unpacking `data`, we get on an object that is not of type `main_type`. If `False`, the function will leave objects of types different than `main_type` unchanged. **kwargs: Keyword arguments that will be passed to `func` when applied on the unpacked data. Returns: The same data structure as `data` with `func` applied to every object of type `main_type`. """ if isinstance(data, (tuple, list)): return honor_type( data, ( recursively_apply( func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for o in data ), ) elif isinstance(data, Mapping): return type(data)( { k: recursively_apply( func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for k, v in data.items() } ) elif test_type(data): return func(data, *args, **kwargs) elif error_on_other_type: raise TypeError( f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of " f"objects that are valid for `{test_type.__name__}` should be passed." ) return data The provided code snippet includes necessary dependencies for implementing the `initialize_tensors` function. Write a Python function `def initialize_tensors(data_structure)` to solve the following problem: Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`]. Returns: The same data structure as `data` with tensors instead of [`~utils.TensorInformation`]. Here is the function: def initialize_tensors(data_structure): """ Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`]. Returns: The same data structure as `data` with tensors instead of [`~utils.TensorInformation`]. """ def _initialize_tensor(tensor_info): return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype) return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information)
Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`]. Returns: The same data structure as `data` with tensors instead of [`~utils.TensorInformation`].
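A hedged round-trip sketch, assuming `get_data_structure` from the previous record and `initialize_tensors` are both importable from `accelerate.utils`: the captured metadata alone is enough for a process to allocate matching (uninitialized) buffers, e.g. before receiving a broadcast.

```python
# Hedged round-trip sketch: metadata in, empty buffers out.
import torch

from accelerate.utils import get_data_structure, initialize_tensors

skeleton = get_data_structure({"x": torch.arange(6).reshape(2, 3)})
buffers = initialize_tensors(skeleton)  # torch.empty with matching shape/dtype
print(buffers["x"].shape, buffers["x"].dtype)  # torch.Size([2, 3]) torch.int64
```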
2,933
import pickle
import warnings
from functools import update_wrapper, wraps
from typing import Any, Mapping
import torch
from ..state import PartialState
from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES
from .dataclasses import DistributedType, TensorInformation
from .imports import (
    is_npu_available,
    is_torch_distributed_available,
    is_torch_version,
    is_torch_xla_available,
    is_xpu_available,
)
def find_batch_size(data):
    """
    Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors.

    Args:
        data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.

    Returns:
        `int`: The batch size.
    """
    if isinstance(data, (tuple, list, Mapping)) and (len(data) == 0):
        raise ValueError(f"Cannot find the batch size from empty {type(data)}.")
    if isinstance(data, (tuple, list)):
        return find_batch_size(data[0])
    elif isinstance(data, Mapping):
        for k in data.keys():
            return find_batch_size(data[k])
    elif not isinstance(data, torch.Tensor):
        raise TypeError(f"Can only find the batch size of tensors but got {type(data)}.")
    return data.shape[0]
The provided code snippet includes necessary dependencies for implementing the `ignorant_find_batch_size` function. Write a Python function `def ignorant_find_batch_size(data)` to solve the following problem:
Same as [`utils.operations.find_batch_size`] except it will ignore any `ValueError` or `TypeError` that is raised

Args:
    data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.

Returns:
    `int`: The batch size.
Here is the function:
def ignorant_find_batch_size(data):
    """
    Same as [`utils.operations.find_batch_size`] except it will ignore any `ValueError` or `TypeError` that is raised

    Args:
        data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.

    Returns:
        `int`: The batch size.
    """
    try:
        return find_batch_size(data)
    except (ValueError, TypeError):
        pass
    return None
Same as [`utils.operations.find_batch_size`] except it will ignore any `ValueError` or `TypeError` that is raised

Args:
    data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.

Returns:
    `int`: The batch size.
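Assuming the definitions above are in scope, the behavioral difference is easy to demonstrate: the "ignorant" variant returns `None` instead of raising on unusable inputs.

```python
# Hedged illustration; assumes find_batch_size / ignorant_find_batch_size
# from the record above are defined in the current scope.
import torch

print(ignorant_find_batch_size({"x": torch.zeros(8, 3)}))  # 8
print(ignorant_find_batch_size([]))                        # None (ValueError swallowed)
print(ignorant_find_batch_size("not a tensor"))            # None (TypeError swallowed)
```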
2,934
import pickle import warnings from functools import update_wrapper, wraps from typing import Any, Mapping import torch from ..state import PartialState from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES from .dataclasses import DistributedType, TensorInformation from .imports import ( is_npu_available, is_torch_distributed_available, is_torch_version, is_torch_xla_available, is_xpu_available, ) def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs): """ Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type. Args: func (`callable`): The function to recursively apply. data (nested list/tuple/dictionary of `main_type`): The data on which to apply `func` *args: Positional arguments that will be passed to `func` when applied on the unpacked data. main_type (`type`, *optional*, defaults to `torch.Tensor`): The base type of the objects to which apply `func`. error_on_other_type (`bool`, *optional*, defaults to `False`): Whether to return an error or not if after unpacking `data`, we get on an object that is not of type `main_type`. If `False`, the function will leave objects of types different than `main_type` unchanged. **kwargs: Keyword arguments that will be passed to `func` when applied on the unpacked data. Returns: The same data structure as `data` with `func` applied to every object of type `main_type`. """ if isinstance(data, (tuple, list)): return honor_type( data, ( recursively_apply( func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for o in data ), ) elif isinstance(data, Mapping): return type(data)( { k: recursively_apply( func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for k, v in data.items() } ) elif test_type(data): return func(data, *args, **kwargs) elif error_on_other_type: raise TypeError( f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of " f"objects that are valid for `{test_type.__name__}` should be passed." ) return data The provided code snippet includes necessary dependencies for implementing the `listify` function. Write a Python function `def listify(data)` to solve the following problem: Recursively finds tensors in a nested list/tuple/dictionary and converts them to a list of numbers. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to convert to regular numbers. Returns: The same data structure as `data` with lists of numbers instead of `torch.Tensor`. Here is the function: def listify(data): """ Recursively finds tensors in a nested list/tuple/dictionary and converts them to a list of numbers. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to convert to regular numbers. Returns: The same data structure as `data` with lists of numbers instead of `torch.Tensor`. """ def _convert_to_list(tensor): tensor = tensor.detach().cpu() if tensor.dtype == torch.bfloat16: # As of Numpy 1.21.4, NumPy does not support bfloat16 (see # https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst ). # Until Numpy adds bfloat16, we must convert float32. tensor = tensor.to(torch.float32) return tensor.tolist() return recursively_apply(_convert_to_list, data)
Recursively finds tensors in a nested list/tuple/dictionary and converts them to a list of numbers. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data whose tensors should be converted to regular numbers. Returns: The same data structure as `data` with lists of numbers instead of `torch.Tensor`.
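A quick usage sketch (assuming the `listify` defined above, e.g. imported from `accelerate.utils`):

```python
import torch

nested = {"loss": torch.tensor(0.25), "logits": [torch.ones(2, 2, dtype=torch.bfloat16)]}
print(listify(nested))
# {'loss': 0.25, 'logits': [[[1.0, 1.0], [1.0, 1.0]]]}
# bfloat16 tensors are routed through float32 before .tolist(), and the
# nesting (dict -> list) is preserved by recursively_apply.
```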
2,935
import pickle import warnings from functools import update_wrapper, wraps from typing import Any, Mapping import torch from ..state import PartialState from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES from .dataclasses import DistributedType, TensorInformation from .imports import ( is_npu_available, is_torch_distributed_available, is_torch_version, is_torch_xla_available, is_xpu_available, ) def get_shape(data): """ Recursively gathers the shape of a nested list/tuple/dictionary of tensors as a list. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data to send to analyze. Returns: The same data structure as `data` with lists of tensor shapes instead of tensors. """ def _get_shape(tensor): return list(tensor.shape) return recursively_apply(_get_shape, data) class DistributedOperationException(Exception): """ An exception class for distributed operations. Raised if the operation cannot be performed due to the shape of the tensors. """ pass def gather_object(object: Any): """ Recursively gather object in a nested list/tuple/dictionary of objects from all devices. Args: object (nested list/tuple/dictionary of picklable object): The data to gather. Returns: The same data structure as `object` with all the objects sent to every device. """ if PartialState().distributed_type == DistributedType.XLA: raise NotImplementedError("gather objects in TPU is not supported") elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES: return _gpu_gather_object(object) else: return object def find_device(data): """ Finds the device on which a nested dict/list/tuple of tensors lies (assuming they are all on the same device). Args: (nested list/tuple/dictionary of `torch.Tensor`): The data we want to know the device of. """ if isinstance(data, Mapping): for obj in data.values(): device = find_device(obj) if device is not None: return device elif isinstance(data, (tuple, list)): for obj in data: device = find_device(obj) if device is not None: return device elif isinstance(data, torch.Tensor): return data.device class PartialState: """ Singleton class that has information about the current training environment and functions to help with process control. Designed to be used when only process control and device execution states are needed. Does *not* need to be initialized from `Accelerator`. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **local_process_index** (`int`) -- The index of the current process on the current server. - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. 
""" _shared_state = SharedDict() def __init__(self, cpu: bool = False, **kwargs): self.__dict__ = self._shared_state if not self.initialized: self._cpu = cpu self.backend = None env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None) self.device = torch.device(env_device) if env_device is not None else None self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE") use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None) if use_sagemaker_dp is None: use_sagemaker_dp = ( os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true" and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO ) if use_sagemaker_dp and not cpu: if ( os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL ) or use_sagemaker_dp: self.distributed_type = DistributedType.MULTI_GPU import smdistributed.dataparallel.torch.torch_smddp # noqa if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="smddp") self.backend = "smddp" self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_torch_xla_available() and not cpu: self.distributed_type = DistributedType.XLA self.device = xm.xla_device() xm.set_replication(self.device, xm.get_xla_supported_devices()) self.num_processes = xm.xrt_world_size() self.process_index = xm.get_ordinal() if is_torch_xla_available(check_is_tpu=True): self.local_process_index = xm.get_local_ordinal() else: self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) elif ( os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu ): assert ( is_deepspeed_available() ), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source" self.distributed_type = DistributedType.DEEPSPEED if not torch.distributed.is_initialized(): from deepspeed import comm as dist # DeepSpeed always uses nccl kwargs.pop("backend", None) if is_xpu_available and is_ccl_available(): # Set DeepSpeed backend to ccl for xpu self.backend = "ccl" os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1") os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0") elif is_npu_available(): self.backend = "hccl" else: self.backend = "nccl" dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: if is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) if self.device is not None: torch.xpu.set_device(self.device) elif is_npu_available(): self.device = torch.device("npu", self.local_process_index) if self.device is not None: torch.npu.set_device(self.device) else: self.device = torch.device("cuda", self.local_process_index) if self.device is not None: torch.cuda.set_device(self.device) if self.device.type == "cuda" and not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. 
" 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self._mixed_precision = "no" # deepspeed handles mixed_precision using deepspeed_config elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu and torch.cuda.is_available(): self.distributed_type = DistributedType.MULTI_GPU if not torch.distributed.is_initialized(): self.backend = kwargs.pop("backend", "nccl") # Special case for `TrainingArguments`, where `backend` will be `None` if self.backend is None: self.backend = "nccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) if not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. " 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_npu_available() and not cpu and int(os.environ.get("LOCAL_RANK", -1)) != -1: self.distributed_type = DistributedType.MULTI_NPU if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = "hccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("npu", self.local_process_index) torch.npu.set_device(self.device) elif ( get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1 or int(os.environ.get("LOCAL_RANK", -1)) != -1 ): if not cpu and is_xpu_available(): self.distributed_type = DistributedType.MULTI_XPU else: self.distributed_type = DistributedType.MULTI_CPU # Actually, CCL_WORKER_COUNT is a CPU only env var in CCL, no need to set it for XPU. 
if is_ccl_available() and ( get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or self.distributed_type == DistributedType.MULTI_XPU ): if get_ccl_version() >= "1.12": import oneccl_bindings_for_pytorch # noqa: F401 else: import torch_ccl # noqa: F401 backend = "ccl" elif torch.distributed.is_mpi_available(): backend = "mpi" else: backend = "gloo" # Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH rank = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1) local_rank = get_int_from_env( ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 ) local_size = get_int_from_env( ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1, ) self.local_process_index = local_rank os.environ["RANK"] = str(rank) os.environ["WORLD_SIZE"] = str(size) os.environ["LOCAL_RANK"] = str(local_rank) os.environ["LOCAL_WORLD_SIZE"] = str(local_size) if backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU: os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = str(local_size) os.environ["CCL_LOCAL_RANK"] = str(local_rank) if not os.environ.get("MASTER_PORT", None): os.environ["MASTER_PORT"] = "29500" if not os.environ.get("MASTER_ADDR", None): if local_size != size and backend != "mpi": raise ValueError( "Looks like distributed multinode run but MASTER_ADDR env not set, " "please try exporting rank 0's hostname as MASTER_ADDR" ) if ( self.distributed_type == DistributedType.MULTI_CPU and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0 ): import psutil num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size) if num_cpu_threads_per_process == 0: num_cpu_threads_per_process = 1 torch.set_num_threads(num_cpu_threads_per_process) warnings.warn( f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob" " performance." 
) if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = backend torch.distributed.init_process_group(self.backend, rank=rank, world_size=size, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() if cpu: self.device = torch.device("cpu") elif is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) torch.xpu.set_device(self.device) else: self.device = self.default_device else: self.distributed_type = ( DistributedType.NO if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "false" else DistributedType.DEEPSPEED ) self.num_processes = 1 self.process_index = self.local_process_index = 0 if self.device is None: self.device = torch.device("cpu") if cpu else self.default_device self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0) def __repr__(self) -> str: return ( f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n" f"Num processes: {self.num_processes}\n" f"Process index: {self.process_index}\n" f"Local process index: {self.local_process_index}\n" f"Device: {self.device}\n" ) def _reset_state(): "Resets `_shared_state`, is used internally and should not be called" PartialState._shared_state.clear() def initialized(self) -> bool: "Returns whether the `PartialState` has been initialized" return self._shared_state != {} def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return self.distributed_type != DistributedType.NO and self.num_processes > 1 def is_last_process(self) -> bool: "Returns whether the current process is the last one" return self.process_index == self.num_processes - 1 def is_main_process(self) -> bool: "Returns whether the current process is the main process" return ( self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return ( self.local_process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def wait_for_everyone(self): """ Will stop the execution of the current process until every other process has reached that point (so this does nothing when the script is only run in one process). Useful to do before saving a model. Example: ```python >>> # Assuming two GPU processes >>> import time >>> from accelerate.state import PartialState >>> state = PartialState() >>> if state.is_main_process: ... time.sleep(2) >>> else: ... print("I'm waiting for the main process to finish its sleep...") >>> state.wait_for_everyone() >>> # Should print on every process at the same time >>> print("Everyone is here") ``` """ if self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, DistributedType.DEEPSPEED, DistributedType.FSDP, ): torch.distributed.barrier() elif self.distributed_type == DistributedType.XLA: xm.rendezvous("accelerate.utils.wait_for_everyone") def _goes_first(self, is_main: bool): if not is_main: self.wait_for_everyone() yield if is_main: self.wait_for_everyone() def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. 
Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate import PartialState state = PartialState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ if self.num_processes == 1: yield inputs return length = len(inputs) # Nested dictionary of any types if isinstance(inputs, dict): length = len(inputs[list(inputs.keys())[0]]) if not all(len(v) == length for v in inputs.values()): raise ValueError("All values in the dictionary must have the same length") num_samples_per_process = math.ceil(length / self.num_processes) start_index = self.process_index * num_samples_per_process end_index = start_index + num_samples_per_process if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1): end_index = length def _split_values(inputs, start_index, end_index): if isinstance(inputs, (list, tuple, torch.Tensor)): if start_index >= len(inputs): result = inputs[-1:] else: result = inputs[start_index:end_index] if apply_padding: if isinstance(result, torch.Tensor): from accelerate.utils import pad_across_processes, send_to_device # The tensor needs to be on the device before we can pad it tensorized_result = send_to_device(result, self.device) result = pad_across_processes(tensorized_result, pad_index=inputs[-1]) else: result += [result[-1]] * (num_samples_per_process - len(result)) return result elif isinstance(inputs, dict): for key in inputs.keys(): inputs[key] = _split_values(inputs[key], start_index, end_index) return inputs else: if is_datasets_available(): from datasets import Dataset if isinstance(inputs, Dataset): if start_index >= len(inputs): start_index = len(inputs) - 1 if end_index > len(inputs): end_index = len(inputs) result_idcs = list(range(start_index, end_index)) if apply_padding: result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs)) return inputs.select(result_idcs) return inputs yield _split_values(inputs, start_index, end_index) def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> with accelerator.main_process_first(): ... # This will be printed first by process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {accelerator.process_index}") ``` """ yield from self._goes_first(self.is_main_process) def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. 
Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> with state.local_main_process_first(): ... # This will be printed first by local process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {state.local_process_index}") ``` """ yield from self._goes_first(self.is_local_main_process) def on_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the main process. Args: function (`Callable`): The function to decorate. Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> @state.on_main_process ... def print_something(): ... print("This will be printed by process 0 only.") >>> print_something() "This will be printed by process 0 only" ``` """ if not self.initialized: raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.") if self.is_main_process or not self.use_distributed: return function return do_nothing def on_local_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the local main process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 2 servers with 4 processes each. from accelerate.state import PartialState state = PartialState() def print_something(): print("This will be printed by process 0 only on each server.") print_something() # On server 1: "This will be printed by process 0 only" # On server 2: "This will be printed by process 0 only" ``` """ if self.is_local_main_process or not self.use_distributed: return function return do_nothing def on_last_process(self, function: Callable[..., Any]): """ Decorator that only runs the decorated function on the last process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 3" ``` """ if self.is_last_process or not self.use_distributed: return function return do_nothing def on_process(self, function: Callable[..., Any] = None, process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index. Args: function (`Callable`, `optional`): The function to decorate. process_index (`int`, `optional`): The index of the process on which to run the function. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 2" ``` """ if function is None: return partial(self.on_process, process_index=process_index) if (self.process_index == process_index) or (not self.use_distributed): return function return do_nothing def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index on the current node. Args: function (`Callable`, *optional*): The function to decorate. local_process_index (`int`, *optional*): The index of the local process on which to run the function. Example: ```python # Assume we have 2 servers with 4 processes each. 
from accelerate import Accelerator accelerator = Accelerator() def print_something(): print(f"Printed on process {accelerator.local_process_index}") print_something() # On server 1: "Printed on process 2" # On server 2: "Printed on process 2" ``` """ if function is None: return partial(self.on_local_process, local_process_index=local_process_index) if (self.local_process_index == local_process_index) or (not self.use_distributed): return function return do_nothing def print(self, *args, **kwargs): if self.is_local_main_process: print(*args, **kwargs) def default_device(self) -> torch.device: """ Returns the default device which is: - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True. - CUDA if `torch.cuda.is_available()` - NPU if `is_npu_available()` - CPU otherwise """ if is_mps_available(): os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" return torch.device("mps") elif torch.cuda.is_available(): return torch.device("cuda") elif is_xpu_available(): return torch.device("xpu:0") elif is_npu_available(): return torch.device("npu") else: return torch.device("cpu") class DistributedType(str, enum.Enum): """ Represents a type of distributed environment. Values: - **NO** -- Not a distributed environment, just a single process. - **MULTI_CPU** -- Distributed on multiple CPU nodes. - **MULTI_GPU** -- Distributed on multiple GPUs. - **MULTI_NPU** -- Distributed on multiple NPUs. - **MULTI_XPU** -- Distributed on multiple XPUs. - **DEEPSPEED** -- Using DeepSpeed. - **XLA** -- Using TorchXLA. - **TPU** -- This field will be deprecated in v0.27.0. Use XLA instead. """ # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box. NO = "NO" MULTI_CPU = "MULTI_CPU" MULTI_GPU = "MULTI_GPU" MULTI_NPU = "MULTI_NPU" MULTI_XPU = "MULTI_XPU" DEEPSPEED = "DEEPSPEED" FSDP = "FSDP" XLA = "XLA" MEGATRON_LM = "MEGATRON_LM" TPU = DeprecatedFieldDescriptor("TPU", "XLA") The provided code snippet includes necessary dependencies for implementing the `verify_operation` function. Write a Python function `def verify_operation(function)` to solve the following problem: Verifies that `tensor` is the same shape across all processes. Only ran if `PartialState().debug` is `True`. Here is the function: def verify_operation(function): """ Verifies that `tensor` is the same shape across all processes. Only ran if `PartialState().debug` is `True`. """ @wraps(function) def wrapper(*args, **kwargs): if PartialState().distributed_type == DistributedType.NO or not PartialState().debug: return function(*args, **kwargs) operation = f"{function.__module__}.{function.__name__}" if "tensor" in kwargs: tensor = kwargs["tensor"] else: tensor = args[0] if PartialState().device.type != find_device(tensor).type: raise DistributedOperationException( f"One or more of the tensors passed to {operation} were not on the {tensor.device.type} while the `Accelerator` is configured for {PartialState().device.type}. " f"Please move it to the {PartialState().device.type} before calling {operation}." ) shapes = get_shape(tensor) output = gather_object([shapes]) if output[0] is not None: are_same = output.count(output[0]) == len(output) if not are_same: process_shape_str = "\n - ".join([f"Process {i}: {shape}" for i, shape in enumerate(output)]) raise DistributedOperationException( f"Cannot apply desired operation due to shape mismatches. " "All shapes across devices must be valid." 
f"\n\nOperation: `{operation}`\nInput shapes:\n - {process_shape_str}" ) return function(*args, **kwargs) return wrapper
Verifies that `tensor` has the same shape across all processes. Only run if `PartialState().debug` is `True`.
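The decorator is meant to wrap collective operations; a minimal sketch of applying it, using the definitions above (in a single-process run the check is skipped, since it only fires in a distributed run with `ACCELERATE_DEBUG_MODE` enabled):

```python
import torch

@verify_operation
def my_collective(tensor):
    # Stand-in for a real collective op (e.g. a gather). verify_operation
    # reads `tensor` from the first positional arg or the `tensor` kwarg,
    # gathers its shape from every rank, and raises
    # DistributedOperationException on any mismatch before this body runs.
    return tensor

out = my_collective(torch.ones(4))
```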
2,936
import pickle import warnings from functools import update_wrapper, wraps from typing import Any, Mapping import torch from ..state import PartialState from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES from .dataclasses import DistributedType, TensorInformation from .imports import ( is_npu_available, is_torch_distributed_available, is_torch_version, is_torch_xla_available, is_xpu_available, ) class DistributedOperationException(Exception): """ An exception class for distributed operations. Raised if the operation cannot be performed due to the shape of the tensors. """ pass The provided code snippet includes necessary dependencies for implementing the `chained_operation` function. Write a Python function `def chained_operation(function)` to solve the following problem: Checks that `verify_operation` failed and if so reports a more helpful error chaining the existing `DistributedOperationException`. Here is the function: def chained_operation(function): """ Checks that `verify_operation` failed and if so reports a more helpful error chaining the existing `DistributedOperationException`. """ @wraps(function) def wrapper(*args, **kwargs): try: return function(*args, **kwargs) except DistributedOperationException as e: operation = f"{function.__module__}.{function.__name__}" raise DistributedOperationException( f"Error found while calling `{operation}`. Please see the earlier error for more details." ) from e return wrapper
Checks whether `verify_operation` failed and, if so, reports a more helpful error by chaining the existing `DistributedOperationException`.
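A small sketch of what the chaining buys you, with the failing inner operation simulated directly so it runs without a distributed setup (uses `chained_operation` and `DistributedOperationException` as defined above):

```python
@chained_operation
def evaluate_step(batch):
    # Stand-in for code that calls a verify_operation-wrapped collective;
    # here we simulate that collective failing.
    raise DistributedOperationException("Process 1 had shape [3] instead of [4].")

try:
    evaluate_step(None)
except DistributedOperationException as err:
    print(err)           # names `evaluate_step`, i.e. the call site to inspect
    print(err.__cause__) # the original shape-mismatch error, kept via `raise ... from e`
```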
2,937
import pickle import warnings from functools import update_wrapper, wraps from typing import Any, Mapping import torch from ..state import PartialState from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES from .dataclasses import DistributedType, TensorInformation from .imports import ( is_npu_available, is_torch_distributed_available, is_torch_version, is_torch_xla_available, is_xpu_available, ) TENSOR_INT_TO_DTYPE = {v: k for k, v in TENSOR_TYPE_TO_INT.items()} def gather_tensor_shape(tensor): """ Grabs the shape of a `tensor` that is only available on one process and returns a tensor of its shape """ # Allocate a fixed-size buffer to store the shape and coded dtype max_tensor_dimension = 2**20 state = PartialState() base_tensor = torch.empty(max_tensor_dimension, dtype=torch.int, device=state.device) # Since PyTorch can't just send a tensor to another GPU without # knowing its size, we store the size of the tensor with data # in an allocation if tensor is not None: shape = tensor.shape tensor_dtype = TENSOR_TYPE_TO_INT[tensor.dtype] base_tensor[: len(shape) + 1] = torch.tensor(list(shape) + [tensor_dtype], dtype=int) # Perform a reduction to copy the size data onto all GPUs base_tensor = reduce(base_tensor, reduction="sum") base_tensor = base_tensor[base_tensor.nonzero()] # The last non-zero entry contains the coded dtype of the source tensor dtype = int(base_tensor[-1:][0]) base_tensor = base_tensor[:-1] return base_tensor, dtype def reduce(tensor, reduction="mean", scale=1.0): """ Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes using a given operation. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to reduce. reduction (`str`, *optional*, defaults to `"mean"`): A reduction method. Can be one of "mean", "sum", or "none". scale (`float`, *optional*): A default scaling value to be applied after the reduce, only valid on XLA. Returns: The same data structure as `data` with all the tensors reduced. """ def _reduce_across_processes(tensor, reduction="mean", scale=1.0): state = PartialState() cloned_tensor = tensor.clone() if state.distributed_type == DistributedType.NO: return cloned_tensor if state.distributed_type == DistributedType.XLA: # Some processes may have different HLO graphs than other # processes, for example in the breakpoint API # accelerator.set_trigger(). Use mark_step to make HLOs # the same on all processes. xm.mark_step() xm.all_reduce(xm.REDUCE_SUM, [cloned_tensor], scale) xm.mark_step() elif state.distributed_type.value in TORCH_DISTRIBUTED_OPERATION_TYPES: torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM) if reduction == "mean": cloned_tensor /= state.num_processes return cloned_tensor return recursively_apply( _reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction, scale=scale ) class PartialState: """ Singleton class that has information about the current training environment and functions to help with process control. Designed to be used when only process control and device execution states are needed. Does *not* need to be initialized from `Accelerator`. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **local_process_index** (`int`) -- The index of the current process on the current server.
- **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. """ _shared_state = SharedDict() def __init__(self, cpu: bool = False, **kwargs): self.__dict__ = self._shared_state if not self.initialized: self._cpu = cpu self.backend = None env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None) self.device = torch.device(env_device) if env_device is not None else None self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE") use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None) if use_sagemaker_dp is None: use_sagemaker_dp = ( os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true" and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO ) if use_sagemaker_dp and not cpu: if ( os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL ) or use_sagemaker_dp: self.distributed_type = DistributedType.MULTI_GPU import smdistributed.dataparallel.torch.torch_smddp # noqa if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="smddp") self.backend = "smddp" self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_torch_xla_available() and not cpu: self.distributed_type = DistributedType.XLA self.device = xm.xla_device() xm.set_replication(self.device, xm.get_xla_supported_devices()) self.num_processes = xm.xrt_world_size() self.process_index = xm.get_ordinal() if is_torch_xla_available(check_is_tpu=True): self.local_process_index = xm.get_local_ordinal() else: self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) elif ( os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu ): assert ( is_deepspeed_available() ), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source" self.distributed_type = DistributedType.DEEPSPEED if not torch.distributed.is_initialized(): from deepspeed import comm as dist # DeepSpeed always uses nccl kwargs.pop("backend", None) if is_xpu_available and is_ccl_available(): # Set DeepSpeed backend to ccl for xpu self.backend = "ccl" os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1") os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0") elif is_npu_available(): self.backend = "hccl" else: self.backend = "nccl" dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: if 
is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) if self.device is not None: torch.xpu.set_device(self.device) elif is_npu_available(): self.device = torch.device("npu", self.local_process_index) if self.device is not None: torch.npu.set_device(self.device) else: self.device = torch.device("cuda", self.local_process_index) if self.device is not None: torch.cuda.set_device(self.device) if self.device.type == "cuda" and not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. " 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self._mixed_precision = "no" # deepspeed handles mixed_precision using deepspeed_config elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu and torch.cuda.is_available(): self.distributed_type = DistributedType.MULTI_GPU if not torch.distributed.is_initialized(): self.backend = kwargs.pop("backend", "nccl") # Special case for `TrainingArguments`, where `backend` will be `None` if self.backend is None: self.backend = "nccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) if not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. " 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_npu_available() and not cpu and int(os.environ.get("LOCAL_RANK", -1)) != -1: self.distributed_type = DistributedType.MULTI_NPU if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = "hccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("npu", self.local_process_index) torch.npu.set_device(self.device) elif ( get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1 or int(os.environ.get("LOCAL_RANK", -1)) != -1 ): if not cpu and is_xpu_available(): self.distributed_type = DistributedType.MULTI_XPU else: self.distributed_type = DistributedType.MULTI_CPU # Actually, CCL_WORKER_COUNT is a CPU only env var in CCL, no need to set it for XPU. 
if is_ccl_available() and ( get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or self.distributed_type == DistributedType.MULTI_XPU ): if get_ccl_version() >= "1.12": import oneccl_bindings_for_pytorch # noqa: F401 else: import torch_ccl # noqa: F401 backend = "ccl" elif torch.distributed.is_mpi_available(): backend = "mpi" else: backend = "gloo" # Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH rank = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1) local_rank = get_int_from_env( ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 ) local_size = get_int_from_env( ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1, ) self.local_process_index = local_rank os.environ["RANK"] = str(rank) os.environ["WORLD_SIZE"] = str(size) os.environ["LOCAL_RANK"] = str(local_rank) os.environ["LOCAL_WORLD_SIZE"] = str(local_size) if backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU: os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = str(local_size) os.environ["CCL_LOCAL_RANK"] = str(local_rank) if not os.environ.get("MASTER_PORT", None): os.environ["MASTER_PORT"] = "29500" if not os.environ.get("MASTER_ADDR", None): if local_size != size and backend != "mpi": raise ValueError( "Looks like distributed multinode run but MASTER_ADDR env not set, " "please try exporting rank 0's hostname as MASTER_ADDR" ) if ( self.distributed_type == DistributedType.MULTI_CPU and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0 ): import psutil num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size) if num_cpu_threads_per_process == 0: num_cpu_threads_per_process = 1 torch.set_num_threads(num_cpu_threads_per_process) warnings.warn( f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob" " performance." 
) if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = backend torch.distributed.init_process_group(self.backend, rank=rank, world_size=size, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() if cpu: self.device = torch.device("cpu") elif is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) torch.xpu.set_device(self.device) else: self.device = self.default_device else: self.distributed_type = ( DistributedType.NO if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "false" else DistributedType.DEEPSPEED ) self.num_processes = 1 self.process_index = self.local_process_index = 0 if self.device is None: self.device = torch.device("cpu") if cpu else self.default_device self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0) def __repr__(self) -> str: return ( f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n" f"Num processes: {self.num_processes}\n" f"Process index: {self.process_index}\n" f"Local process index: {self.local_process_index}\n" f"Device: {self.device}\n" ) def _reset_state(): "Resets `_shared_state`, is used internally and should not be called" PartialState._shared_state.clear() def initialized(self) -> bool: "Returns whether the `PartialState` has been initialized" return self._shared_state != {} def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return self.distributed_type != DistributedType.NO and self.num_processes > 1 def is_last_process(self) -> bool: "Returns whether the current process is the last one" return self.process_index == self.num_processes - 1 def is_main_process(self) -> bool: "Returns whether the current process is the main process" return ( self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return ( self.local_process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def wait_for_everyone(self): """ Will stop the execution of the current process until every other process has reached that point (so this does nothing when the script is only run in one process). Useful to do before saving a model. Example: ```python >>> # Assuming two GPU processes >>> import time >>> from accelerate.state import PartialState >>> state = PartialState() >>> if state.is_main_process: ... time.sleep(2) >>> else: ... print("I'm waiting for the main process to finish its sleep...") >>> state.wait_for_everyone() >>> # Should print on every process at the same time >>> print("Everyone is here") ``` """ if self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, DistributedType.DEEPSPEED, DistributedType.FSDP, ): torch.distributed.barrier() elif self.distributed_type == DistributedType.XLA: xm.rendezvous("accelerate.utils.wait_for_everyone") def _goes_first(self, is_main: bool): if not is_main: self.wait_for_everyone() yield if is_main: self.wait_for_everyone() def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. 
Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate import PartialState state = PartialState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ if self.num_processes == 1: yield inputs return length = len(inputs) # Nested dictionary of any types if isinstance(inputs, dict): length = len(inputs[list(inputs.keys())[0]]) if not all(len(v) == length for v in inputs.values()): raise ValueError("All values in the dictionary must have the same length") num_samples_per_process = math.ceil(length / self.num_processes) start_index = self.process_index * num_samples_per_process end_index = start_index + num_samples_per_process if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1): end_index = length def _split_values(inputs, start_index, end_index): if isinstance(inputs, (list, tuple, torch.Tensor)): if start_index >= len(inputs): result = inputs[-1:] else: result = inputs[start_index:end_index] if apply_padding: if isinstance(result, torch.Tensor): from accelerate.utils import pad_across_processes, send_to_device # The tensor needs to be on the device before we can pad it tensorized_result = send_to_device(result, self.device) result = pad_across_processes(tensorized_result, pad_index=inputs[-1]) else: result += [result[-1]] * (num_samples_per_process - len(result)) return result elif isinstance(inputs, dict): for key in inputs.keys(): inputs[key] = _split_values(inputs[key], start_index, end_index) return inputs else: if is_datasets_available(): from datasets import Dataset if isinstance(inputs, Dataset): if start_index >= len(inputs): start_index = len(inputs) - 1 if end_index > len(inputs): end_index = len(inputs) result_idcs = list(range(start_index, end_index)) if apply_padding: result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs)) return inputs.select(result_idcs) return inputs yield _split_values(inputs, start_index, end_index) def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> with accelerator.main_process_first(): ... # This will be printed first by process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {accelerator.process_index}") ``` """ yield from self._goes_first(self.is_main_process) def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. 
Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> with state.local_main_process_first(): ... # This will be printed first by local process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {state.local_process_index}") ``` """ yield from self._goes_first(self.is_local_main_process) def on_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the main process. Args: function (`Callable`): The function to decorate. Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> @state.on_main_process ... def print_something(): ... print("This will be printed by process 0 only.") >>> print_something() "This will be printed by process 0 only" ``` """ if not self.initialized: raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.") if self.is_main_process or not self.use_distributed: return function return do_nothing def on_local_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the local main process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 2 servers with 4 processes each. from accelerate.state import PartialState state = PartialState() def print_something(): print("This will be printed by process 0 only on each server.") print_something() # On server 1: "This will be printed by process 0 only" # On server 2: "This will be printed by process 0 only" ``` """ if self.is_local_main_process or not self.use_distributed: return function return do_nothing def on_last_process(self, function: Callable[..., Any]): """ Decorator that only runs the decorated function on the last process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 3" ``` """ if self.is_last_process or not self.use_distributed: return function return do_nothing def on_process(self, function: Callable[..., Any] = None, process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index. Args: function (`Callable`, `optional`): The function to decorate. process_index (`int`, `optional`): The index of the process on which to run the function. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 2" ``` """ if function is None: return partial(self.on_process, process_index=process_index) if (self.process_index == process_index) or (not self.use_distributed): return function return do_nothing def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index on the current node. Args: function (`Callable`, *optional*): The function to decorate. local_process_index (`int`, *optional*): The index of the local process on which to run the function. Example: ```python # Assume we have 2 servers with 4 processes each. 
from accelerate import Accelerator accelerator = Accelerator() def print_something(): print(f"Printed on process {accelerator.local_process_index}") print_something() # On server 1: "Printed on process 2" # On server 2: "Printed on process 2" ``` """ if function is None: return partial(self.on_local_process, local_process_index=local_process_index) if (self.local_process_index == local_process_index) or (not self.use_distributed): return function return do_nothing def print(self, *args, **kwargs): if self.is_local_main_process: print(*args, **kwargs) def default_device(self) -> torch.device: """ Returns the default device which is: - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True. - CUDA if `torch.cuda.is_available()` - NPU if `is_npu_available()` - CPU otherwise """ if is_mps_available(): os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" return torch.device("mps") elif torch.cuda.is_available(): return torch.device("cuda") elif is_xpu_available(): return torch.device("xpu:0") elif is_npu_available(): return torch.device("npu") else: return torch.device("cpu") The provided code snippet includes necessary dependencies for implementing the `copy_tensor_to_devices` function. Write a Python function `def copy_tensor_to_devices(tensor=None) -> torch.Tensor` to solve the following problem: Copies a tensor that only exists on a single device and broadcasts it to the other devices. Differs from `broadcast` as each worker doesn't need to know its shape when used (and the tensor can be `None`). Args: tensor (`torch.tensor`): The tensor that should be sent to all devices. It must be defined on only a single device; on the rest it should be `None`. Here is the function: def copy_tensor_to_devices(tensor=None) -> torch.Tensor: """ Copies a tensor that only exists on a single device and broadcasts it to the other devices. Differs from `broadcast` as each worker doesn't need to know its shape when used (and the tensor can be `None`). Args: tensor (`torch.tensor`): The tensor that should be sent to all devices. It must be defined on only a single device; on the rest it should be `None`. """ state = PartialState() shape, dtype = gather_tensor_shape(tensor) if tensor is None: tensor = torch.zeros(shape, dtype=TENSOR_INT_TO_DTYPE[dtype]).to(state.device) return reduce(tensor, reduction="sum")
Copies a tensor that only exists on a single device and broadcasts it to the other devices. Differs from `broadcast` as each worker doesn't need to know its shape when used (and the tensor can be `None`). Args: tensor (`torch.tensor`): The tensor that should be sent to all devices. It must be defined on only a single device; on the rest it should be `None`.
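A launch-time sketch (assuming two processes started with `accelerate launch`, and that the helper is importable from `accelerate.utils` alongside the functions above; the values are illustrative):

```python
import torch
from accelerate import PartialState
from accelerate.utils import copy_tensor_to_devices

state = PartialState()
# Only the main process materializes the tensor; every other rank passes None.
tensor = torch.arange(4, device=state.device) if state.is_main_process else None
tensor = copy_tensor_to_devices(tensor)
# All ranks now hold tensor([0, 1, 2, 3]) without knowing the shape up front,
# which is what distinguishes this helper from `broadcast`.
print(f"rank {state.process_index}: {tensor}")
```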
2,938
import pickle import warnings from functools import update_wrapper, wraps from typing import Any, Mapping import torch from ..state import PartialState from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES from .dataclasses import DistributedType, TensorInformation from .imports import ( is_npu_available, is_torch_distributed_available, is_torch_version, is_torch_xla_available, is_xpu_available, ) class PartialState: """ Singleton class that has information about the current training environment and functions to help with process control. Designed to be used when only process control and device execution states are needed. Does *not* need to be initialized from `Accelerator`. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **local_process_index** (`int`) -- The index of the current process on the current server. - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. """ _shared_state = SharedDict() def __init__(self, cpu: bool = False, **kwargs): self.__dict__ = self._shared_state if not self.initialized: self._cpu = cpu self.backend = None env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None) self.device = torch.device(env_device) if env_device is not None else None self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE") use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None) if use_sagemaker_dp is None: use_sagemaker_dp = ( os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true" and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO ) if use_sagemaker_dp and not cpu: if ( os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL ) or use_sagemaker_dp: self.distributed_type = DistributedType.MULTI_GPU import smdistributed.dataparallel.torch.torch_smddp # noqa if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="smddp") self.backend = "smddp" self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_torch_xla_available() and not cpu: self.distributed_type = DistributedType.XLA self.device = xm.xla_device() xm.set_replication(self.device, xm.get_xla_supported_devices()) self.num_processes = xm.xrt_world_size() self.process_index = xm.get_ordinal() if is_torch_xla_available(check_is_tpu=True): self.local_process_index = xm.get_local_ordinal() else: self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) elif ( os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and int(os.environ.get("LOCAL_RANK", -1)) != -1 and not 
cpu ): assert ( is_deepspeed_available() ), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source" self.distributed_type = DistributedType.DEEPSPEED if not torch.distributed.is_initialized(): from deepspeed import comm as dist # DeepSpeed always uses nccl kwargs.pop("backend", None) if is_xpu_available and is_ccl_available(): # Set DeepSpeed backend to ccl for xpu self.backend = "ccl" os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1") os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0") elif is_npu_available(): self.backend = "hccl" else: self.backend = "nccl" dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: if is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) if self.device is not None: torch.xpu.set_device(self.device) elif is_npu_available(): self.device = torch.device("npu", self.local_process_index) if self.device is not None: torch.npu.set_device(self.device) else: self.device = torch.device("cuda", self.local_process_index) if self.device is not None: torch.cuda.set_device(self.device) if self.device.type == "cuda" and not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. " 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." ) self._mixed_precision = "no" # deepspeed handles mixed_precision using deepspeed_config elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu and torch.cuda.is_available(): self.distributed_type = DistributedType.MULTI_GPU if not torch.distributed.is_initialized(): self.backend = kwargs.pop("backend", "nccl") # Special case for `TrainingArguments`, where `backend` will be `None` if self.backend is None: self.backend = "nccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) if not check_cuda_p2p_ib_support(): if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: raise NotImplementedError( "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. " 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' "will do this automatically." 
) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("cuda", self.local_process_index) torch.cuda.set_device(self.device) elif is_npu_available() and not cpu and int(os.environ.get("LOCAL_RANK", -1)) != -1: self.distributed_type = DistributedType.MULTI_NPU if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = "hccl" torch.distributed.init_process_group(backend=self.backend, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) if self.device is None: self.device = torch.device("npu", self.local_process_index) torch.npu.set_device(self.device) elif ( get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1 or int(os.environ.get("LOCAL_RANK", -1)) != -1 ): if not cpu and is_xpu_available(): self.distributed_type = DistributedType.MULTI_XPU else: self.distributed_type = DistributedType.MULTI_CPU # Actually, CCL_WORKER_COUNT is a CPU only env var in CCL, no need to set it for XPU. if is_ccl_available() and ( get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or self.distributed_type == DistributedType.MULTI_XPU ): if get_ccl_version() >= "1.12": import oneccl_bindings_for_pytorch # noqa: F401 else: import torch_ccl # noqa: F401 backend = "ccl" elif torch.distributed.is_mpi_available(): backend = "mpi" else: backend = "gloo" # Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH rank = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1) local_rank = get_int_from_env( ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 ) local_size = get_int_from_env( ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1, ) self.local_process_index = local_rank os.environ["RANK"] = str(rank) os.environ["WORLD_SIZE"] = str(size) os.environ["LOCAL_RANK"] = str(local_rank) os.environ["LOCAL_WORLD_SIZE"] = str(local_size) if backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU: os.environ["CCL_PROCESS_LAUNCHER"] = "none" os.environ["CCL_LOCAL_SIZE"] = str(local_size) os.environ["CCL_LOCAL_RANK"] = str(local_rank) if not os.environ.get("MASTER_PORT", None): os.environ["MASTER_PORT"] = "29500" if not os.environ.get("MASTER_ADDR", None): if local_size != size and backend != "mpi": raise ValueError( "Looks like distributed multinode run but MASTER_ADDR env not set, " "please try exporting rank 0's hostname as MASTER_ADDR" ) if ( self.distributed_type == DistributedType.MULTI_CPU and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0 ): import psutil num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size) if num_cpu_threads_per_process == 0: num_cpu_threads_per_process = 1 torch.set_num_threads(num_cpu_threads_per_process) warnings.warn( f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob" " performance." 
) if not torch.distributed.is_initialized(): # Backend is not set by the user, we set it here kwargs.pop("backend", None) self.backend = backend torch.distributed.init_process_group(self.backend, rank=rank, world_size=size, **kwargs) self.num_processes = torch.distributed.get_world_size() self.process_index = torch.distributed.get_rank() if cpu: self.device = torch.device("cpu") elif is_xpu_available(): self.device = torch.device("xpu", self.local_process_index) torch.xpu.set_device(self.device) else: self.device = self.default_device else: self.distributed_type = ( DistributedType.NO if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "false" else DistributedType.DEEPSPEED ) self.num_processes = 1 self.process_index = self.local_process_index = 0 if self.device is None: self.device = torch.device("cpu") if cpu else self.default_device self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0) def __repr__(self) -> str: return ( f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n" f"Num processes: {self.num_processes}\n" f"Process index: {self.process_index}\n" f"Local process index: {self.local_process_index}\n" f"Device: {self.device}\n" ) def _reset_state(): "Resets `_shared_state`, is used internally and should not be called" PartialState._shared_state.clear() def initialized(self) -> bool: "Returns whether the `PartialState` has been initialized" return self._shared_state != {} def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return self.distributed_type != DistributedType.NO and self.num_processes > 1 def is_last_process(self) -> bool: "Returns whether the current process is the last one" return self.process_index == self.num_processes - 1 def is_main_process(self) -> bool: "Returns whether the current process is the main process" return ( self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return ( self.local_process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process ) def wait_for_everyone(self): """ Will stop the execution of the current process until every other process has reached that point (so this does nothing when the script is only run in one process). Useful to do before saving a model. Example: ```python >>> # Assuming two GPU processes >>> import time >>> from accelerate.state import PartialState >>> state = PartialState() >>> if state.is_main_process: ... time.sleep(2) >>> else: ... print("I'm waiting for the main process to finish its sleep...") >>> state.wait_for_everyone() >>> # Should print on every process at the same time >>> print("Everyone is here") ``` """ if self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, DistributedType.DEEPSPEED, DistributedType.FSDP, ): torch.distributed.barrier() elif self.distributed_type == DistributedType.XLA: xm.rendezvous("accelerate.utils.wait_for_everyone") def _goes_first(self, is_main: bool): if not is_main: self.wait_for_everyone() yield if is_main: self.wait_for_everyone() def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. 
Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate import PartialState state = PartialState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ if self.num_processes == 1: yield inputs return length = len(inputs) # Nested dictionary of any types if isinstance(inputs, dict): length = len(inputs[list(inputs.keys())[0]]) if not all(len(v) == length for v in inputs.values()): raise ValueError("All values in the dictionary must have the same length") num_samples_per_process = math.ceil(length / self.num_processes) start_index = self.process_index * num_samples_per_process end_index = start_index + num_samples_per_process if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1): end_index = length def _split_values(inputs, start_index, end_index): if isinstance(inputs, (list, tuple, torch.Tensor)): if start_index >= len(inputs): result = inputs[-1:] else: result = inputs[start_index:end_index] if apply_padding: if isinstance(result, torch.Tensor): from accelerate.utils import pad_across_processes, send_to_device # The tensor needs to be on the device before we can pad it tensorized_result = send_to_device(result, self.device) result = pad_across_processes(tensorized_result, pad_index=inputs[-1]) else: result += [result[-1]] * (num_samples_per_process - len(result)) return result elif isinstance(inputs, dict): for key in inputs.keys(): inputs[key] = _split_values(inputs[key], start_index, end_index) return inputs else: if is_datasets_available(): from datasets import Dataset if isinstance(inputs, Dataset): if start_index >= len(inputs): start_index = len(inputs) - 1 if end_index > len(inputs): end_index = len(inputs) result_idcs = list(range(start_index, end_index)) if apply_padding: result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs)) return inputs.select(result_idcs) return inputs yield _split_values(inputs, start_index, end_index) def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> with accelerator.main_process_first(): ... # This will be printed first by process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {accelerator.process_index}") ``` """ yield from self._goes_first(self.is_main_process) def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. 
Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> with state.local_main_process_first(): ... # This will be printed first by local process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {state.local_process_index}") ``` """ yield from self._goes_first(self.is_local_main_process) def on_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the main process. Args: function (`Callable`): The function to decorate. Example: ```python >>> from accelerate.state import PartialState >>> state = PartialState() >>> @state.on_main_process ... def print_something(): ... print("This will be printed by process 0 only.") >>> print_something() "This will be printed by process 0 only" ``` """ if not self.initialized: raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.") if self.is_main_process or not self.use_distributed: return function return do_nothing def on_local_main_process(self, function: Callable[..., Any] = None): """ Decorator that only runs the decorated function on the local main process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 2 servers with 4 processes each. from accelerate.state import PartialState state = PartialState() def print_something(): print("This will be printed by process 0 only on each server.") print_something() # On server 1: "This will be printed by process 0 only" # On server 2: "This will be printed by process 0 only" ``` """ if self.is_local_main_process or not self.use_distributed: return function return do_nothing def on_last_process(self, function: Callable[..., Any]): """ Decorator that only runs the decorated function on the last process. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 3" ``` """ if self.is_last_process or not self.use_distributed: return function return do_nothing def on_process(self, function: Callable[..., Any] = None, process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index. Args: function (`Callable`, `optional`): The function to decorate. process_index (`int`, `optional`): The index of the process on which to run the function. Example: ```python # Assume we have 4 processes. from accelerate.state import PartialState state = PartialState() def print_something(): print(f"Printed on process {state.process_index}") print_something() "Printed on process 2" ``` """ if function is None: return partial(self.on_process, process_index=process_index) if (self.process_index == process_index) or (not self.use_distributed): return function return do_nothing def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): """ Decorator that only runs the decorated function on the process with the given index on the current node. Args: function (`Callable`, *optional*): The function to decorate. local_process_index (`int`, *optional*): The index of the local process on which to run the function. Example: ```python # Assume we have 2 servers with 4 processes each. 
from accelerate import Accelerator accelerator = Accelerator() def print_something(): print(f"Printed on process {accelerator.local_process_index}") print_something() # On server 1: "Printed on process 2" # On server 2: "Printed on process 2" ``` """ if function is None: return partial(self.on_local_process, local_process_index=local_process_index) if (self.local_process_index == local_process_index) or (not self.use_distributed): return function return do_nothing def print(self, *args, **kwargs): if self.is_local_main_process: print(*args, **kwargs) def default_device(self) -> torch.device: """ Returns the default device which is: - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True. - CUDA if `torch.cuda.is_available()` - NPU if `is_npu_available()` - CPU otherwise """ if is_mps_available(): os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" return torch.device("mps") elif torch.cuda.is_available(): return torch.device("cuda") elif is_xpu_available(): return torch.device("xpu:0") elif is_npu_available(): return torch.device("npu") else: return torch.device("cpu") TORCH_DISTRIBUTED_OPERATION_TYPES = CUDA_DISTRIBUTED_TYPES + ["MULTI_NPU", "MULTI_XPU", "MULTI_CPU"] class DistributedType(str, enum.Enum): """ Represents a type of distributed environment. Values: - **NO** -- Not a distributed environment, just a single process. - **MULTI_CPU** -- Distributed on multiple CPU nodes. - **MULTI_GPU** -- Distributed on multiple GPUs. - **MULTI_NPU** -- Distributed on multiple NPUs. - **MULTI_XPU** -- Distributed on multiple XPUs. - **DEEPSPEED** -- Using DeepSpeed. - **XLA** -- Using TorchXLA. - **TPU** -- This field will be deprecated in v0.27.0. Use XLA instead. """ # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box. NO = "NO" MULTI_CPU = "MULTI_CPU" MULTI_GPU = "MULTI_GPU" MULTI_NPU = "MULTI_NPU" MULTI_XPU = "MULTI_XPU" DEEPSPEED = "DEEPSPEED" FSDP = "FSDP" XLA = "XLA" MEGATRON_LM = "MEGATRON_LM" TPU = DeprecatedFieldDescriptor("TPU", "XLA") The provided code snippet includes necessary dependencies for implementing the `broadcast_object_list` function. Write a Python function `def broadcast_object_list(object_list, from_process: int = 0)` to solve the following problem: Broadcast a list of picklable objects form one process to the others. Args: object_list (list of picklable objects): The list of objects to broadcast. This list will be modified inplace. from_process (`int`, *optional*, defaults to 0): The process from which to send the data. Returns: The same list containing the objects from process 0. Here is the function: def broadcast_object_list(object_list, from_process: int = 0): """ Broadcast a list of picklable objects form one process to the others. Args: object_list (list of picklable objects): The list of objects to broadcast. This list will be modified inplace. from_process (`int`, *optional*, defaults to 0): The process from which to send the data. Returns: The same list containing the objects from process 0. """ if PartialState().distributed_type == DistributedType.XLA: for i, obj in enumerate(object_list): object_list[i] = xm.mesh_reduce("accelerate.utils.broadcast_object_list", obj, lambda x: x[from_process]) elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES: torch.distributed.broadcast_object_list(object_list, src=from_process) return object_list
Broadcast a list of picklable objects from one process to the others. Args: object_list (list of picklable objects): The list of objects to broadcast. This list will be modified in place. from_process (`int`, *optional*, defaults to 0): The process from which to send the data. Returns: The same list containing the objects from process 0.
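As a quick orientation, here is a minimal usage sketch (not part of the source record): it assumes the script is started with `accelerate launch` so a process group exists; with a single process the call simply returns the list unchanged.

```python
# Hypothetical sketch: rank 0's values replace the placeholders on every other rank.
from accelerate import PartialState
from accelerate.utils import broadcast_object_list

state = PartialState()
objects = [{"lr": 3e-4}, "run-name"] if state.is_main_process else [None, None]
broadcast_object_list(objects, from_process=0)
# Every process now holds rank 0's list.
print(f"rank {state.process_index}: {objects}")
```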
2,939
import pickle import warnings from functools import update_wrapper, wraps from typing import Any, Mapping import torch from ..state import PartialState from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES from .dataclasses import DistributedType, TensorInformation from .imports import ( is_npu_available, is_torch_distributed_available, is_torch_version, is_torch_xla_available, is_xpu_available, ) def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs): """ Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type. Args: func (`callable`): The function to recursively apply. data (nested list/tuple/dictionary of `main_type`): The data on which to apply `func` *args: Positional arguments that will be passed to `func` when applied on the unpacked data. main_type (`type`, *optional*, defaults to `torch.Tensor`): The base type of the objects to which apply `func`. error_on_other_type (`bool`, *optional*, defaults to `False`): Whether to return an error or not if after unpacking `data`, we get on an object that is not of type `main_type`. If `False`, the function will leave objects of types different than `main_type` unchanged. **kwargs: Keyword arguments that will be passed to `func` when applied on the unpacked data. Returns: The same data structure as `data` with `func` applied to every object of type `main_type`. """ if isinstance(data, (tuple, list)): return honor_type( data, ( recursively_apply( func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for o in data ), ) elif isinstance(data, Mapping): return type(data)( { k: recursively_apply( func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for k, v in data.items() } ) elif test_type(data): return func(data, *args, **kwargs) elif error_on_other_type: raise TypeError( f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of " f"objects that are valid for `{test_type.__name__}` should be passed." ) return data The provided code snippet includes necessary dependencies for implementing the `slice_tensors` function. Write a Python function `def slice_tensors(data, tensor_slice, process_index=None, num_processes=None)` to solve the following problem: Recursively takes a slice in a nested list/tuple/dictionary of tensors. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data to slice. tensor_slice (`slice`): The slice to take. Returns: The same data structure as `data` with all the tensors slices. Here is the function: def slice_tensors(data, tensor_slice, process_index=None, num_processes=None): """ Recursively takes a slice in a nested list/tuple/dictionary of tensors. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data to slice. tensor_slice (`slice`): The slice to take. Returns: The same data structure as `data` with all the tensors slices. """ def _slice_tensor(tensor, tensor_slice): return tensor[tensor_slice] return recursively_apply(_slice_tensor, data, tensor_slice)
Recursively takes a slice in a nested list/tuple/dictionary of tensors. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data to slice. tensor_slice (`slice`): The slice to take. Returns: The same data structure as `data` with all the tensors sliced.
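A minimal sketch of how `slice_tensors` behaves on a nested structure, assuming it is importable from `accelerate.utils.operations` (the module the snippet above is taken from):

```python
import torch
from accelerate.utils.operations import slice_tensors

batch = {"input_ids": torch.arange(12).reshape(6, 2), "labels": torch.arange(6)}
first_half = slice_tensors(batch, slice(0, 3))
print(first_half["input_ids"].shape)  # torch.Size([3, 2])
print(first_half["labels"])           # tensor([0, 1, 2])
```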
2,940
import pickle import warnings from functools import update_wrapper, wraps from typing import Any, Mapping import torch from ..state import PartialState from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES from .dataclasses import DistributedType, TensorInformation from .imports import ( is_npu_available, is_torch_distributed_available, is_torch_version, is_torch_xla_available, is_xpu_available, ) def honor_type(obj, generator): """ Cast a generator to the same type as obj (list, tuple, or namedtuple) """ # Some objects may not be able to instantiate from a generator directly if is_namedtuple(obj): return type(obj)(*list(generator)) else: return type(obj)(generator) The provided code snippet includes necessary dependencies for implementing the `concatenate` function. Write a Python function `def concatenate(data, dim=0)` to solve the following problem: Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape. Args: data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`): The data to concatenate. dim (`int`, *optional*, defaults to 0): The dimension on which to concatenate. Returns: The same data structure as `data` with all the tensors concatenated. Here is the function: def concatenate(data, dim=0): """ Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape. Args: data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`): The data to concatenate. dim (`int`, *optional*, defaults to 0): The dimension on which to concatenate. Returns: The same data structure as `data` with all the tensors concatenated. """ if isinstance(data[0], (tuple, list)): return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0])))) elif isinstance(data[0], Mapping): return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()}) elif not isinstance(data[0], torch.Tensor): raise TypeError(f"Can only concatenate tensors but got {type(data[0])}") return torch.cat(data, dim=dim)
Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape. Args: data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`): The data to concatenate. dim (`int`, *optional*, defaults to 0): The dimension on which to concatenate. Returns: The same data structure as `data` with all the tensors concatenated.
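A minimal sketch (assuming `concatenate` is re-exported from `accelerate.utils`): merging two per-chunk dictionaries whose tensors agree in every dimension except the concatenation one.

```python
import torch
from accelerate.utils import concatenate

chunks = [
    {"logits": torch.ones(2, 4), "labels": torch.zeros(2)},
    {"logits": torch.ones(3, 4), "labels": torch.zeros(3)},
]
merged = concatenate(chunks, dim=0)
print(merged["logits"].shape)  # torch.Size([5, 4])
print(merged["labels"].shape)  # torch.Size([5])
```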
2,941
import pickle import warnings from functools import update_wrapper, wraps from typing import Any, Mapping import torch from ..state import PartialState from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES from .dataclasses import DistributedType, TensorInformation from .imports import ( is_npu_available, is_torch_distributed_available, is_torch_version, is_torch_xla_available, is_xpu_available, ) def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs): """ Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type. Args: func (`callable`): The function to recursively apply. data (nested list/tuple/dictionary of `main_type`): The data on which to apply `func` *args: Positional arguments that will be passed to `func` when applied on the unpacked data. main_type (`type`, *optional*, defaults to `torch.Tensor`): The base type of the objects to which apply `func`. error_on_other_type (`bool`, *optional*, defaults to `False`): Whether to return an error or not if after unpacking `data`, we get on an object that is not of type `main_type`. If `False`, the function will leave objects of types different than `main_type` unchanged. **kwargs: Keyword arguments that will be passed to `func` when applied on the unpacked data. Returns: The same data structure as `data` with `func` applied to every object of type `main_type`. """ if isinstance(data, (tuple, list)): return honor_type( data, ( recursively_apply( func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for o in data ), ) elif isinstance(data, Mapping): return type(data)( { k: recursively_apply( func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for k, v in data.items() } ) elif test_type(data): return func(data, *args, **kwargs) elif error_on_other_type: raise TypeError( f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of " f"objects that are valid for `{test_type.__name__}` should be passed." ) return data def gather(tensor): """ Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather. Returns: The same data structure as `tensor` with all tensors sent to the proper device. """ if PartialState().distributed_type == DistributedType.XLA: return _tpu_gather(tensor) elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES: return _gpu_gather(tensor) else: return tensor class CannotPadNestedTensorWarning(UserWarning): pass The provided code snippet includes necessary dependencies for implementing the `pad_across_processes` function. Write a Python function `def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False)` to solve the following problem: Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they can safely be gathered. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather. dim (`int`, *optional*, defaults to 0): The dimension on which to pad. pad_index (`int`, *optional*, defaults to 0): The value with which to pad. pad_first (`bool`, *optional*, defaults to `False`): Whether to pad at the beginning or the end. 
Here is the function: def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False): """ Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they can safely be gathered. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather. dim (`int`, *optional*, defaults to 0): The dimension on which to pad. pad_index (`int`, *optional*, defaults to 0): The value with which to pad. pad_first (`bool`, *optional*, defaults to `False`): Whether to pad at the beginning or the end. """ def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False): if getattr(tensor, "is_nested", False): warnings.warn( "Cannot pad nested tensors without more information. Leaving unprocessed.", CannotPadNestedTensorWarning, ) return tensor if dim >= len(tensor.shape): return tensor # Gather all sizes size = torch.tensor(tensor.shape, device=tensor.device)[None] sizes = gather(size).cpu() # Then pad to the maximum size max_size = max(s[dim] for s in sizes) if max_size == tensor.shape[dim]: return tensor old_size = tensor.shape new_size = list(old_size) new_size[dim] = max_size new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index if pad_first: indices = tuple( slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(new_size)) ) else: indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size))) new_tensor[indices] = tensor return new_tensor return recursively_apply( _pad_across_processes, tensor, error_on_other_type=True, dim=dim, pad_index=pad_index, pad_first=pad_first )
Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they can safely be gathered. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather. dim (`int`, *optional*, defaults to 0): The dimension on which to pad. pad_index (`int`, *optional*, defaults to 0): The value with which to pad. pad_first (`bool`, *optional*, defaults to `False`): Whether to pad at the beginning or the end.
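A hypothetical two-process sketch (e.g. `accelerate launch --num_processes 2`); in a single process the function is effectively a no-op, since the only "rank" already has the maximum size.

```python
import torch
from accelerate import PartialState
from accelerate.utils import gather, pad_across_processes

state = PartialState()
rows = 2 if state.process_index == 0 else 3  # ragged batch sizes across ranks
logits = torch.ones(rows, 4, device=state.device)
padded = pad_across_processes(logits, dim=0, pad_index=-100)
# Every rank now contributes shape (3, 4), so a plain gather is safe;
# the -100 rows can be dropped afterwards.
gathered = gather(padded)
```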
2,942
import pickle import warnings from functools import update_wrapper, wraps from typing import Any, Mapping import torch from ..state import PartialState from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES from .dataclasses import DistributedType, TensorInformation from .imports import ( is_npu_available, is_torch_distributed_available, is_torch_version, is_torch_xla_available, is_xpu_available, ) def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs): """ Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type. Args: func (`callable`): The function to recursively apply. data (nested list/tuple/dictionary of `main_type`): The data on which to apply `func` *args: Positional arguments that will be passed to `func` when applied on the unpacked data. main_type (`type`, *optional*, defaults to `torch.Tensor`): The base type of the objects to which apply `func`. error_on_other_type (`bool`, *optional*, defaults to `False`): Whether to return an error or not if after unpacking `data`, we get on an object that is not of type `main_type`. If `False`, the function will leave objects of types different than `main_type` unchanged. **kwargs: Keyword arguments that will be passed to `func` when applied on the unpacked data. Returns: The same data structure as `data` with `func` applied to every object of type `main_type`. """ if isinstance(data, (tuple, list)): return honor_type( data, ( recursively_apply( func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for o in data ), ) elif isinstance(data, Mapping): return type(data)( { k: recursively_apply( func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for k, v in data.items() } ) elif test_type(data): return func(data, *args, **kwargs) elif error_on_other_type: raise TypeError( f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of " f"objects that are valid for `{test_type.__name__}` should be passed." ) return data The provided code snippet includes necessary dependencies for implementing the `pad_input_tensors` function. Write a Python function `def pad_input_tensors(tensor, batch_size, num_processes, dim=0)` to solve the following problem: Takes a `tensor` of arbitrary size and pads it so that it can work given `num_processes` needed dimensions. New tensors are just the last input repeated. E.g.: Tensor: ([3,4,4]) Num processes: 4 Expected result shape: ([4,4,4]) Here is the function: def pad_input_tensors(tensor, batch_size, num_processes, dim=0): """ Takes a `tensor` of arbitrary size and pads it so that it can work given `num_processes` needed dimensions. New tensors are just the last input repeated. 
E.g.:
Tensor: ([3,4,4])
Num processes: 4
Expected result shape: ([4,4,4])
"""

    def _pad_input_tensors(tensor, batch_size, num_processes, dim=0):
        remainder = batch_size // num_processes
        last_inputs = batch_size - (remainder * num_processes)
        if batch_size // num_processes == 0:
            to_pad = num_processes - batch_size
        else:
            to_pad = num_processes - (batch_size // num_processes)
        # In the rare case that `to_pad` is negative,
        # we need to pad the last inputs - the found `to_pad`
        if last_inputs > to_pad and to_pad < 1:
            to_pad = last_inputs - to_pad
        old_size = tensor.shape
        new_size = list(old_size)
        new_size[0] = batch_size + to_pad
        new_tensor = tensor.new_zeros(tuple(new_size))
        indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
        new_tensor[indices] = tensor
        return new_tensor

    return recursively_apply(
        _pad_input_tensors,
        tensor,
        error_on_other_type=True,
        batch_size=batch_size,
        num_processes=num_processes,
        dim=dim,
    )
Takes a `tensor` of arbitrary size and pads its first dimension so that it can be split across `num_processes`. New tensors are just the last input repeated. E.g.: Tensor: ([3,4,4]) Num processes: 4 Expected result shape: ([4,4,4])
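A single-process sketch of the padding itself, assuming the import path `accelerate.utils.operations`; note that, as written, the implementation above fills the padded rows with zeros (`new_zeros`), only copying the original rows into place.

```python
import torch
from accelerate.utils.operations import pad_input_tensors

batch = torch.randn(3, 4, 4)
# Pad a 3-sample batch so it splits evenly across 4 processes.
padded = pad_input_tensors(batch, batch_size=3, num_processes=4)
print(padded.shape)  # torch.Size([4, 4, 4])
```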
2,943
import pickle import warnings from functools import update_wrapper, wraps from typing import Any, Mapping import torch from ..state import PartialState from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES from .dataclasses import DistributedType, TensorInformation from .imports import ( is_npu_available, is_torch_distributed_available, is_torch_version, is_torch_xla_available, is_xpu_available, ) def is_torch_tensor(tensor): return isinstance(tensor, torch.Tensor) def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs): """ Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type. Args: func (`callable`): The function to recursively apply. data (nested list/tuple/dictionary of `main_type`): The data on which to apply `func` *args: Positional arguments that will be passed to `func` when applied on the unpacked data. main_type (`type`, *optional*, defaults to `torch.Tensor`): The base type of the objects to which apply `func`. error_on_other_type (`bool`, *optional*, defaults to `False`): Whether to return an error or not if after unpacking `data`, we get on an object that is not of type `main_type`. If `False`, the function will leave objects of types different than `main_type` unchanged. **kwargs: Keyword arguments that will be passed to `func` when applied on the unpacked data. Returns: The same data structure as `data` with `func` applied to every object of type `main_type`. """ if isinstance(data, (tuple, list)): return honor_type( data, ( recursively_apply( func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for o in data ), ) elif isinstance(data, Mapping): return type(data)( { k: recursively_apply( func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs ) for k, v in data.items() } ) elif test_type(data): return func(data, *args, **kwargs) elif error_on_other_type: raise TypeError( f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of " f"objects that are valid for `{test_type.__name__}` should be passed." ) return data The provided code snippet includes necessary dependencies for implementing the `convert_to_fp32` function. Write a Python function `def convert_to_fp32(tensor)` to solve the following problem: Recursively converts the elements nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to convert from FP16/BF16 to FP32. Returns: The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32. Here is the function: def convert_to_fp32(tensor): """ Recursively converts the elements nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to convert from FP16/BF16 to FP32. Returns: The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32. """ def _convert_to_fp32(tensor): return tensor.float() def _is_fp16_bf16_tensor(tensor): return (is_torch_tensor(tensor) or hasattr(tensor, "dtype")) and tensor.dtype in ( torch.float16, torch.bfloat16, ) return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)
Recursively converts the elements of a nested list/tuple/dictionary of tensors from FP16/BF16 precision to FP32. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to convert from FP16/BF16 to FP32. Returns: The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.
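A minimal sketch (assuming `convert_to_fp32` is re-exported from `accelerate.utils`): only tensors whose dtype is FP16/BF16 are touched.

```python
import torch
from accelerate.utils import convert_to_fp32

outputs = {"logits": torch.ones(2, 3, dtype=torch.float16), "loss": torch.tensor(1.0)}
outputs = convert_to_fp32(outputs)
print(outputs["logits"].dtype)  # torch.float32 (was float16)
print(outputs["loss"].dtype)    # torch.float32 (already fp32, left as-is)
```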
2,944
import pickle
import warnings
from functools import update_wrapper, wraps
from typing import Any, Mapping

import torch

from ..state import PartialState
from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES
from .dataclasses import DistributedType, TensorInformation
from .imports import (
    is_npu_available,
    is_torch_distributed_available,
    is_torch_version,
    is_torch_xla_available,
    is_xpu_available,
)

class ConvertOutputsToFp32:
    """
    Decorator to apply to a function outputting tensors (like a model forward pass) that ensures the outputs in FP16
    precision will be converted back to FP32.

    Args:
        model_forward (`Callable`):
            The function whose outputs we want to treat.

    Returns:
        The same function as `model_forward` but with converted outputs.
    """

    def __init__(self, model_forward):
        self.model_forward = model_forward
        update_wrapper(self, model_forward)

    def __call__(self, *args, **kwargs):
        return convert_to_fp32(self.model_forward(*args, **kwargs))

    def __getstate__(self):
        raise pickle.PicklingError(
            "Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it."
        )

def convert_outputs_to_fp32(model_forward):
    model_forward = ConvertOutputsToFp32(model_forward)

    def forward(*args, **kwargs):
        return model_forward(*args, **kwargs)

    # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
    forward.__wrapped__ = model_forward

    return forward
null
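A minimal sketch of wrapping a forward pass, assuming `convert_outputs_to_fp32` is re-exported from `accelerate.utils`; bfloat16 is used so the example also runs on CPU.

```python
import torch
from accelerate.utils import convert_outputs_to_fp32

model = torch.nn.Linear(4, 2).to(torch.bfloat16)
# Instance attribute shadows Module.forward, mirroring what Accelerator does internally.
model.forward = convert_outputs_to_fp32(model.forward)
out = model(torch.ones(1, 4, dtype=torch.bfloat16))
print(out.dtype)  # torch.float32
```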
2,945
import torch.nn as nn from .imports import is_fp8_available if is_fp8_available(): import transformer_engine.pytorch as te def is_fp8_available(): return is_msamp_available() or is_transformer_engine_available() The provided code snippet includes necessary dependencies for implementing the `has_transformer_engine_layers` function. Write a Python function `def has_transformer_engine_layers(model)` to solve the following problem: Returns whether a given model has some `transformer_engine` layer or not. Here is the function: def has_transformer_engine_layers(model): """ Returns whether a given model has some `transformer_engine` layer or not. """ if not is_fp8_available(): raise ImportError("Using `has_transformer_engine_layers` requires transformer_engine to be installed.") for m in model.modules(): if isinstance(m, (te.LayerNorm, te.Linear, te.TransformerLayer)): return True return False
Returns whether a given model has some `transformer_engine` layer or not.
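A guarded sketch: the function raises `ImportError` when `transformer_engine` is missing, so check `is_fp8_available()` first (import paths assumed from the snippet above).

```python
import torch.nn as nn
from accelerate.utils.imports import is_fp8_available

model = nn.Sequential(nn.Linear(8, 8), nn.ReLU())
if is_fp8_available():
    from accelerate.utils.transformer_engine import has_transformer_engine_layers

    print(has_transformer_engine_layers(model))  # False: only plain torch.nn layers
```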
2,946
import random from typing import List, Optional, Union import numpy as np import torch from ..state import AcceleratorState from .constants import CUDA_DISTRIBUTED_TYPES from .dataclasses import DistributedType, RNGType from .imports import is_npu_available, is_torch_xla_available, is_xpu_available if is_torch_xla_available(): import torch_xla.core.xla_model as xm import random class AcceleratorState: """ Singleton class that has information about the current training environment. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently in use. - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`. - **local_process_index** (`int`) -- The index of the current process on the current server. - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). - **num_processes** (`int`) -- The number of processes currently launched in parallel. - **process_index** (`int`) -- The index of the current process. - **is_last_process** (`bool`) -- Whether or not the current process is the last one. - **is_main_process** (`bool`) -- Whether or not the current process is the main one. - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. """ _shared_state = SharedDict() def __init__( self, mixed_precision: str = None, cpu: bool = False, dynamo_plugin=None, deepspeed_plugin=None, fsdp_plugin=None, megatron_lm_plugin=None, _from_accelerator: bool = False, **kwargs, ): self.__dict__ = self._shared_state if parse_flag_from_env("ACCELERATE_USE_CPU"): cpu = True if PartialState._shared_state == {}: PartialState(cpu, **kwargs) self.__dict__.update(PartialState._shared_state) self._check_initialized(mixed_precision, cpu) if not self.initialized: self.deepspeed_plugin = None self.use_ipex = None mixed_precision = ( parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision.lower() ) if mixed_precision == "fp8": if not is_fp8_available(): raise ValueError( "Using `fp8` precision requires `transformer_engine` or `MS-AMP` to be installed." ) elif not check_fp8_capability(): logger.warning( f"The current device has compute capability of {torch.cuda.get_device_capability()} which is " "insufficient for FP8 mixed precision training (requires a GPU Hopper/Ada Lovelace " "or higher, compute capability of 8.9 or higher). Will use FP16 instead." ) mixed_precision = "fp16" self.dynamo_plugin = dynamo_plugin if not _from_accelerator: raise ValueError( "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` " "before using any functionality from the `accelerate` library." 
) # deepspeed handles mixed_precision using deepspeed_config self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision if self.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_tpu=True): if mixed_precision == "bf16": if os.environ.get("ACCELERATE_DOWNCAST_BF16"): os.environ["XLA_USE_BF16"] = str(0) os.environ["XLA_DOWNCAST_BF16"] = str(1) self.downcast_bfloat = True else: os.environ["XLA_USE_BF16"] = str(1) os.environ["XLA_DOWNCAST_BF16"] = str(0) self.downcast_bfloat = False elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu: self.deepspeed_plugin = deepspeed_plugin elif self.distributed_type == DistributedType.MULTI_GPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true": self.distributed_type = DistributedType.MEGATRON_LM megatron_lm_plugin.set_mixed_precision(self._mixed_precision) self.megatron_lm_plugin = megatron_lm_plugin elif self.distributed_type == DistributedType.MULTI_NPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]: if is_ipex_available(): "check if user disables it explicitly" self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True) else: self.use_ipex = False if self.distributed_type == DistributedType.MULTI_XPU: if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": self.distributed_type = DistributedType.FSDP if self._mixed_precision != "no": fsdp_plugin.set_mixed_precision(self._mixed_precision) self.fsdp_plugin = fsdp_plugin if ( self.dynamo_plugin.backend != DynamoBackend.NO and self._mixed_precision == "no" and self.device.type == "cuda" ): torch.backends.cuda.matmul.allow_tf32 = True PartialState._shared_state["distributed_type"] = self.distributed_type def initialized(self) -> bool: return self._shared_state != PartialState._shared_state def __repr__(self): repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n" if self.distributed_type == DistributedType.DEEPSPEED: repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n" return repr def _check_initialized(self, mixed_precision=None, cpu=None): "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized" if self.initialized: err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`." 
if cpu and self.device.type != "cpu": raise ValueError(err.format(flag="cpu=True")) if ( mixed_precision is not None and mixed_precision != self._mixed_precision and self.distributed_type != DistributedType.DEEPSPEED ): raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'")) # For backward compatibility def use_fp16(self): warnings.warn( "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use " "`AcceleratorState.mixed_precision == 'fp16'` instead.", FutureWarning, ) return self._mixed_precision != "no" def mixed_precision(self): if self.distributed_type == DistributedType.DEEPSPEED: config = self.deepspeed_plugin.deepspeed_config if config.get("fp16", {}).get("enabled", False): mixed_precision = "fp16" elif config.get("bf16", {}).get("enabled", False): mixed_precision = "bf16" else: mixed_precision = "no" else: mixed_precision = self._mixed_precision return mixed_precision def _reset_state(reset_partial_state: bool = False): "Resets `_shared_state`, is used internally and should not be called" AcceleratorState._shared_state.clear() if reset_partial_state: PartialState._reset_state() def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return PartialState().use_distributed def is_last_process(self) -> bool: "Returns whether the current process is the last one" return PartialState().is_last_process def is_main_process(self) -> bool: "Returns whether the current process is the main process" return PartialState().is_main_process def is_local_main_process(self) -> bool: "Returns whether the current process is the main process on the local node" return PartialState().is_local_main_process def wait_for_everyone(self): PartialState().wait_for_everyone() def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate.state import AcceleratorState state = AcceleratorState() with state.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs: yield inputs def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. """ with PartialState().main_process_first(): yield def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. 
""" with PartialState().local_main_process_first(): yield def print(self, *args, **kwargs): PartialState().print(*args, **kwargs) def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False): """ Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set the USE_TORCH_XLA to false. """ assert not (check_is_tpu and check_is_gpu), "The check_is_tpu and check_is_gpu cannot both be true." if not _torch_xla_available: return False elif check_is_gpu: return torch_xla.runtime.device_type() in ["GPU", "CUDA"] elif check_is_tpu: return torch_xla.runtime.device_type() == "TPU" return True def is_npu_available(check_device=False): "Checks if `torch_npu` is installed and potentially if a NPU is in the environment" if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None: return False import torch import torch_npu # noqa: F401 if check_device: try: # Will raise a RuntimeError if no NPU is found _ = torch.npu.device_count() return torch.npu.is_available() except RuntimeError: return False return hasattr(torch, "npu") and torch.npu.is_available() def is_xpu_available(check_device=False): "check if user disables it explicitly" if not parse_flag_from_env("ACCELERATE_USE_XPU", default=True): return False "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment" if is_ipex_available(): import torch if is_torch_version("<=", "1.12"): return False else: return False import intel_extension_for_pytorch # noqa: F401 if check_device: try: # Will raise a RuntimeError if no XPU is found _ = torch.xpu.device_count() return torch.xpu.is_available() except RuntimeError: return False return hasattr(torch, "xpu") and torch.xpu.is_available() The provided code snippet includes necessary dependencies for implementing the `set_seed` function. Write a Python function `def set_seed(seed: int, device_specific: bool = False)` to solve the following problem: Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`. Args: seed (`int`): The seed to set. device_specific (`bool`, *optional*, defaults to `False`): Whether to differ the seed on each device slightly with `self.process_index`. Here is the function: def set_seed(seed: int, device_specific: bool = False): """ Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`. Args: seed (`int`): The seed to set. device_specific (`bool`, *optional*, defaults to `False`): Whether to differ the seed on each device slightly with `self.process_index`. """ if device_specific: seed += AcceleratorState().process_index random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if is_xpu_available(): torch.xpu.manual_seed_all(seed) elif is_npu_available(): torch.npu.manual_seed_all(seed) else: torch.cuda.manual_seed_all(seed) # ^^ safe to call this function even if cuda is not available if is_torch_xla_available(): xm.set_rng_state(seed)
Helper function for reproducible behavior that sets the seed in `random`, `numpy` and `torch`. Args: seed (`int`): The seed to set. device_specific (`bool`, *optional*, defaults to `False`): Whether to slightly offset the seed on each device with `self.process_index`.
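A minimal sketch: note that `device_specific=True` reads the process index from `AcceleratorState`, so it should be called after an `Accelerator` has been constructed.

```python
from accelerate import Accelerator
from accelerate.utils import set_seed

accelerator = Accelerator()
set_seed(42)                        # identical seed on every process
set_seed(42, device_specific=True)  # process i is seeded with 42 + i
```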
2,947
import random from typing import List, Optional, Union import numpy as np import torch from ..state import AcceleratorState from .constants import CUDA_DISTRIBUTED_TYPES from .dataclasses import DistributedType, RNGType from .imports import is_npu_available, is_torch_xla_available, is_xpu_available def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None): # Get the proper rng state if rng_type == RNGType.TORCH: rng_state = torch.get_rng_state() elif rng_type == RNGType.CUDA: rng_state = torch.cuda.get_rng_state() elif rng_type == RNGType.XLA: assert is_torch_xla_available(), "Can't synchronize XLA seeds as torch_xla is unavailable." rng_state = torch.tensor(xm.get_rng_state()) elif rng_type == RNGType.NPU: assert is_npu_available(), "Can't synchronize NPU seeds on an environment without NPUs." rng_state = torch.npu.get_rng_state() elif rng_type == RNGType.XPU: assert is_xpu_available(), "Can't synchronize XPU seeds on an environment without XPUs." rng_state = torch.xpu.get_rng_state() elif rng_type == RNGType.GENERATOR: assert generator is not None, "Need a generator to synchronize its seed." rng_state = generator.get_state() # Broadcast the rng state from device 0 to other devices state = AcceleratorState() if state.distributed_type == DistributedType.XLA: rng_state = rng_state.to(xm.xla_device()) xm.collective_broadcast([rng_state]) xm.mark_step() rng_state = rng_state.cpu() elif ( state.distributed_type in CUDA_DISTRIBUTED_TYPES or state.distributed_type == DistributedType.MULTI_NPU or state.distributed_type == DistributedType.MULTI_XPU ): rng_state = rng_state.to(state.device) torch.distributed.broadcast(rng_state, 0) rng_state = rng_state.cpu() elif state.distributed_type == DistributedType.MULTI_CPU: torch.distributed.broadcast(rng_state, 0) # Set the broadcast rng state if rng_type == RNGType.TORCH: torch.set_rng_state(rng_state) elif rng_type == RNGType.CUDA: torch.cuda.set_rng_state(rng_state) elif rng_type == RNGType.NPU: torch.npu.set_rng_state(rng_state) elif rng_type == RNGType.XPU: torch.xpu.set_rng_state(rng_state) elif rng_type == RNGType.XLA: xm.set_rng_state(rng_state.item()) elif rng_type == RNGType.GENERATOR: generator.set_state(rng_state) class RNGType(BaseEnum): TORCH = "torch" CUDA = "cuda" NPU = "npu" XLA = "xla" XPU = "xpu" GENERATOR = "generator" def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None): for rng_type in rng_types: synchronize_rng_state(RNGType(rng_type), generator=generator)
null
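A minimal sketch of the public wrapper, assuming `synchronize_rng_states` is re-exported from `accelerate.utils`: it broadcasts the chosen RNG states from process 0, and degrades to a harmless get/set round-trip in a single-process run.

```python
import torch
from accelerate.utils import synchronize_rng_states

generator = torch.Generator()  # e.g. the generator driving a DataLoader's shuffling
synchronize_rng_states(["torch", "generator"], generator=generator)
```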
2,948
import json
import os
from collections.abc import Mapping
from typing import Dict, List, Optional, Union

import numpy as np
import torch
from safetensors import safe_open

def offload_weight(weight, weight_name, offload_folder, index=None):
    dtype = None
    # Check the string instead of the dtype to be compatible with versions of PyTorch that don't have bfloat16.
    if str(weight.dtype) == "torch.bfloat16":
        # Need to reinterpret the underlying data as int16 since NumPy does not handle bfloat16s.
        weight = weight.view(torch.int16)
        dtype = "bfloat16"
    array = weight.cpu().numpy()
    tensor_file = os.path.join(offload_folder, f"{weight_name}.dat")
    if index is not None:
        if dtype is None:
            dtype = str(array.dtype)
        index[weight_name] = {"dtype": dtype, "shape": list(array.shape)}
    if array.ndim == 0:
        array = array[None]
    file_array = np.memmap(tensor_file, dtype=array.dtype, mode="w+", shape=array.shape)
    file_array[:] = array[:]
    file_array.flush()
    return index

def save_offload_index(index, offload_folder):
    if index is None or len(index) == 0:
        # Nothing to save
        return
    offload_index_file = os.path.join(offload_folder, "index.json")
    if os.path.isfile(offload_index_file):
        with open(offload_index_file, encoding="utf-8") as f:
            current_index = json.load(f)
    else:
        current_index = {}
    current_index.update(index)

    with open(offload_index_file, "w", encoding="utf-8") as f:
        json.dump(current_index, f, indent=2)

The provided code snippet includes necessary dependencies for implementing the `offload_state_dict` function. Write a Python function `def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor])` to solve the following problem:
Offload a state dict in a given folder. Args: save_dir (`str` or `os.PathLike`): The directory in which to offload the state dict. state_dict (`Dict[str, torch.Tensor]`): The dictionary of tensors to offload.
Here is the function:
def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]):
    """
    Offload a state dict in a given folder.

    Args:
        save_dir (`str` or `os.PathLike`):
            The directory in which to offload the state dict.
        state_dict (`Dict[str, torch.Tensor]`):
            The dictionary of tensors to offload.
    """
    os.makedirs(save_dir, exist_ok=True)
    index = {}
    for name, parameter in state_dict.items():
        index = offload_weight(parameter, name, save_dir, index=index)

    # Update index
    save_offload_index(index, save_dir)
Offload a state dict in a given folder. Args: save_dir (`str` or `os.PathLike`): The directory in which to offload the state dict. state_dict (`Dict[str, torch.Tensor]`): The dictionary of tensors to offload.
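A minimal sketch (assuming `offload_state_dict` is re-exported from `accelerate.utils`; the directory name is arbitrary):

```python
import torch
from accelerate.utils import offload_state_dict

state_dict = {"weight": torch.randn(4, 4), "bias": torch.zeros(4)}
offload_state_dict("offload_dir", state_dict)
# offload_dir/ now contains weight.dat, bias.dat and an index.json recording
# each tensor's dtype and shape for later memory-mapped loading.
```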
2,949
import json import os from collections.abc import Mapping from typing import Dict, List, Optional, Union import numpy as np import torch from safetensors import safe_open The provided code snippet includes necessary dependencies for implementing the `extract_submodules_state_dict` function. Write a Python function `def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str])` to solve the following problem: Extract the sub state-dict corresponding to a list of given submodules. Args: state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from. submodule_names (`List[str]`): The list of submodule names we want to extract. Here is the function: def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str]): """ Extract the sub state-dict corresponding to a list of given submodules. Args: state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from. submodule_names (`List[str]`): The list of submodule names we want to extract. """ result = {} for module_name in submodule_names: # We want to catch module_name parameter (module_name.xxx) or potentially module_name, but not any of the # submodules that could being like module_name (transformers.h.1 and transformers.h.10 for instance) result.update( { key: param for key, param in state_dict.items() if key == module_name or key.startswith(module_name + ".") } ) return result
Extract the sub state-dict corresponding to a list of given submodules. Args: state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from. submodule_names (`List[str]`): The list of submodule names we want to extract.
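A small sketch showing the exact-prefix matching that the inline comment describes (`h.1` must not also capture `h.10`):

```python
import torch

from accelerate.utils import extract_submodules_state_dict

state_dict = {
    "h.1.weight": torch.zeros(1),
    "h.10.weight": torch.zeros(1),
    "head.weight": torch.zeros(1),
}
# `key == module_name or key.startswith(module_name + ".")` keeps h.1.weight
# while rejecting h.10.weight and head.weight.
sub = extract_submodules_state_dict(state_dict, ["h.1"])
assert set(sub) == {"h.1.weight"}
```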
2,950
import functools import gc import inspect import torch from .imports import is_mps_available, is_npu_available, is_xpu_available def is_mps_available(): return is_torch_version(">=", "1.12") and torch.backends.mps.is_available() and torch.backends.mps.is_built() def is_npu_available(check_device=False): "Checks if `torch_npu` is installed and potentially if a NPU is in the environment" if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None: return False import torch import torch_npu # noqa: F401 if check_device: try: # Will raise a RuntimeError if no NPU is found _ = torch.npu.device_count() return torch.npu.is_available() except RuntimeError: return False return hasattr(torch, "npu") and torch.npu.is_available() def is_xpu_available(check_device=False): "check if user disables it explicitly" if not parse_flag_from_env("ACCELERATE_USE_XPU", default=True): return False "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment" if is_ipex_available(): import torch if is_torch_version("<=", "1.12"): return False else: return False import intel_extension_for_pytorch # noqa: F401 if check_device: try: # Will raise a RuntimeError if no XPU is found _ = torch.xpu.device_count() return torch.xpu.is_available() except RuntimeError: return False return hasattr(torch, "xpu") and torch.xpu.is_available() The provided code snippet includes necessary dependencies for implementing the `release_memory` function. Write a Python function `def release_memory(*objects)` to solve the following problem: Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`. Returned objects should be reassigned to the same variables. Args: objects (`Iterable`): An iterable of objects Returns: A list of `None` objects to replace `objects` Example: ```python >>> import torch >>> from accelerate.utils import release_memory >>> a = torch.ones(1000, 1000).cuda() >>> b = torch.ones(1000, 1000).cuda() >>> a, b = release_memory(a, b) ``` Here is the function: def release_memory(*objects): """ Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`. Returned objects should be reassigned to the same variables. Args: objects (`Iterable`): An iterable of objects Returns: A list of `None` objects to replace `objects` Example: ```python >>> import torch >>> from accelerate.utils import release_memory >>> a = torch.ones(1000, 1000).cuda() >>> b = torch.ones(1000, 1000).cuda() >>> a, b = release_memory(a, b) ``` """ if not isinstance(objects, list): objects = list(objects) for i in range(len(objects)): objects[i] = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() elif is_mps_available(): torch.mps.empty_cache() else: torch.cuda.empty_cache() return objects
Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`. Returned objects should be reassigned to the same variables. Args: objects (`Iterable`): An iterable of objects Returns: A list of `None` objects to replace `objects` Example: ```python >>> import torch >>> from accelerate.utils import release_memory >>> a = torch.ones(1000, 1000).cuda() >>> b = torch.ones(1000, 1000).cuda() >>> a, b = release_memory(a, b) ```
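One subtlety worth spelling out: `release_memory` only sees copies of the references, so the caller must rebind its own variables to the returned `None`s for the tensors to become collectable. A CPU-only sketch (assuming a reasonably recent PyTorch):

```python
import torch

from accelerate.utils import release_memory

a = torch.ones(1000, 1000)  # CPU tensor so the sketch runs anywhere
(a,) = release_memory(a)  # rebinding is what actually drops the last reference
assert a is None
```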
2,951
import functools import gc import inspect import torch from .imports import is_mps_available, is_npu_available, is_xpu_available def should_reduce_batch_size(exception: Exception) -> bool: """ Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory Args: exception (`Exception`): An exception """ _statements = [ "CUDA out of memory.", # CUDA OOM "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU "DefaultCPUAllocator: can't allocate memory", # CPU OOM ] if isinstance(exception, RuntimeError) and len(exception.args) == 1: return any(err in exception.args[0] for err in _statements) return False def is_npu_available(check_device=False): "Checks if `torch_npu` is installed and potentially if a NPU is in the environment" if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None: return False import torch import torch_npu # noqa: F401 if check_device: try: # Will raise a RuntimeError if no NPU is found _ = torch.npu.device_count() return torch.npu.is_available() except RuntimeError: return False return hasattr(torch, "npu") and torch.npu.is_available() def is_xpu_available(check_device=False): "check if user disables it explicitly" if not parse_flag_from_env("ACCELERATE_USE_XPU", default=True): return False "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment" if is_ipex_available(): import torch if is_torch_version("<=", "1.12"): return False else: return False import intel_extension_for_pytorch # noqa: F401 if check_device: try: # Will raise a RuntimeError if no XPU is found _ = torch.xpu.device_count() return torch.xpu.is_available() except RuntimeError: return False return hasattr(torch, "xpu") and torch.xpu.is_available() The provided code snippet includes necessary dependencies for implementing the `find_executable_batch_size` function. Write a Python function `def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128)` to solve the following problem: A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or CUDNN, the batch size is cut in half and passed to `function` `function` must take in a `batch_size` parameter as its first argument. Args: function (`callable`, *optional*): A function to wrap starting_batch_size (`int`, *optional*): The batch size to try and fit into memory Example: ```python >>> from accelerate.utils import find_executable_batch_size >>> @find_executable_batch_size(starting_batch_size=128) ... def train(batch_size, model, optimizer): ... ... >>> train(model, optimizer) ``` Here is the function: def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128): """ A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or CUDNN, the batch size is cut in half and passed to `function` `function` must take in a `batch_size` parameter as its first argument. Args: function (`callable`, *optional*): A function to wrap starting_batch_size (`int`, *optional*): The batch size to try and fit into memory Example: ```python >>> from accelerate.utils import find_executable_batch_size >>> @find_executable_batch_size(starting_batch_size=128) ... def train(batch_size, model, optimizer): ... ... 
>>> train(model, optimizer)
```
"""
if function is None:
    return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

batch_size = starting_batch_size

def decorator(*args, **kwargs):
    nonlocal batch_size
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    params = list(inspect.signature(function).parameters.keys())
    # Guard against user error
    if len(params) < (len(args) + 1):
        arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
        raise TypeError(
            f"Batch size was passed into `{function.__name__}` as the first argument when called. "
            f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
        )
    while True:
        if batch_size == 0:
            raise RuntimeError("No executable batch size found, reached zero.")
        try:
            return function(batch_size, *args, **kwargs)
        except Exception as e:
            if should_reduce_batch_size(e):
                gc.collect()
                if is_xpu_available():
                    torch.xpu.empty_cache()
                elif is_npu_available():
                    torch.npu.empty_cache()
                else:
                    torch.cuda.empty_cache()
                batch_size //= 2
            else:
                raise

return decorator
A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or CUDNN, the batch size is cut in half and passed to `function` `function` must take in a `batch_size` parameter as its first argument. Args: function (`callable`, *optional*): A function to wrap starting_batch_size (`int`, *optional*): The batch size to try and fit into memory Example: ```python >>> from accelerate.utils import find_executable_batch_size >>> @find_executable_batch_size(starting_batch_size=128) ... def train(batch_size, model, optimizer): ... ... >>> train(model, optimizer) ```
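A device-free sketch of the retry loop: the raised message matches one of the patterns `should_reduce_batch_size` looks for, so the decorator halves the batch size until the call succeeds (the threshold 32 is illustrative):

```python
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    if batch_size > 32:
        raise RuntimeError("CUDA out of memory.")  # simulated OOM
    return batch_size


assert train() == 32  # tried 128, then 64, then succeeded at 32
```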
2,952
import importlib
import importlib.metadata
import os
import warnings
from functools import lru_cache

import torch
from packaging import version
from packaging.version import parse

from .environment import parse_flag_from_env, str_to_bool
from .versions import compare_versions, is_torch_version


def is_ccl_available():
    try:
        # Restored import (the snippet had a bare `pass` here, which left the
        # `except ImportError` branch unreachable). The module name is inferred
        # from the message and the `find_spec` checks below.
        import oneccl_bindings_for_pytorch  # noqa: F401
    except ImportError:
        print(
            "Intel(R) oneCCL Bindings for PyTorch* is required to run DDP on Intel(R) GPUs, but it is not"
            " detected. If you see \"ValueError: Invalid backend: 'ccl'\" error, please install Intel(R) oneCCL"
            " Bindings for PyTorch*."
        )
    return (
        importlib.util.find_spec("torch_ccl") is not None
        or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
    )
null
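A plausible call site for `is_ccl_available` (hedged: the backend strings follow `torch.distributed` conventions):

```python
from accelerate.utils import is_ccl_available

# Prefer oneCCL collectives when the bindings are installed.
backend = "ccl" if is_ccl_available() else "gloo"
```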
2,953
import importlib import importlib.metadata import os import warnings from functools import lru_cache import torch from packaging import version from packaging.version import parse from .environment import parse_flag_from_env, str_to_bool from .versions import compare_versions, is_torch_version def get_ccl_version(): return importlib.metadata.version("oneccl_bind_pt")
null
2,954
import importlib
import importlib.metadata
import os
import warnings
from functools import lru_cache

import torch
from packaging import version
from packaging.version import parse

from .environment import parse_flag_from_env, str_to_bool
from .versions import compare_versions, is_torch_version

# `_torch_xla_available` and `xm` were used below without being defined in the
# snippet; this minimal guard is an assumed reconstruction.
_torch_xla_available = importlib.util.find_spec("torch_xla") is not None
if _torch_xla_available:
    import torch_xla.core.xla_model as xm  # noqa: F401

_tpu_available = _torch_xla_available


def is_cuda_available():
    """
    Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda
    uninitialized.
    """
    pytorch_nvml_based_cuda_check_previous_value = os.environ.get("PYTORCH_NVML_BASED_CUDA_CHECK")
    try:
        os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = str(1)
        available = torch.cuda.is_available()
    finally:
        if pytorch_nvml_based_cuda_check_previous_value:
            os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = pytorch_nvml_based_cuda_check_previous_value
        else:
            os.environ.pop("PYTORCH_NVML_BASED_CUDA_CHECK", None)

    return available

The provided code snippet includes necessary dependencies for implementing the `is_tpu_available` function. Write a Python function `def is_tpu_available(check_device=True)` to solve the following problem:
Checks if `torch_xla` is installed and potentially if a TPU is in the environment
Here is the function: def is_tpu_available(check_device=True):
    "Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
    warnings.warn(
        "`is_tpu_available` is deprecated and will be removed in v0.27.0. "
        "Please use the `is_torch_xla_available` instead.",
        FutureWarning,
    )
    # Due to bugs on the amp series GPUs, we disable torch-xla on them
    if is_cuda_available():
        return False
    if check_device:
        if _tpu_available:
            try:
                # Will raise a RuntimeError if no XLA configuration is found
                _ = xm.xla_device()
                return True
            except RuntimeError:
                return False
    return _tpu_available
Checks if `torch_xla` is installed and potentially if a TPU is in the environment
2,955
import importlib import importlib.metadata import os import warnings from functools import lru_cache import torch from packaging import version from packaging.version import parse from .environment import parse_flag_from_env, str_to_bool from .versions import compare_versions, is_torch_version def _is_package_available(pkg_name, metadata_name=None): # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version package_exists = importlib.util.find_spec(pkg_name) is not None if package_exists: try: # Some libraries have different names in the metadata _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name) return True except importlib.metadata.PackageNotFoundError: return False def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str): """ Compares a library version to some requirement using a given operation. Args: library_or_version (`str` or `packaging.version.Version`): A library name or a version to check. operation (`str`): A string representation of an operator, such as `">"` or `"<="`. requirement_version (`str`): The version to compare the library version against """ if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}") operation = STR_OPERATION_TO_FUNC[operation] if isinstance(library_or_version, str): library_or_version = parse(importlib.metadata.version(library_or_version)) return operation(library_or_version, parse(requirement_version)) def is_pippy_available(): package_exists = _is_package_available("pippy", "torchpippy") if package_exists: pippy_version = version.parse(importlib.metadata.version("torchpippy")) return compare_versions(pippy_version, ">", "0.1.1") return False
null
2,956
import importlib
import importlib.metadata
import os
import warnings
from functools import lru_cache

import torch
from packaging import version
from packaging.version import parse

from .environment import parse_flag_from_env, str_to_bool
from .versions import compare_versions, is_torch_version

# `torch_xla` and `_torch_xla_available` were used below without being defined
# in the snippet; this minimal guard is an assumed reconstruction.
_torch_xla_available = importlib.util.find_spec("torch_xla") is not None
if _torch_xla_available:
    import torch_xla


def is_cuda_available():
    """
    Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda
    uninitialized.
    """
    pytorch_nvml_based_cuda_check_previous_value = os.environ.get("PYTORCH_NVML_BASED_CUDA_CHECK")
    try:
        os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = str(1)
        available = torch.cuda.is_available()
    finally:
        if pytorch_nvml_based_cuda_check_previous_value:
            os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = pytorch_nvml_based_cuda_check_previous_value
        else:
            os.environ.pop("PYTORCH_NVML_BASED_CUDA_CHECK", None)

    return available


def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False):
    """
    Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set
    the USE_TORCH_XLA to false.
    """
    assert not (check_is_tpu and check_is_gpu), "The check_is_tpu and check_is_gpu cannot both be true."

    if not _torch_xla_available:
        return False
    elif check_is_gpu:
        return torch_xla.runtime.device_type() in ["GPU", "CUDA"]
    elif check_is_tpu:
        return torch_xla.runtime.device_type() == "TPU"

    return True

The provided code snippet includes necessary dependencies for implementing the `is_bf16_available` function. Write a Python function `def is_bf16_available(ignore_tpu=False)` to solve the following problem:
Checks if bf16 is supported, optionally ignoring the TPU
Here is the function: def is_bf16_available(ignore_tpu=False):
    "Checks if bf16 is supported, optionally ignoring the TPU"
    if is_torch_xla_available(check_is_tpu=True):
        return not ignore_tpu
    if is_cuda_available():
        return torch.cuda.is_bf16_supported()
    return True
Checks if bf16 is supported, optionally ignoring the TPU
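A typical way the check above gets used, sketched under the assumption that fp16 is the fallback:

```python
import torch

from accelerate.utils import is_bf16_available

# Pick a mixed-precision dtype the current accelerator actually supports.
mixed_dtype = torch.bfloat16 if is_bf16_available() else torch.float16
```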
2,957
import importlib import importlib.metadata import os import warnings from functools import lru_cache import torch from packaging import version from packaging.version import parse from .environment import parse_flag_from_env, str_to_bool from .versions import compare_versions, is_torch_version def _is_package_available(pkg_name, metadata_name=None): # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version package_exists = importlib.util.find_spec(pkg_name) is not None if package_exists: try: # Some libraries have different names in the metadata _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name) return True except importlib.metadata.PackageNotFoundError: return False def is_bnb_available(): return _is_package_available("bitsandbytes")
null
2,958
import importlib
import importlib.metadata
import os
import warnings
from functools import lru_cache

import torch
from packaging import version
from packaging.version import parse

from .environment import parse_flag_from_env, str_to_bool
from .versions import compare_versions, is_torch_version


def str_to_bool(value) -> int:
    """
    Converts a string representation of truth to `True` (1) or `False` (0).

    True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`;
    """
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return 1
    elif value in ("n", "no", "f", "false", "off", "0"):
        return 0
    else:
        raise ValueError(f"invalid truth value {value}")


# Reference copy of `compare_versions` from `.versions` (which defines the
# STR_OPERATION_TO_FUNC mapping it relies on).
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """
    Compares a library version to some requirement using a given operation.

    Args:
        library_or_version (`str` or `packaging.version.Version`):
            A library name or a version to check.
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`.
        requirement_version (`str`):
            The version to compare the library version against
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_megatron_lm_available():
    if str_to_bool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1:
        package_exists = importlib.util.find_spec("megatron") is not None
        if package_exists:
            try:
                megatron_version = parse(importlib.metadata.version("megatron-lm"))
                return compare_versions(megatron_version, ">=", "2.2.0")
            except Exception as e:
                warnings.warn(f"Parse Megatron version failed. Exception:{e}")
                return False
null
2,959
import importlib import importlib.metadata import os import warnings from functools import lru_cache import torch from packaging import version from packaging.version import parse from .environment import parse_flag_from_env, str_to_bool from .versions import compare_versions, is_torch_version def _is_package_available(pkg_name, metadata_name=None): # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version package_exists = importlib.util.find_spec(pkg_name) is not None if package_exists: try: # Some libraries have different names in the metadata _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name) return True except importlib.metadata.PackageNotFoundError: return False def is_transformers_available(): return _is_package_available("transformers")
null
2,960
import importlib import importlib.metadata import os import warnings from functools import lru_cache import torch from packaging import version from packaging.version import parse from .environment import parse_flag_from_env, str_to_bool from .versions import compare_versions, is_torch_version def _is_package_available(pkg_name, metadata_name=None): # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version package_exists = importlib.util.find_spec(pkg_name) is not None if package_exists: try: # Some libraries have different names in the metadata _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name) return True except importlib.metadata.PackageNotFoundError: return False def is_datasets_available(): return _is_package_available("datasets")
null
2,961
import importlib import importlib.metadata import os import warnings from functools import lru_cache import torch from packaging import version from packaging.version import parse from .environment import parse_flag_from_env, str_to_bool from .versions import compare_versions, is_torch_version def _is_package_available(pkg_name, metadata_name=None): # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version package_exists = importlib.util.find_spec(pkg_name) is not None if package_exists: try: # Some libraries have different names in the metadata _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name) return True except importlib.metadata.PackageNotFoundError: return False def is_timm_available(): return _is_package_available("timm")
null
2,962
import importlib import importlib.metadata import os import warnings from functools import lru_cache import torch from packaging import version from packaging.version import parse from .environment import parse_flag_from_env, str_to_bool from .versions import compare_versions, is_torch_version def _is_package_available(pkg_name, metadata_name=None): # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version package_exists = importlib.util.find_spec(pkg_name) is not None if package_exists: try: # Some libraries have different names in the metadata _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name) return True except importlib.metadata.PackageNotFoundError: return False def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str): """ Compares a library version to some requirement using a given operation. Args: library_or_version (`str` or `packaging.version.Version`): A library name or a version to check. operation (`str`): A string representation of an operator, such as `">"` or `"<="`. requirement_version (`str`): The version to compare the library version against """ if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}") operation = STR_OPERATION_TO_FUNC[operation] if isinstance(library_or_version, str): library_or_version = parse(importlib.metadata.version(library_or_version)) return operation(library_or_version, parse(requirement_version)) def is_aim_available(): package_exists = _is_package_available("aim") if package_exists: aim_version = version.parse(importlib.metadata.version("aim")) return compare_versions(aim_version, "<", "4.0.0") return False
null
2,963
import importlib import importlib.metadata import os import warnings from functools import lru_cache import torch from packaging import version from packaging.version import parse from .environment import parse_flag_from_env, str_to_bool from .versions import compare_versions, is_torch_version def _is_package_available(pkg_name, metadata_name=None): # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version package_exists = importlib.util.find_spec(pkg_name) is not None if package_exists: try: # Some libraries have different names in the metadata _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name) return True except importlib.metadata.PackageNotFoundError: return False def is_tensorboard_available(): return _is_package_available("tensorboard") or _is_package_available("tensorboardX")
null
2,964
import importlib import importlib.metadata import os import warnings from functools import lru_cache import torch from packaging import version from packaging.version import parse from .environment import parse_flag_from_env, str_to_bool from .versions import compare_versions, is_torch_version def _is_package_available(pkg_name, metadata_name=None): # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version package_exists = importlib.util.find_spec(pkg_name) is not None if package_exists: try: # Some libraries have different names in the metadata _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name) return True except importlib.metadata.PackageNotFoundError: return False def is_wandb_available(): return _is_package_available("wandb")
null
2,965
import importlib import importlib.metadata import os import warnings from functools import lru_cache import torch from packaging import version from packaging.version import parse from .environment import parse_flag_from_env, str_to_bool from .versions import compare_versions, is_torch_version def _is_package_available(pkg_name, metadata_name=None): # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version package_exists = importlib.util.find_spec(pkg_name) is not None if package_exists: try: # Some libraries have different names in the metadata _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name) return True except importlib.metadata.PackageNotFoundError: return False def is_comet_ml_available(): return _is_package_available("comet_ml")
null
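The last several rows are all thin wrappers over `_is_package_available`; one hedged sketch covers their shared usage pattern of gating optional trackers on what is importable:

```python
from accelerate.utils import (
    is_comet_ml_available,
    is_tensorboard_available,
    is_wandb_available,
)

# Collect only the experiment trackers present in this environment.
trackers = [
    name
    for name, available in [
        ("tensorboard", is_tensorboard_available()),
        ("wandb", is_wandb_available()),
        ("comet_ml", is_comet_ml_available()),
    ]
    if available
]
```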