| python_code (string, length 0-992k) | repo_name (string, length 8-46) | file_path (string, length 5-162) |
|---|---|---|
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
import dotenv
from src.datamodules.language_modeling_hf import LMDataModule
# load environment variables from `.env` file if it exists
# recursively searches for `.env` in all folders starting from work di... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/tests/datamodules/test_language_modeling_hf.py |
from typing import List, Optional, Sequence
from pathlib import Path
import hydra
from omegaconf import OmegaConf, DictConfig
from pytorch_lightning import (
Callback,
LightningDataModule,
LightningModule,
Trainer,
seed_everything,
)
from pytorch_lightning.loggers import LightningLoggerBase
from s... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/train.py |
from typing import List, Optional
from pathlib import Path
import torch
import hydra
from omegaconf import OmegaConf, DictConfig
from pytorch_lightning import (
Callback,
LightningDataModule,
LightningModule,
Trainer,
seed_everything,
)
from pytorch_lightning.loggers import LightningLoggerBase
fr... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/eval.py |
from typing import Any, Dict, Optional
import torch
from torch import Tensor
from torchmetrics import Metric
class NumTokens(Metric):
"""Keep track of how many tokens we've seen.
"""
# TODO: how do we prevent the reset between the epochs? The reset happens on the 1st batch
# of the next epoch.
#... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/metrics/num_tokens.py |
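The `num_tokens.py` snippet above shows only the docstring and a TODO about the state being reset between epochs. Below is a minimal sketch of what such a token counter typically looks like with torchmetrics; the class and argument names are illustrative, not the repo's exact code, and `persistent=True` is one common way to keep the count across checkpoint save/load.

```python
# Hedged sketch of a token-counting torchmetrics Metric (illustrative names only).
import torch
from torchmetrics import Metric

class TokenCounter(Metric):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # dist_reduce_fx='sum' aggregates per-process counts across DDP ranks;
        # persistent=True keeps the running count in the checkpointed state dict.
        self.add_state("count", default=torch.tensor(0, dtype=torch.long),
                       dist_reduce_fx="sum", persistent=True)

    def update(self, preds: torch.Tensor, target: torch.Tensor) -> None:
        self.count += target.numel()

    def compute(self) -> torch.Tensor:
        return self.count
```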
# Inspired by https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/metrics/perplexity.py
# But we compute the perplexity correctly: exp(average(nll)), not average(exp(nll))
# Also adapted from https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/text/perplexity.py
# But we pass in the loss t... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/metrics/perplexity.py |
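The comments in `perplexity.py` above state the key point: perplexity should be `exp(average(nll))`, not `average(exp(nll))`. Below is a minimal sketch of that aggregation, assuming the metric is fed a mean loss plus a token count; the real file takes the loss tensor directly, which is truncated above.

```python
# Sketch of exp(mean(nll)) aggregation; names are illustrative.
import torch
from torchmetrics import Metric

class PerplexitySketch(Metric):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.add_state("total_nll", default=torch.tensor(0.0, dtype=torch.float64),
                       dist_reduce_fx="sum")
        self.add_state("count", default=torch.tensor(0, dtype=torch.long),
                       dist_reduce_fx="sum")

    def update(self, loss: torch.Tensor, num_tokens: int) -> None:
        # `loss` is assumed to be the mean negative log-likelihood over `num_tokens` tokens
        self.total_nll += loss.detach().double() * num_tokens
        self.count += num_tokens

    def compute(self) -> torch.Tensor:
        # exp of the token-weighted mean NLL, i.e. exp(average(nll))
        return torch.exp(self.total_nll / self.count)
```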
import torch
from torch import Tensor
from torchmetrics import Metric, Accuracy
class AccuracyMine(Accuracy):
"""Wrap torchmetrics.Accuracy to take argmax of y in case of Mixup.
"""
def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
super().update(preds, target.argmax(dim=-1... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/metrics/accuracy.py |
from typing import Any, List
import inspect
import torch
import hydra
from pytorch_lightning import LightningModule, LightningDataModule
from torchmetrics import MetricCollection
from einops import rearrange
from omegaconf import OmegaConf
from src.utils.utils import get_logger
from src.optim.param_grouping import ... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/tasks/seq.py |
# Adapted from https://pytorch.org/docs/stable/_modules/torch/distributed/algorithms/ddp_comm_hooks/default_hooks.html
# We divide by world_size first before converting to fp16, so it's safer.
from typing import Any, Callable
import torch
import torch.distributed as dist
def fp16_compress_hook(
process_group: di... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/distributed/ddp_comm_hooks.py |
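The `ddp_comm_hooks.py` header above explains the variant: divide the gradient bucket by `world_size` before casting to fp16, so the all-reduce sums pre-scaled values and is less likely to overflow. A hedged sketch of that hook, following the standard `torch.distributed` comm-hook contract (not the repo's exact code); it would be registered on a `DistributedDataParallel` model via `model.register_comm_hook(None, fp16_compress_scale_first_hook)`.

```python
# Sketch: scale in fp32 first, then compress to fp16 for the all-reduce.
import torch
import torch.distributed as dist

def fp16_compress_scale_first_hook(process_group, bucket) -> torch.futures.Future:
    group = process_group if process_group is not None else dist.group.WORLD
    world_size = dist.get_world_size(group)
    # Divide while still in full precision, then cast down.
    compressed = (bucket.buffer() / world_size).half()
    fut = dist.all_reduce(compressed, group=group, async_op=True).get_future()

    def decompress(fut):
        buf = bucket.buffer()
        buf.copy_(fut.value()[0])  # cast back to the bucket's original dtype in place
        return buf

    return fut.then(decompress)
```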
# Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/benchmark.py
from typing import Any, List, Sequence
import torch
from pytorch_lightning import Callback, Trainer, LightningModule
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import Attribut... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/callbacks/flop_count.py |
import subprocess
from pathlib import Path
from typing import List
import matplotlib.pyplot as plt
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from pytorch_lightning.utilities import rank_zero_only
fr... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/callbacks/wandb_callbacks.py |
import torch
from pytorch_lightning import Callback, Trainer, LightningModule
import logging
log = logging.getLogger(__name__) # We want a logger for each process, not just the rank 0
def l2_promote():
import ctypes
_libcudart = ctypes.CDLL('libcudart.so')
# Set device limit on the current device
... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/callbacks/gpu_affinity.py |
# Inspired by https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/utilities/grads.py
# However, they compute grad at every iteration (I think), and the .item() calls incur a lot of overhead
# (6-7% slow down on GPT-2 small). Instead we only compute for iterations where we need to log, and don't
... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/callbacks/norm_monitor.py |
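The `norm_monitor.py` header above is about overhead: calling `.item()` per parameter forces a GPU sync each time. A sketch of the cheaper pattern it describes: compute the norms on device, stack them, and transfer once, and only do this on iterations where logging actually happens.

```python
# Sketch: one device-to-host transfer for all gradient norms (illustrative helper).
import torch

@torch.no_grad()
def gradient_norms(model: torch.nn.Module) -> dict:
    names, norms = [], []
    for name, p in model.named_parameters():
        if p.grad is not None:
            names.append(name)
            norms.append(p.grad.norm(p=2))
    if not norms:
        return {}
    values = torch.stack(norms).tolist()  # single synchronization point
    return dict(zip(names, values))
```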
import pytorch_lightning as pl
from pytorch_lightning import Callback
from pytorch_lightning.utilities import rank_zero_only
import torch
from torch.autograd import grad
class CausalityMonitor(Callback):
r"""Monitor causality of a model by tracking gradient leakage forward in time.
In a fully causal model, ... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/callbacks/causality_monitor.py |
# Inspired by https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/callbacks/stochastic_weight_avg.py
# https://github.com/PyTorchLightning/Lightning-Bolts/blob/master/pl_bolts/callbacks/byol_updates.py
# https://forums.pytorchlightning.ai/t/adopting-exponential-moving-average-ema-for-pl-... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/callbacks/ema.py |
# Adapted from https://pytorch-lightning.readthedocs.io/en/latest/_modules/pytorch_lightning/callbacks/gpu_stats_monitor.html#GPUStatsMonitor
# We only need the speed monitoring, not the GPU monitoring
import time
from typing import Any
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities i... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/callbacks/speed_monitor.py |
EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/callbacks/__init__.py | |
# Adapted from https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/fault_tolerance.py
from typing import Any
from pathlib import Path
import pytorch_lightning as pl
class ModelCheckpointMine(pl.callbacks.model_checkpoint.ModelCheckpoint):
def __init__(self, *args, fault_toleran... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/callbacks/model_checkpoint.py |
from typing import Any
from pytorch_lightning import Callback, Trainer, LightningModule
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
class ParamsLog(Callback):
"""Log the number of parameters of the model
"""
def __init__(self, total... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/callbacks/params_log.py |
# Adapted from https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/lr_monitor.py.
from typing import Any
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.strategies import DeepSpeedStrategy
class LossScaleM... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/callbacks/loss_scale_monitor.py |
# Adapted from https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py
from itertools import chain
from pathlib import Path
import pickle
from typing import Any, List, Union
import subprocess
import mmap
from multiprocessing.shared_memory import SharedMemory
import numpy ... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/datamodules/language_modeling_hf.py |
# Adapted from https://github.com/PyTorchLightning/lightning-bolts/blob/master/pl_bolts/datamodules/imagenet_datamodule.py
import os
from pathlib import Path
from typing import Any, List, Union, Callable, Optional
import torch
from torch.utils.data import Dataset, DataLoader, SequentialSampler
from torch.utils.data.da... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/datamodules/imagenet.py |
import torch
from timm.data import Mixup
from timm.data.mixup import mixup_target
class TimmMixup(Mixup):
""" Wrap timm.data.Mixup that avoids the assert that batch size must be even.
"""
def __call__(self, x, target):
if self.mode == 'elem':
lam = self._mix_elem(x)
elif self.... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/datamodules/timm_mixup.py |
# Adapted from https://github.com/Lightning-AI/lightning/blob/2845e7565dbe6b765ae32870e7d2bc456529c30a/tests/tests_pytorch/utilities/test_auto_restart.py#L1397
from typing import Iterator
import math
import torch
from torch.utils.data import RandomSampler, DistributedSampler
class RandomFaultTolerantSampler(RandomSa... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/datamodules/fault_tolerant_sampler.py |
# Copied from https://github.com/stanford-crfm/mistral/blob/main/src/corpora/detokenization.py
# Which was originally from https://github.com/NVIDIA/Megatron-LM/blob/aed2f75e209e525c842aec7c044af7acae2a4614/tasks/zeroshot_gpt/detokenizer.py
"""
Handle detokenization for different dataset for zero-shot LM evaluation.
"... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/datamodules/datasets/detokenizer.py |
# Inspired by https://github.com/NVIDIA/Megatron-LM/blob/main/tasks/zeroshot_gpt/datasets.py
# Except we don't pad the last block and don't use overlapping eval
# And we return both the input and the target
import math
import numpy as np
import torch
class LMDataset(torch.utils.data.Dataset):
def __init__(self,... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/datamodules/datasets/lm_dataset.py |
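The `lm_dataset.py` header above spells out three choices: the last block is not padded, evaluation windows do not overlap, and each item returns both input and target. A minimal sketch consistent with those comments (illustrative, not the repo's exact class):

```python
# Block-wise language-modeling dataset sketch: target is the input shifted by one token.
import math
import numpy as np
import torch

class LMDatasetSketch(torch.utils.data.Dataset):
    def __init__(self, tokens: np.ndarray, seq_len: int):
        self.tokens = tokens
        self.seq_len = seq_len
        # -1 because each block needs one extra token at the end for the target
        self.num_samples = math.ceil((len(tokens) - 1) / seq_len)

    def __len__(self) -> int:
        return self.num_samples

    def __getitem__(self, idx):
        start = idx * self.seq_len
        # non-overlapping blocks; the final block may be shorter and is not padded
        chunk = torch.as_tensor(self.tokens[start:start + self.seq_len + 1].astype(np.int64))
        return chunk[:-1], chunk[1:]
```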
import inspect
import torch.nn as nn
import hydra
try:
from apex.contrib.layer_norm import FastLayerNorm
except ImportError:
FastLayerNorm = None
from src.models.modules.seq_common import PositionalEncoding
def group_parameters_for_optimizer(model, optimizer_cfg, bias_weight_decay=False,
... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/optim/param_grouping.py |
import torch
from torch.optim import Optimizer
from timm.scheduler import CosineLRScheduler
# We need to subclass torch.optim.lr_scheduler._LRScheduler, or Pytorch-lightning will complain
class TimmCosineLRScheduler(CosineLRScheduler, torch.optim.lr_scheduler._LRScheduler):
""" Wrap timm.scheduler.CosineLRSchedu... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/optim/timm_lr_scheduler.py |
# Meant to work with Apex's DistributedFusedAdam
from typing import Any, Callable, Dict, List, Optional, Union
from pathlib import Path
import types
import torch
from torch.optim.optimizer import Optimizer
from torch.optim import LBFGS
from apex.contrib.optimizers.distributed_fused_adam import DistributedFusedAdam
f... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/utils/ddp_zero2.py |
import collections
import math
import os
import pathlib
import re
import pynvml
pynvml.nvmlInit()
def systemGetDriverVersion():
return pynvml.nvmlSystemGetDriverVersion()
def deviceGetCount():
return pynvml.nvmlDeviceGetCount()
class device:
# assume nvml returns list of 64 bit ints
_nvml_affini... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/utils/gpu_affinity.py |
import re
from pathlib import Path
import torch
import math
from einops import rearrange
def load_checkpoint(path, device='cpu'):
path = Path(path).expanduser()
is_deepspeed = False
if path.is_dir(): # DeepSpeed checkpoint
is_deepspeed = True
latest_path = path / 'latest'
if lates... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/utils/checkpoint.py |
# Copied from https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py
from __future__ import division
from __future__ import unicode_literals
from typing import Iterable, Optional
import weakref
import copy
import contextlib
import torch
def to_float_maybe(x):
return x.float() if x.dtype in [torch.flo... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/utils/ema.py |
# Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/benchmark.py
import torch
try:
from deepspeed.profiling.flops_profiler import get_model_profile
has_deepspeed_profiling = True
except ImportError as e:
has_deepspeed_profiling = False
try:
from fvcore.nn import FlopCountAnaly... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/utils/flops.py |
# Copied from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/distributed.py
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in com... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/utils/distributed.py |
import logging
import warnings
from typing import List, Sequence
import pytorch_lightning as pl
import rich.syntax
import rich.tree
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_only
# Copied from https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-ma... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/utils/utils.py |
# Meant to work with Pytorch's ZeroRedundancyOptimizer
from typing import Any, Callable, Dict, List, Optional, Union
from pathlib import Path
import torch
from torch.optim.optimizer import Optimizer
from torch.distributed.optim import ZeroRedundancyOptimizer
from pytorch_lightning.strategies.ddp import DDPStrategy
f... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/utils/ddp_zero1.py |
import math
from functools import partial
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
import hydra
from einops import reduce, rearrange
def pooling(x, pooling_mode='CLS', key_padding_mask=None, batch_first=True):
... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/src/models/modules/seq_common.py |
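Only the signature of `pooling` in `seq_common.py` is visible above. A hedged sketch of what the two most common modes of such a helper look like, assuming `key_padding_mask` follows the PyTorch convention (True marks padding positions); the real implementation may differ and support more modes.

```python
# Illustrative pooling helper: CLS-token pooling vs. masked mean pooling.
import torch

def pooling_sketch(x, pooling_mode='CLS', key_padding_mask=None, batch_first=True):
    if not batch_first:
        x = x.transpose(0, 1)  # -> (batch, seqlen, dim)
    if pooling_mode == 'CLS':
        return x[:, 0]  # embedding of the first token
    if pooling_mode == 'MEAN':
        if key_padding_mask is None:
            return x.mean(dim=1)
        keep = (~key_padding_mask).unsqueeze(-1).to(x.dtype)  # 1.0 for real tokens
        return (x * keep).sum(dim=1) / keep.sum(dim=1).clamp(min=1.0)
    raise NotImplementedError(f"Unsupported pooling_mode: {pooling_mode}")
```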
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange
from flash_attn.layers.rotary import apply_rotary_emb_func, apply_rotary_emb_torch
is_sm8x = torch.cuda.get_device_capability('cuda') >= (8, 0)
@pytest.mark.parametrize('dtype', ([torch.float16] if not is_sm8x else... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/test_rotary.py |
import math
from functools import partial
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange, repeat
from flash_attn.flash_attn_interface import flash_attn_func, flash_attn_unpadded_qkvpacked_func, _get_block_size, flash_attn_unpadded_kvpacked_func, flash_attn_unpadded_func
from... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/test_flash_attn.py |
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange
from flash_attn.losses.cross_entropy import CrossEntropyLossApex
is_sm8x = torch.cuda.get_device_capability('cuda')[0] >= 8
@pytest.mark.parametrize('dtype', [torch.float16, torch.float32] + ([torch.bfloat16] if is... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/losses/test_cross_entropy.py |
# Run test with:
# torchrun --no_python --nproc_per_node=8 pytest -q -s tests/losses/test_cross_entropy_parallel.py
import math
import torch
import torch.nn.functional as F
import pytest
from apex.transformer import parallel_state
from apex.transformer import tensor_parallel
from flash_attn.losses.cross_entropy imp... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/losses/test_cross_entropy_parallel.py |
# Copyright (c) 2023, Tri Dao.
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange
from transformers.models.gpt_neox.modeling_gpt_neox import RotaryEmbedding as RotaryEmbeddingNeoX
from transformers.models.gpt_neox.modeling_gpt_neox import apply_rotary_pos_emb as appl... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/layers/test_rotary.py |
import re
import torch
import pytest
from transformers import OPTConfig
from transformers.models.opt.modeling_opt import OPTForCausalLM
from flash_attn.models.gpt import GPTLMHeadModel
from flash_attn.models.opt import remap_state_dict_hf_opt, opt_config_to_gpt2_config
from flash_attn.utils.pretrained import state_d... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/models/test_opt.py |
import os
import re
import time
import torch
import pytest
from einops import rearrange
from transformers import GPT2Config, GPT2Tokenizer, OPTConfig, AutoTokenizer
from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel as GPT2LMHeadModelHF
from transformers.models.opt.modeling_opt import OPTForCausalLM
... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/models/test_gpt_generation.py |
import time
import torch
import pytest
from transformers import GPTJConfig, AutoTokenizer
from transformers.models.gptj.modeling_gptj import GPTJForCausalLM
from flash_attn.models.gpt import GPTLMHeadModel
from flash_attn.models.gptj import remap_state_dict_hf_gptj, gptj_config_to_gpt2_config
from flash_attn.utils.p... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/models/test_gptj.py |
# Run test with:
# torchrun --no_python --nproc_per_node=8 pytest -q -s tests/models/test_gpt_generation_parallel.py -k "parallel"
import os
import re
import torch
import pytest
from einops import rearrange
from transformers import GPT2Config, GPT2Tokenizer
from transformers.models.gpt2.modeling_gpt2 import GPT2LMHe... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/models/test_gpt_generation_parallel.py |
import time
import torch
import pytest
from transformers import GPTNeoXConfig, AutoTokenizer
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM
from flash_attn.models.gpt import GPTLMHeadModel
from flash_attn.models.gpt_neox import remap_state_dict_hf_gpt_neox, gpt_neox_config_to_gpt2_conf... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/models/test_gpt_neox.py |
import re
import torch
import pytest
from transformers import GPT2Config
from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel as GPT2LMHeadModelHF
from flash_attn.models.gpt import GPTLMHeadModel
from flash_attn.models.gpt import remap_state_dict_hf_gpt2
from flash_attn.utils.pretrained import state_di... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/models/test_gpt.py |
import re
import torch
import pytest
from timm.models.vision_transformer import vit_base_patch16_224
from flash_attn.models.vit import vit_base_patch16_224 as flash_vit_base_patch16_224
@pytest.mark.parametrize('fused_mlp', [False, True])
# @pytest.mark.parametrize('fused_mlp', [False])
@pytest.mark.parametrize('o... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/models/test_vit.py |
import re
from collections import OrderedDict
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange
from transformers import BertConfig
from transformers.models.bert.modeling_bert import BertModel as BertModelHF
from transformers.models.bert.modeling_bert import BertForPreTraining a... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/models/test_bert.py |
# Run test with:
# torchrun --no_python --nproc_per_node=8 pytest -q -s tests/models/test_gpt_parallel.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytest
from einops import rearrange
from transformers import GPT2Config
from apex.transformer import parallel_state
from f... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/models/test_gpt_parallel.py |
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange, repeat
from flash_attn.ops.layer_norm import DropoutAddLayerNorm, dropout_add_layer_norm
from flash_attn.ops.layer_norm import dropout_add_layer_norm_subset
from flash_attn.ops.rms_norm import DropoutAddRMSNorm, drop... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/ops/test_dropout_layer_norm.py |
import math
from functools import partial
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange
from flash_attn.ops.fused_dense import FusedDense, FusedMLP
@pytest.mark.parametrize('dtype', [torch.float16, torch.bfloat16])
@pytest.mark.parametrize('return_residual', [False, True])... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/ops/test_fused_dense.py |
# Run test with:
# torchrun --no_python --nproc_per_node=8 pytest -q -s tests/ops/test_fused_dense_parallel.py
import math
import torch
import torch.nn.functional as F
import pytest
from apex.transformer import parallel_state
from apex.transformer import tensor_parallel
from flash_attn.ops.fused_dense import FusedD... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/ops/test_fused_dense_parallel.py |
# Run test with:
# torchrun --no_python --nproc_per_node=8 pytest -q -s tests/modules/test_embedding_parallel.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytest
from einops import rearrange
from apex.transformer import parallel_state
from flash_attn.modules.embedding import GPT2Embe... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/modules/test_embedding_parallel.py |
# Run test with:
# torchrun --no_python --nproc_per_node=8 pytest -q -s tests/modules/test_block_parallel.py
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytest
from einops import rearrange
from apex.transformer import parallel_state
from apex.t... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/modules/test_block_parallel.py |
# Run test with:
# torchrun --no_python --nproc_per_node=8 pytest -q -s tests/modules/test_mha_parallel.py
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange
from apex.transformer import parallel_state
from apex.transformer import tensor_parallel
from flash_attn.mod... | EXA-1-master | exa/modular_components/attentions/flash-attention/tests/modules/test_mha_parallel.py |
from functools import partial
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from flash_attn.utils.benchmark import benchmark_forward, benchmark_all, pytorch_profiler
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
# f... | EXA-1-master | exa/modular_components/attentions/flash-attention/benchmarks/benchmark_causal.py |
from functools import partial
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from flash_attn.utils.benchmark import benchmark_all, benchmark_forward, benchmark_backward, benchmark_combined
from flash_attn.bert_padding import unpad_input, pad_input
f... | EXA-1-master | exa/modular_components/attentions/flash-attention/benchmarks/benchmark_flash_attention.py |
# [2022-10-23] Copied from https://github.com/NVIDIA/apex/blob/master/apex/transformer/functional/fused_softmax.py
# for benchmarking.
# We added support for seqlen=2k and seqlen=4k
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "L... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/fused_softmax.py |
# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/fmha.py
import torch
import torch.nn as nn
import flash_attn_cuda
def convert_blockmask(blockmask, causal):
"""Convert from the 0-1 format to the format used by the CUDA code.
0 means th... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/flash_blocksparse_attn_interface.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
import hydra
from flash_attn.flash_blocksparse_attn_interface import flash_blocksparse_attn_func
from flash_attn.flash_blocksparse_attn_interface import convert_blockmask
from flash_attn.bert_padding import unpad_input, pad_input, index_firs... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/flash_blocksparse_attention.py |
# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
class IndexFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, input, indice... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/bert_padding.py |
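`bert_padding.py` above is cut off at the `IndexFirstAxis` autograd function. The underlying idea is straightforward: flatten the batch and sequence dimensions and gather only the rows of real (non-padded) tokens. A plain-indexing reference, without the custom autograd function, so slower but equivalent in result:

```python
# Reference version of unpadding via first-axis indexing (illustrative names).
import torch
from einops import rearrange

def index_first_axis_ref(x: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
    # x: (batch * seqlen, ...), indices: (num_real_tokens,)
    return x[indices]

def unpad_ref(hidden_states: torch.Tensor, attention_mask: torch.Tensor):
    # hidden_states: (batch, seqlen, dim); attention_mask: (batch, seqlen), 1 = real token
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    flat = rearrange(hidden_states, 'b s d -> (b s) d')
    return index_first_axis_ref(flat, indices), indices
```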
EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/__init__.py | |
# [2022-10-23] Downloaded from https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py
# for benchmarking.
# We fixed a few dtype cast to make it work for bf16
"""
Fused Attention
===============
This is a Triton implementation of the Flash Attention algorithm
(see: Dao et al., https://arxi... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/flash_attn_triton_og.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
from flash_attn.bert_padding import unpad_input, pad_input
class FlashAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
A... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/flash_attention.py |
"""
*Experimental* implementation of FlashAttention in Triton.
Tested with triton==2.0.0.dev20221202.
Triton 2.0 has a new backend (MLIR) but seems like it doesn't yet work for head dimensions
other than 64:
https://github.com/openai/triton/blob/d376020f90002757eea3ea9475d4f7cfc2ec5ead/python/triton/ops/flash_attention... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/flash_attn_triton.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import flash_attn_cuda
def _get_block_size(device, head_dim, is_dropout):
assert head_dim % 8 == 0 and head_dim <= 128
return 256 if head_dim <= 64 else 128
def _flash_attn_forward(q, k, v, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/flash_attn_interface.py |
# Inspired by https://github.com/NVIDIA/apex/blob/master/apex/transformer/tensor_parallel/cross_entropy.py
# But we make it much faster: we compute the local loss and the LSE, and by exchanging the LSE and
# the losses we can get the global loss. There's no need to do it step by step
# (compute local max, exchange, com... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/losses/cross_entropy.py |
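The `losses/cross_entropy.py` header above describes the trick for tensor-parallel (vocab-sharded) logits: each rank computes its local loss contribution and its local log-sum-exp, and a single exchange of those two quantities yields the global loss, instead of step-by-step max/sum exchanges. A worked sketch of the math (illustrative, eager-mode, no custom kernels):

```python
# loss_i = logsumexp(all logits_i) - logit_{i, y_i}
#        = logsumexp over ranks of (per-rank LSE_i) - (target logit, owned by one rank)
import torch
import torch.distributed as dist

def vocab_parallel_ce_sketch(local_logits, labels, vocab_start, vocab_end, group=None):
    # local_logits: (ntokens, local_vocab) shard of the logits; labels: global token ids
    world_size = dist.get_world_size(group)
    local_logits = local_logits.float()
    local_lse = torch.logsumexp(local_logits, dim=-1)          # per-token, this shard only
    in_shard = (labels >= vocab_start) & (labels < vocab_end)  # does this rank own the label?
    local_idx = (labels - vocab_start).clamp(min=0)
    target_logit = local_logits.gather(-1, local_idx.unsqueeze(-1)).squeeze(-1)
    target_logit = torch.where(in_shard, target_logit, torch.zeros_like(target_logit))
    # one exchange of the LSEs and one sum of the target logits recovers the global loss
    lse_all = torch.empty(world_size, *local_lse.shape,
                          device=local_lse.device, dtype=local_lse.dtype)
    dist.all_gather_into_tensor(lse_all, local_lse.contiguous(), group=group)
    dist.all_reduce(target_logit, group=group)                  # SUM: only the owner is nonzero
    return (torch.logsumexp(lse_all, dim=0) - target_logit).mean()
```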
EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/losses/__init__.py | |
EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/layers/__init__.py | |
# We use the same API as https://github.com/rwightman/pytorch-image-models/blob/v0.6.11/timm/models/layers/patch_embed.py
# But we use nn.Linear instead of Conv2d and it's about 8x faster.
from functools import partial
import torch.nn as nn
from torch import _assert
from torch.nn.modules.utils import _pair
from eino... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/layers/patch_embed.py |
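The `patch_embed.py` header above notes that a patch embedding can use `nn.Linear` instead of `Conv2d` (and reports it as considerably faster), since a convolution with `kernel_size == stride == patch_size` over non-overlapping patches is just a linear map on each flattened patch. A minimal sketch of that equivalence:

```python
# Linear patch embedding sketch (illustrative; the real module handles more options).
import torch.nn as nn
from einops import rearrange

class LinearPatchEmbed(nn.Module):
    def __init__(self, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        self.patch_size = patch_size
        self.proj = nn.Linear(in_chans * patch_size * patch_size, embed_dim)

    def forward(self, x):  # x: (B, C, H, W) with H, W divisible by patch_size
        p = self.patch_size
        x = rearrange(x, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p)
        return self.proj(x)  # (B, num_patches, embed_dim)
```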
# Copyright (c) 2023, Tri Dao.
from typing import Tuple
import math
import torch
from einops import rearrange, repeat
import rotary_emb
def rotate_half(x, interleaved=False):
if not interleaved:
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
else:
x1, x2 = x[..., :... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/layers/rotary.py |
import torch
from transformers.utils import WEIGHTS_NAME, WEIGHTS_INDEX_NAME
from transformers.utils import is_remote_url
from transformers.modeling_utils import load_state_dict
from transformers.utils.hub import cached_file, get_checkpoint_shard_files
def state_dict_from_pretrained(model_name, device=None, dtype=No... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/utils/pretrained.py |
# Copyright (c) 2023, Tri Dao.
# Adapted from https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/forward_step.py#L31
from typing import Optional, Union, Sequence, Callable
import gc
import time
from dataclasses import dataclass, field
from collections import na... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/utils/generation.py |
# Copyright (c) 2022, Tri Dao.
""" Useful functions for writing test code. """
import torch
import torch.utils.benchmark as benchmark
def benchmark_forward(fn, *inputs, repeats=10, desc='', verbose=True, amp=False,
amp_dtype=torch.float16, **kwinputs):
""" Use Pytorch Benchmark on the forwa... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/utils/benchmark.py |
EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/utils/__init__.py | |
from typing import Optional
import torch
from torch import Tensor
from torch.distributed import ProcessGroup
# `all_gather_into_tensor` and `reduce_scatter_tensor` are new placeholders for
# `_all_gather_base` and `_reduce_scatter_base`. They require the most recent
# version of PyTorch. The following 4 lines are for... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/utils/distributed.py |
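The comment in `utils/distributed.py` above refers to a small backward-compatibility shim: `all_gather_into_tensor` / `reduce_scatter_tensor` are the newer public names for `_all_gather_base` / `_reduce_scatter_base`. One hedged way to write such a shim (the repo's exact four lines are truncated above):

```python
# Fall back to the old private names on older torch builds (sketch).
import torch.distributed as dist

if not hasattr(dist, "all_gather_into_tensor"):
    dist.all_gather_into_tensor = dist._all_gather_base
if not hasattr(dist, "reduce_scatter_tensor"):
    dist.reduce_scatter_tensor = dist._reduce_scatter_base
```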
# Copyright (c) 2023, Tri Dao.
import math
import re
from collections import OrderedDict
import torch
import torch.nn.functional as F
from transformers import GPT2Config, GPTJConfig
def remap_state_dict_hf_gptj(state_dict, config):
def key_mapping_layers(key):
return re.sub(r'^transformer.h.', 'transf... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/models/gptj.py |
EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/models/__init__.py | |
# Copyright (c) 2023, Tri Dao.
import math
import re
from collections import OrderedDict
import torch
import torch.nn.functional as F
from transformers import GPT2Config, OPTConfig
def remap_state_dict_hf_opt(state_dict, config):
def key_mapping_model(key):
key = re.sub(r'^model.decoder.', 'transforme... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/models/opt.py |
# Copyright (c) 2022, Tri Dao.
# Inspired by / adapted from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
import math
import re
from functools import partial
from copy import deepcopy
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/models/vit.py |
# Copyright (c) 2022, Tri Dao.
# This BERT implementation is based on our MLPerf 2.0 and MLPerf 2.1 BERT implementation.
# https://github.com/mlcommons/training_results_v2.0/blob/main/HazyResearch/benchmarks/bert/implementations/pytorch/modeling.py
# https://github.com/mlcommons/training_results_v2.1/blob/main/Azure-Ha... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/models/bert.py |
# Copyright (c) 2023, Tri Dao.
import math
import re
from collections import OrderedDict
import torch
import torch.nn.functional as F
from einops import rearrange
from transformers import GPT2Config, GPTNeoXConfig
def remap_state_dict_hf_gpt_neox(state_dict, config):
def key_mapping_layers(key):
retu... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/models/gpt_neox.py |
# Copyright (c) 2023, Tri Dao.
import logging
import math
import re
from functools import partial
from collections import namedtuple, OrderedDict
from collections.abc import Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import GPT2Config
from einops import rearrange
... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/models/gpt.py |
# Copied from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/model/layers/activations.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
# 1/sqrt(2*pi)-> 0.3989423
# 1/sqrt(2) -> 0.70710678
# sqrt(2/pi) -> 0.79788456
# th... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/ops/activations.py |
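The constants listed in the `activations.py` header above (1/sqrt(2*pi) ≈ 0.3989423, 1/sqrt(2) ≈ 0.70710678, sqrt(2/pi) ≈ 0.79788456) are the ones that appear in the standard GELU formulations; the first is the standard-normal pdf value used in the backward pass. For reference (not the repo's fused code):

```python
# Reference GELU formulations using the constants from the comment above.
import torch

def gelu_erf(x):
    # exact GELU: x * Phi(x); 0.70710678 = 1/sqrt(2)
    return x * 0.5 * (1.0 + torch.erf(x * 0.70710678))

def gelu_tanh(x):
    # tanh approximation; 0.79788456 = sqrt(2/pi)
    return 0.5 * x * (1.0 + torch.tanh(0.79788456 * x * (1.0 + 0.044715 * x * x)))
```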
EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/ops/__init__.py | |
# Copyright (c) 2023, Tri Dao.
# Inspired by https://github.com/NVIDIA/apex/blob/master/apex/fused_dense/fused_dense.py
# We make it work with pytorch amp and with bfloat16.
# The TensorParallel linear modules are inspired by https://github.com/NVIDIA/apex/blob/master/apex/transformer/tensor_parallel/layers.py
from typ... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/ops/fused_dense.py |
# Copyright (c) 2022, Tri Dao.
# Adapted from https://github.com/NVIDIA/apex/blob/master/apex/contrib/layer_norm/layer_norm.py
import torch
from torch.nn import init
from flash_attn.ops.layer_norm import DropoutAddLayerNormFn, DropoutAddLayerNormSubsetFn
from flash_attn.ops.layer_norm import DropoutAddLayerNormParall... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/ops/rms_norm.py |
# Copyright (c) 2022, Tri Dao.
# Adapted from https://github.com/NVIDIA/apex/blob/master/apex/contrib/layer_norm/layer_norm.py
import torch
from torch.nn import init
import dropout_layer_norm
def _dropout_add_layer_norm_forward(x0, residual, gamma, beta, rowscale, colscale, dropout_p,
... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/ops/layer_norm.py |
# Adapted on https://github.com/ELS-RD/kernl/blob/main/src/kernl/implementations/linear_layer.py
# and https://github.com/openai/triton/blob/master/python/triton/ops/matmul.py
from typing import Optional
import torch
import triton
import triton.language as tl
from torch.autograd.function import FunctionCtx
from torch.... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/ops/triton/linear.py |
# Adapted from https://github.com/facebookresearch/xformers/blob/main/xformers/triton/k_activations.py
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from e... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/ops/triton/k_activations.py |
# The triton fused matmul + sqrelu is faster for fp16 but slower for bf16, compared
# to naive implementation.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import custom_bwd, custom_fwd
import fused_dense_lib as fused_dense_cuda
from flash_attn.ops.triton.linear import triton... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/ops/triton/mlp.py |
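For context on the `triton/mlp.py` comment above: the activation being fused is squared ReLU ("sqrelu"). Assuming that reading, its unfused reference form is simply:

```python
import torch.nn.functional as F

def sqrelu(x):
    # squared ReLU: relu(x) ** 2
    return F.relu(x) ** 2
```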
# Copyright (c) 2022, Tri Dao.
import torch
import torch.nn as nn
from torch import Tensor
from einops import rearrange
from flash_attn.utils.distributed import reduce_scatter, all_reduce
class GPT2Embeddings(nn.Module):
def __init__(self, embed_dim, vocab_size, max_position_embeddings, padding_idx=None,
... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/modules/embedding.py |
EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/modules/__init__.py | |
# Copyright (c) 2022, Tri Dao.
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from flash_attn.ops.fused_dense import FusedMLP, ParallelFusedMLP
except ImportError:
FusedMLP, ParallelFusedMLP = None, None
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/modules/mlp.py |
# Copyright (c) 2022, Tri Dao.
from typing import Optional
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torchvision.ops import StochasticDepth
from flash_attn.modules.mha import MHA
from flash_attn.modules.mlp import Mlp
try:
fro... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/modules/block.py |
# Copyright (c) 2022, Tri Dao.
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
try:
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
from flash_attn.flash_attn_interface import flash_attn_... | EXA-1-master | exa/modular_components/attentions/flash-attention/flash_attn/modules/mha.py |
import math
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
from local_attention import LocalMHA
from einops import rearrange, repeat, pack, unpack
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(va... | EXA-1-master | exa/modular_components/attentions/conditional_flash_attention/other.py |
EXA-1-master | exa/modular_components/attentions/conditional_flash_attention/coltflash.py | |
# from nebulaV4 import one_hot_encoding
# from nebulaV4 import Nebula
# import torch
# import numpy as np
# import matplotlib.pyplot as plt
# import torch.nn as nn
# class LossFunction:
# def compute_loss(self, y_pred, y_true):
# raise NotImplemented("compute_loss method must be implemented")
# #impl... | EXA-1-master | exa/modular_components/lossFunctions/nebula/test.py |