| python_code (string, 0-992k) | repo_name (string, 8-46) | file_path (string, 5-162) |
|---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS:
# inspired by
# https://github.com/nateraw/lightning-vision-transformer
# which in turn references https://github.com/lucidrains/vit-pytorch
# Original author: Sean Naren
import math
from enum import Enum
import pytorch_lightning as pl
import torch
from pl_bolts.datamodules import CIFAR10DataModule
from torch import nn
from torchmetrics import Accuracy
from xformers.factory import xFormer, xFormerConfig
class Classifier(str, Enum):
GAP = "gap"
TOKEN = "token"
class VisionTransformer(pl.LightningModule):
def __init__(
self,
steps,
learning_rate=5e-4,
betas=(0.9, 0.99),
weight_decay=0.03,
image_size=32,
num_classes=10,
patch_size=2,
dim=384,
n_layer=6,
n_head=6,
resid_pdrop=0.0,
attn_pdrop=0.0,
mlp_pdrop=0.0,
attention="scaled_dot_product",
residual_norm_style="pre",
hidden_layer_multiplier=4,
use_rotary_embeddings=True,
linear_warmup_ratio=0.1,
classifier: Classifier = Classifier.TOKEN,
):
super().__init__()
# all the inputs are saved under self.hparams (hyperparams)
self.save_hyperparameters()
assert image_size % patch_size == 0
num_patches = (image_size // patch_size) ** 2
# A list of the encoder or decoder blocks which constitute the Transformer.
xformer_config = [
{
"block_type": "encoder",
"num_layers": n_layer,
"dim_model": dim,
"residual_norm_style": residual_norm_style,
"multi_head_config": {
"num_heads": n_head,
"residual_dropout": resid_pdrop,
"use_rotary_embeddings": use_rotary_embeddings,
"attention": {
"name": attention,
"dropout": attn_pdrop,
"causal": False,
},
},
"feedforward_config": {
"name": "FusedMLP",
"dropout": mlp_pdrop,
"activation": "gelu",
"hidden_layer_multiplier": hidden_layer_multiplier,
},
"position_encoding_config": {
"name": "learnable",
"seq_len": num_patches,
"dim_model": dim,
"add_class_token": classifier == Classifier.TOKEN,
},
"patch_embedding_config": {
"in_channels": 3,
"out_channels": dim,
"kernel_size": patch_size,
"stride": patch_size,
},
}
]
# The ViT trunk
config = xFormerConfig(xformer_config)
self.vit = xFormer.from_config(config)
print(self.vit)
# The classifier head
self.ln = nn.LayerNorm(dim)
self.head = nn.Linear(dim, num_classes)
self.criterion = torch.nn.CrossEntropyLoss()
self.val_accuracy = Accuracy()
@staticmethod
def linear_warmup_cosine_decay(warmup_steps, total_steps):
"""
Linear warmup for warmup_steps, with cosine annealing to 0 at total_steps
"""
def fn(step):
if step < warmup_steps:
return float(step) / float(max(1, warmup_steps))
progress = float(step - warmup_steps) / float(
max(1, total_steps - warmup_steps)
)
return 0.5 * (1.0 + math.cos(math.pi * progress))
return fn
def configure_optimizers(self):
optimizer = torch.optim.AdamW(
self.parameters(),
lr=self.hparams.learning_rate,
betas=self.hparams.betas,
weight_decay=self.hparams.weight_decay,
)
warmup_steps = int(self.hparams.linear_warmup_ratio * self.hparams.steps)
scheduler = {
"scheduler": torch.optim.lr_scheduler.LambdaLR(
optimizer,
self.linear_warmup_cosine_decay(warmup_steps, self.hparams.steps),
),
"interval": "step",
}
return [optimizer], [scheduler]
def forward(self, x):
x = self.vit(x)
x = self.ln(x)
if self.hparams.classifier == Classifier.TOKEN:
x = x[:, 0] # only consider the token, we're classifying anyway
elif self.hparams.classifier == Classifier.GAP:
x = x.mean(dim=1) # mean over sequence len
x = self.head(x)
return x
def training_step(self, batch, _):
x, y = batch
y_hat = self(x)
loss = self.criterion(y_hat, y)
self.logger.log_metrics(
{
"train_loss": loss.mean(),
"learning_rate": self.lr_schedulers().get_last_lr()[0],
},
step=self.global_step,
)
return loss
def evaluate(self, batch, stage=None):
x, y = batch
y_hat = self(x)
loss = self.criterion(y_hat, y)
acc = self.val_accuracy(y_hat, y)
if stage:
self.log(f"{stage}_loss", loss, prog_bar=True)
self.log(f"{stage}_acc", acc, prog_bar=True)
def validation_step(self, batch, _):
self.evaluate(batch, "val")
def test_step(self, batch, _):
self.evaluate(batch, "test")
if __name__ == "__main__":
pl.seed_everything(42)
# Adjust batch depending on the available memory on your machine.
# You can also use reversible layers to save memory
REF_BATCH = 512
BATCH = 128
MAX_EPOCHS = 30
NUM_WORKERS = 4
GPUS = 1
# We'll use a datamodule here, which already handles dataset/dataloader/sampler
# - See https://pytorchlightning.github.io/lightning-tutorials/notebooks/lightning_examples/cifar10-baseline.html
# for a full tutorial
# - Please note that default transforms are being used
dm = CIFAR10DataModule(
data_dir="data",
batch_size=BATCH,
num_workers=NUM_WORKERS,
pin_memory=True,
)
image_size = dm.size(-1) # 32 for CIFAR
num_classes = dm.num_classes # 10 for CIFAR
# compute total number of steps
batch_size = BATCH * GPUS
steps = dm.num_samples // REF_BATCH * MAX_EPOCHS
lm = VisionTransformer(
steps=steps,
image_size=image_size,
num_classes=num_classes,
attention="scaled_dot_product",
classifier=Classifier.TOKEN,
residual_norm_style="pre",
use_rotary_embeddings=True,
)
trainer = pl.Trainer(
gpus=GPUS,
max_epochs=MAX_EPOCHS,
detect_anomaly=False,
precision=16,
accumulate_grad_batches=REF_BATCH // BATCH,
)
trainer.fit(lm, dm)
# check the training
trainer.test(lm, datamodule=dm)
| EXA-1-master | exa/libraries/xformers/examples/cifar_ViT.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# A MinGPT + Lightning + xFormers example. Code from Sean Naren (@seannaren)
# This is an homage to https://github.com/karpathy/minGPT
import math
import os
import pytorch_lightning as pl
import torch
import torch.nn as nn
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.utilities import rank_zero_info
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset, RandomSampler
from xformers.factory.model_factory import xFormer, xFormerConfig
class GPT(pl.LightningModule):
"""the full GPT language model, with a context size of block_size"""
def __init__(
self,
vocab_size,
weight_decay=0.1,
betas=(0.9, 0.95),
learning_rate=6e-4,
n_embd=512,
block_size=128,
n_layer=8,
n_head=8,
resid_pdrop=0.1,
attn_pdrop=0.1,
mlp_pdrop=0.1,
attention="scaled_dot_product",
hidden_layer_multiplier=4,
warmup_tokens=20,
final_tokens=1000,
):
super().__init__()
# auto creates self.hparams from the method signature
self.save_hyperparameters()
# A list of the encoder or decoder blocks which constitute the Transformer.
xformer_config = [
{
"reversible": False, # Turn on to test the effect of using reversible layers
"block_type": "encoder",
"num_layers": self.hparams.n_layer,
"dim_model": self.hparams.n_embd,
"residual_norm_style": "post",
"position_encoding_config": {
"name": "vocab",
"seq_len": self.hparams.block_size,
"vocab_size": self.hparams.vocab_size,
},
"multi_head_config": {
"num_heads": self.hparams.n_head,
"residual_dropout": self.hparams.resid_pdrop,
"use_rotary_embeddings": True,
"attention": {
"name": self.hparams.attention,
"dropout": self.hparams.attn_pdrop,
"causal": True,
"seq_len": self.hparams.block_size,
"num_rules": self.hparams.n_head,
},
},
"feedforward_config": {
"name": "FusedMLP", # Use MLP if Triton is not available
"dropout": self.hparams.mlp_pdrop,
"activation": "gelu",
"hidden_layer_multiplier": self.hparams.hidden_layer_multiplier,
},
}
]
config = xFormerConfig(xformer_config)
config.weight_init = "small"
self.model = xFormer.from_config(config)
# decoder head
self.ln_f = nn.LayerNorm(self.hparams.n_embd)
self.head = nn.Linear(self.hparams.n_embd, self.hparams.vocab_size, bias=False)
self.block_size = self.hparams.block_size
self.apply(self._init_weights)
self._tokens_seen = 0
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
# Reset the token counter
self._tokens_seen = 0
def get_block_size(self):
return self.block_size
def configure_optimizers(self):
# Create the optimizer and the training schedule:
# - Handle the per-param weight decay
no_decay = ["bias", "LayerNorm.weight"]
params_decay = [
p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay)
]
params_nodecay = [
p for n, p in self.named_parameters() if any(nd in n for nd in no_decay)
]
optim_groups = [
{"params": params_decay, "weight_decay": self.hparams.weight_decay},
{"params": params_nodecay, "weight_decay": 0.0},
]
# - Start with a warm up, ramp up then cosine
optimizer = torch.optim.AdamW(
optim_groups, lr=self.hparams.learning_rate, betas=self.hparams.betas
)
def update_lr(*_):
config = self.hparams
if self._tokens_seen < config.warmup_tokens:
# linear warmup
lr_mult = float(self._tokens_seen) / float(max(1, config.warmup_tokens))
lr_mult = max(lr_mult, 1e-2) # could be that we've not seen any yet
else:
# cosine learning rate decay
progress = float(self._tokens_seen - config.warmup_tokens) / float(
max(1, config.final_tokens - config.warmup_tokens)
)
lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
return lr_mult
lr_scheduler = {
"scheduler": torch.optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=[update_lr, update_lr],
),
"name": "learning_rate",
"interval": "step", # The unit of the scheduler's step size
"frequency": 1, # The frequency of the scheduler
}
return [optimizer], [lr_scheduler]
def forward(self, src):
# predict the next tokens (in latent space)
prediction = self.model(src)
# translate the predictions into tokens
prediction = self.ln_f(prediction)
logits = self.head(prediction)
return logits
def training_step(self, batch, _):
src, targets = batch
# Update the tokens we've seen (tracked for LR scheduling)
self._tokens_seen += (src >= 0).numel()
# same action as inference
logits = self(src)
# if we are given some desired targets also calculate the loss
loss = None
if targets is not None:
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
self.logger.log_metrics(
{
"train_loss": loss.mean(),
"learning_rate": self.lr_schedulers().get_last_lr()[0],
},
step=self.global_step,
)
return loss
class CharDataset(Dataset):
def __init__(self, data, block_size):
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
rank_zero_info("data has %d characters, %d unique." % (data_size, vocab_size))
self.stoi = {ch: i for i, ch in enumerate(chars)}
self.itos = {i: ch for i, ch in enumerate(chars)}
self.block_size = block_size
self.vocab_size = vocab_size
self.data = data
def __len__(self):
return len(self.data) - self.block_size
def __getitem__(self, i):
chunk = self.data[i : i + self.block_size + 1]
dix = [self.stoi[s] for s in chunk]
# src and target are off by one, we want the model to predict the next character
x = torch.tensor(dix[:-1], dtype=torch.long)
y = torch.tensor(dix[1:], dtype=torch.long)
return x, y
def to_tokens(self, message, device):
return torch.tensor([self.stoi[s] for s in message], dtype=torch.long)[
None, ...
].to(device)
def from_tokens(self, tokens):
return "".join([self.itos[int(i)] for i in tokens])
@torch.no_grad()
def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
"""
take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
the sequence, feeding the predictions back into the model each time. Clearly the sampling
has quadratic complexity unlike an RNN that is only linear, and has a finite context window
of block_size, unlike an RNN that has an infinite context window.
"""
block_size = model.get_block_size()
model.eval()
# CREDITS: https://github.com/karpathy/minGPT/blob/master/mingpt/utils.py
def top_k_logits(logits, k):
v, _ = torch.topk(logits, k)
out = logits.clone()
out[out < v[:, [-1]]] = -float("Inf")
return out
for _ in range(steps):
x_cond = (
x if x.size(1) <= block_size else x[:, -block_size:]
) # crop context if needed
logits = model(x_cond)
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1, :] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
ix = torch.multinomial(probs, num_samples=1)
else:
_, ix = torch.topk(probs, k=1, dim=-1)
# append to the sequence and continue
x = torch.cat((x, ix), dim=1)
return x[0] # escape the batch dimension
if __name__ == "__main__":
seed_everything(42)
# Adjust batch depending on the available memory on your machine.
# You can also use reversible layers to save memory
REF_BATCH = 512
BATCH = 128
WORKERS = 4
EPOCHS = 1
BLOCK = 128
WARMUP = 20
if not os.path.exists("input.txt"):
os.system(
"wget https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt"
)
text = open("input.txt", "r").read()
train_dataset = CharDataset(
text, BLOCK
) # one line of poem is roughly 50 characters
random_sampler = RandomSampler(train_dataset)
train_loader = DataLoader(
train_dataset,
sampler=random_sampler,
batch_size=BATCH,
num_workers=WORKERS,
pin_memory=True,
)
model = GPT(
vocab_size=train_dataset.vocab_size,
block_size=train_dataset.block_size,
attention="scaled_dot_product",
warmup_tokens=REF_BATCH * WARMUP,
final_tokens=EPOCHS * len(train_dataset) * BLOCK,
)
print(model)
trainer = Trainer(
gpus=1,
max_epochs=EPOCHS,
precision=16,
log_every_n_steps=1,
accumulate_grad_batches=REF_BATCH // BATCH,
)
trainer.fit(model, train_loader)
# Sample from the model, let it predict a paragraph
context = "Friends of my soul" # prime with something
x = train_dataset.to_tokens(context, model.device)
y = sample(model, x, steps=1000, temperature=1.0, sample=True, top_k=10)
print(train_dataset.from_tokens(y))
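# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original example): CharDataset maps characters
# to integer ids and back, and __getitem__ returns (x, y) shifted by one position
# so the model is trained on next-character prediction. The toy string is arbitrary.
if __name__ == "__main__":
    toy = CharDataset("hello world", block_size=4)
    x0, y0 = toy[0]
    assert toy.from_tokens(x0) == "hell" and toy.from_tokens(y0) == "ello"
    tokens = toy.to_tokens("hello", device="cpu")  # shape (1, 5)
    assert toy.from_tokens(tokens[0]) == "hello"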
| EXA-1-master | exa/libraries/xformers/examples/microGPT.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from omegaconf import DictConfig
from xformers.factory.hydra_helper import import_xformer_config_schema
@hydra.main(config_path="conf", config_name="config")
def my_app(cfg: DictConfig) -> None:
model = hydra.utils.instantiate(cfg.xformer, _convert_="all")
print(
f"Built a model with {len(cfg.xformer.stack_configs)} stack: {cfg.xformer.stack_configs.keys()}"
)
print(model)
if __name__ == "__main__":
# optional - only needed when you want to use xformer config dataclass
# to validate config values.
import_xformer_config_schema()
my_app()
| EXA-1-master | exa/libraries/xformers/examples/build_model/my_model.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import glob
import os
import shlex
import subprocess
import sys
import torch
import xformers
# Build failed - return early
if not xformers._has_cpp_library:
print("xFormers wasn't built correctly - can't run benchmarks")
sys.exit(0)
benchmark_script = os.path.join("xformers", "benchmarks", sys.argv[1])
benchmark_fn = sys.argv[2]
label = subprocess.check_output(["git", "rev-parse", "HEAD"], text=True).strip()[:8]
cmd = [
sys.executable,
benchmark_script,
"--label",
label,
"--fn",
benchmark_fn,
"--fail_if_regression",
"--quiet",
]
env = (
torch.cuda.get_device_name(torch.cuda.current_device())
.replace(" ", "_")
.replace("-", "_")
.replace(".", "_")
)
# Figure out the name of the baseline
pattern = os.path.join(os.environ["XFORMERS_BENCHMARKS_CACHE"], benchmark_fn, "*.csv")
ref_names = glob.glob(pattern)
baseline_names = set(
os.path.basename(s)[: -len(".csv")]
for s in ref_names
# Only compare to benchmark data on same hardware
if env in os.path.basename(s)
)
if baseline_names:
if len(baseline_names) > 1:
raise RuntimeError(
f"Supplied more than one reference for this benchmark: {','.join(baseline_names)}"
)
cmd += ["--compare", ",".join(baseline_names)]
print("EXEC:", shlex.join(cmd))
retcode = 0
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
retcode = e.returncode
# Remove original benchmark files
for f in ref_names:
os.remove(f)
# Rename new ones as 'ref'
for f in glob.glob(pattern):
os.rename(f, f.replace(label, "reference"))
sys.exit(retcode)
| EXA-1-master | exa/libraries/xformers/.github/run_benchmark_wrapper.py |
#!/usr/bin/env python
"""
MIT License
Copyright (c) 2017 Guillaume Papin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""A wrapper script around clang-format, suitable for linting multiple files
and to use for continuous integration.
This is an alternative API for the clang-format command line.
It runs over multiple files and directories in parallel.
A diff output is produced and a sensible exit code is returned.
"""
import argparse # noqa: E402
import difflib # noqa: E402
import fnmatch # noqa: E402
import io # noqa: E402
import multiprocessing # noqa: E402
import os # noqa: E402
import signal # noqa: E402
import subprocess # noqa: E402
import sys # noqa: E402
import traceback # noqa: E402
from functools import partial # noqa: E402
from subprocess import DEVNULL # noqa: E402
DEFAULT_EXTENSIONS = "c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,cu"
class ExitStatus:
SUCCESS = 0
DIFF = 1
TROUBLE = 2
def list_files(files, recursive=False, extensions=None, exclude=None):
if extensions is None:
extensions = []
if exclude is None:
exclude = []
out = []
for file in files:
if recursive and os.path.isdir(file):
for dirpath, dnames, fnames in os.walk(file):
fpaths = [os.path.join(dirpath, fname) for fname in fnames]
for pattern in exclude:
# os.walk() supports trimming down the dnames list
# by modifying it in-place,
# to avoid unnecessary directory listings.
dnames[:] = [
x
for x in dnames
if not fnmatch.fnmatch(os.path.join(dirpath, x), pattern)
]
fpaths = [x for x in fpaths if not fnmatch.fnmatch(x, pattern)]
for f in fpaths:
ext = os.path.splitext(f)[1][1:]
if ext in extensions:
out.append(f)
else:
out.append(file)
return out
def make_diff(file, original, reformatted):
return list(
difflib.unified_diff(
original,
reformatted,
fromfile="a/{}\t(original)".format(file),
tofile="b/{}\t(reformatted)".format(file),
n=3,
)
)
class DiffError(Exception):
def __init__(self, message, errs=None):
super(DiffError, self).__init__(message)
self.errs = errs or []
class UnexpectedError(Exception):
def __init__(self, message, exc=None):
super(UnexpectedError, self).__init__(message)
self.formatted_traceback = traceback.format_exc()
self.exc = exc
def run_clang_format_diff_wrapper(args, file):
try:
ret = run_clang_format_diff(args, file)
return ret
except DiffError:
raise
except Exception as e:
raise UnexpectedError("{}: {}: {}".format(file, e.__class__.__name__, e), e)
def run_clang_format_diff(args, file):
try:
with io.open(file, "r", encoding="utf-8") as f:
original = f.readlines()
except IOError as exc:
raise DiffError(str(exc))
invocation = [args.clang_format_executable, file]
# Use of utf-8 to decode the process output.
#
# Hopefully, this is the correct thing to do.
#
# It's done due to the following assumptions (which may be incorrect):
# - clang-format will return the bytes read from the files as-is,
# without conversion, and it is already assumed that the files use utf-8.
# - if the diagnostics were internationalized, they would use utf-8:
# > Adding Translations to Clang
# >
# > Not possible yet!
# > Diagnostic strings should be written in UTF-8,
# > the client can translate to the relevant code page if needed.
# > Each translation completely replaces the format string
# > for the diagnostic.
# > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation
try:
proc = subprocess.Popen(
invocation,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
encoding="utf-8",
)
except OSError as exc:
raise DiffError(
"Command '{}' failed to start: {}".format(
subprocess.list2cmdline(invocation), exc
)
)
proc_stdout = proc.stdout
proc_stderr = proc.stderr
# hopefully the stderr pipe won't get full and block the process
outs = list(proc_stdout.readlines())
errs = list(proc_stderr.readlines())
proc.wait()
if proc.returncode:
raise DiffError(
"Command '{}' returned non-zero exit status {}".format(
subprocess.list2cmdline(invocation), proc.returncode
),
errs,
)
return make_diff(file, original, outs), errs
def bold_red(s):
return "\x1b[1m\x1b[31m" + s + "\x1b[0m"
def colorize(diff_lines):
def bold(s):
return "\x1b[1m" + s + "\x1b[0m"
def cyan(s):
return "\x1b[36m" + s + "\x1b[0m"
def green(s):
return "\x1b[32m" + s + "\x1b[0m"
def red(s):
return "\x1b[31m" + s + "\x1b[0m"
for line in diff_lines:
if line[:4] in ["--- ", "+++ "]:
yield bold(line)
elif line.startswith("@@ "):
yield cyan(line)
elif line.startswith("+"):
yield green(line)
elif line.startswith("-"):
yield red(line)
else:
yield line
def print_diff(diff_lines, use_color):
if use_color:
diff_lines = colorize(diff_lines)
sys.stdout.writelines(diff_lines)
def print_trouble(prog, message, use_colors):
error_text = "error:"
if use_colors:
error_text = bold_red(error_text)
print("{}: {} {}".format(prog, error_text, message), file=sys.stderr)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--clang-format-executable",
metavar="EXECUTABLE",
help="path to the clang-format executable",
default="clang-format",
)
parser.add_argument(
"--extensions",
help="comma separated list of file extensions (default: {})".format(
DEFAULT_EXTENSIONS
),
default=DEFAULT_EXTENSIONS,
)
parser.add_argument(
"-r",
"--recursive",
action="store_true",
help="run recursively over directories",
)
parser.add_argument("files", metavar="file", nargs="+")
parser.add_argument("-q", "--quiet", action="store_true")
parser.add_argument(
"-j",
metavar="N",
type=int,
default=0,
help="run N clang-format jobs in parallel" " (default number of cpus + 1)",
)
parser.add_argument(
"--color",
default="auto",
choices=["auto", "always", "never"],
help="show colored diff (default: auto)",
)
parser.add_argument(
"-e",
"--exclude",
metavar="PATTERN",
action="append",
default=[],
help="exclude paths matching the given glob-like pattern(s)"
" from recursive search",
)
args = parser.parse_args()
# use default signal handling; like diff, return the SIGINT value on ^C
# https://bugs.python.org/issue14229#msg156446
signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
signal.SIGPIPE
except AttributeError:
# compatibility, SIGPIPE does not exist on Windows
pass
else:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
colored_stdout = False
colored_stderr = False
if args.color == "always":
colored_stdout = True
colored_stderr = True
elif args.color == "auto":
colored_stdout = sys.stdout.isatty()
colored_stderr = sys.stderr.isatty()
version_invocation = [args.clang_format_executable, str("--version")]
try:
subprocess.check_call(version_invocation, stdout=DEVNULL)
except subprocess.CalledProcessError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
return ExitStatus.TROUBLE
except OSError as e:
print_trouble(
parser.prog,
"Command '{}' failed to start: {}".format(
subprocess.list2cmdline(version_invocation), e
),
use_colors=colored_stderr,
)
return ExitStatus.TROUBLE
retcode = ExitStatus.SUCCESS
files = list_files(
args.files,
recursive=args.recursive,
exclude=args.exclude,
extensions=args.extensions.split(","),
)
if not files:
return
njobs = args.j
if njobs == 0:
njobs = multiprocessing.cpu_count() + 1
njobs = min(len(files), njobs)
if njobs == 1:
# execute directly instead of in a pool,
# less overhead, simpler stacktraces
it = (run_clang_format_diff_wrapper(args, file) for file in files)
pool = None
else:
pool = multiprocessing.Pool(njobs)
it = pool.imap_unordered(partial(run_clang_format_diff_wrapper, args), files)
while True:
try:
outs, errs = next(it)
except StopIteration:
break
except DiffError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
retcode = ExitStatus.TROUBLE
sys.stderr.writelines(e.errs)
except UnexpectedError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
sys.stderr.write(e.formatted_traceback)
retcode = ExitStatus.TROUBLE
# stop at the first unexpected error,
# something could be very wrong,
# don't process all files unnecessarily
if pool:
pool.terminate()
break
else:
sys.stderr.writelines(errs)
if outs == []:
continue
if not args.quiet:
print_diff(outs, use_color=colored_stdout)
if retcode == ExitStatus.SUCCESS:
retcode = ExitStatus.DIFF
return retcode
if __name__ == "__main__":
sys.exit(main())
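# ---------------------------------------------------------------------------
# Illustrative usage (hypothetical paths): lint C/C++/CUDA sources recursively,
# excluding a vendored directory. The exit code is 0 when nothing would change,
# 1 when a diff is printed and 2 on trouble, which makes the script suitable for CI.
#
#   python run-clang-format.py -r --extensions cpp,cu,h --exclude "*/third_party/*" xformers/csrc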
| EXA-1-master | exa/libraries/xformers/.github/run-clang-format.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import glob
import os
import subprocess
import xformers.benchmarks.utils as utils
class NamedObject:
def __init__(self, name) -> None:
self.__name__ = name
def git_file_at(filename: str, ref: str) -> str:
try:
return subprocess.check_output(
["git", "show", f"{ref}:{filename}"], text=True
).strip()
except subprocess.CalledProcessError:
return "" # File does not exist in that revision
GITHUB_BASE_REF = subprocess.check_output(
["git", "rev-parse", "origin/" + os.environ["GITHUB_BASE_REF"]], text=True
).strip()
XFORMERS_BENCHMARKS_CACHE = os.environ["XFORMERS_BENCHMARKS_CACHE"]
GITHUB_CURRENT_REF = subprocess.check_output(
["git", "rev-parse", "HEAD"], text=True
).strip()
for f in glob.glob(os.path.join(XFORMERS_BENCHMARKS_CACHE, "*", "*.csv")):
before = git_file_at(f, ref=GITHUB_BASE_REF)
now = git_file_at(f, ref=GITHUB_CURRENT_REF)
if before == "" or before == now:
continue
benchmark_name = os.path.basename(os.path.dirname(f))
print("#" * 100)
print(f"# UPDATED: {f}")
print("#" * 100)
filename_before = f.replace("reference", "before")
filename_now = f.replace("reference", "now")
with open(filename_before, "w+") as fd:
fd.write(before)
with open(filename_now, "w+") as fd:
fd.write(now)
utils.benchmark_run_and_compare(
benchmark_fn=NamedObject(benchmark_name),
cases=[],
compare=[
os.path.basename(filename_before)[: -len(".csv")],
os.path.basename(filename_now)[: -len(".csv")],
],
)
| EXA-1-master | exa/libraries/xformers/.github/gpu_benchmark_diff.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import torch
from . import _cpp_lib
try:
from .version import __version__ # noqa: F401
except ImportError:
__version__ = "0.0.0"
logger = logging.getLogger("xformers")
_has_cpp_library: bool = _cpp_lib._cpp_library_load_exception is None
# Set to true to utilize functorch
_is_functorch_available: bool = False
_is_opensource: bool = True
def compute_once(func):
value = None
def func_wrapper():
nonlocal value
if value is None:
value = func()
return value
return func_wrapper
@compute_once
def _is_triton_available():
if not torch.cuda.is_available():
return False
if os.environ.get("XFORMERS_FORCE_DISABLE_TRITON", "0") == "1":
return False
try:
from xformers.triton.softmax import softmax as triton_softmax # noqa
return True
except (ImportError, AttributeError) as e:
logger.warning(
f"A matching Triton is not available, some optimizations will not be enabled.\nError caught was: {e}"
)
return False
if _is_functorch_available:
try:
from xformers.components.nvfuser import NVFusedBiasActivationDropout # noqa
except ImportError as e:
logger.warning(
f"Functorch is not available, some optimizations will not be enabled.\nError caught was: {e}"
)
_is_functorch_available = False
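# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): `compute_once` memoizes a
# zero-argument function, so expensive probes such as `_is_triton_available` only
# run on the first call. The probe below is hypothetical.
if __name__ == "__main__":
    calls = []

    @compute_once
    def expensive_probe():
        calls.append(1)
        return 42

    assert expensive_probe() == 42 and expensive_probe() == 42
    assert len(calls) == 1  # the wrapped function ran only once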
| EXA-1-master | exa/libraries/xformers/xformers/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/xformers/xformers/test.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
import sys
from collections import namedtuple
from dataclasses import fields
from typing import Any, Callable, Dict, List
Item = namedtuple("Item", ["constructor", "config"])
# credit: snippet used in ClassyVision (and probably other places)
def import_all_modules(root: str, base_module: str) -> List[str]:
modules: List[str] = []
for file in os.listdir(root):
if file.endswith((".py", ".pyc")) and not file.startswith("_"):
module = file[: file.find(".py")]
if module not in sys.modules:
module_name = ".".join([base_module, module])
importlib.import_module(module_name)
modules.append(module_name)
return modules
def get_registry_decorator(
class_registry, name_registry, reference_class, default_config
) -> Callable[[str, Any], Callable[[Any], Any]]:
def register_item(name: str, config: Any = default_config):
"""Registers a subclass.
This decorator allows xFormers to instantiate a given subclass
from a configuration file, even if the class itself is not part of the
xFormers library."""
def register_cls(cls):
if name in class_registry:
raise ValueError("Cannot register duplicate item ({})".format(name))
if not issubclass(cls, reference_class):
raise ValueError(
"Item ({}: {}) must extend the base class: {}".format(
name, cls.__name__, reference_class.__name__
)
)
if cls.__name__ in name_registry:
raise ValueError(
"Cannot register item with duplicate class name ({})".format(
cls.__name__
)
)
class_registry[name] = Item(constructor=cls, config=config)
name_registry.add(cls.__name__)
return cls
return register_cls
return register_item
def generate_matching_config(superset: Dict[str, Any], config_class: Any) -> Any:
"""Given a superset of the inputs and a reference config class,
return exactly the needed config"""
# Extract the required fields
field_names = list(map(lambda x: x.name, fields(config_class)))
subset = {k: v for k, v in superset.items() if k in field_names}
# The missing fields get Noned
for k in field_names:
if k not in subset.keys():
subset[k] = None
return config_class(**subset)
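# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): `generate_matching_config`
# keeps only the keys the target dataclass declares and sets the missing ones to
# None. The `_ToyConfig` dataclass below is hypothetical.
if __name__ == "__main__":
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class _ToyConfig:
        dim_model: int
        dropout: Optional[float]

    cfg = generate_matching_config({"dim_model": 384, "num_heads": 6}, _ToyConfig)
    assert cfg == _ToyConfig(dim_model=384, dropout=None)  # "num_heads" was dropped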
| EXA-1-master | exa/libraries/xformers/xformers/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import dataclasses
import json
import logging
import os
import platform
from typing import Any, Dict, Optional
import torch
logger = logging.getLogger("xformers")
UNAVAILABLE_FEATURES_MSG = (
" Memory-efficient attention, SwiGLU, sparse and more won't be available."
)
@dataclasses.dataclass
class _BuildInfo:
metadata: Dict[str, Any]
@property
def cuda_version(self) -> Optional[int]:
return self.metadata["version"]["cuda"]
@property
def torch_version(self) -> str:
return self.metadata["version"]["torch"]
@property
def python_version(self) -> str:
return self.metadata["version"]["python"]
@property
def build_env(self) -> Dict[str, Any]:
return self.metadata["env"]
class xFormersWasNotBuiltException(Exception):
def __str__(self) -> str:
return (
"Need to compile C++ extensions to use all xFormers features.\n"
" Please install xformers properly "
"(see https://github.com/facebookresearch/xformers#installing-xformers)\n"
+ UNAVAILABLE_FEATURES_MSG
)
class xFormersInvalidLibException(Exception):
def __init__(self, build_info: Optional[_BuildInfo]) -> None:
self.build_info = build_info
def __str__(self) -> str:
if self.build_info is None:
msg = "xFormers was built for a different version of PyTorch or Python."
else:
msg = f"""xFormers was built for:
PyTorch {self.build_info.torch_version} with CUDA {self.build_info.cuda_version} (you have {torch.__version__})
Python {self.build_info.python_version} (you have {platform.python_version()})"""
return (
"xFormers can't load C++/CUDA extensions. "
+ msg
+ "\n Please reinstall xformers "
"(see https://github.com/facebookresearch/xformers#installing-xformers)\n"
+ UNAVAILABLE_FEATURES_MSG
)
def _register_extensions():
import importlib
import os
import torch
# load the custom_op_library and register the custom ops
lib_dir = os.path.dirname(__file__)
if os.name == "nt":
# Register the xformers library location on the default DLL path
import ctypes
import sys
kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
prev_error_mode = kernel32.SetErrorMode(0x0001)
if with_load_library_flags:
kernel32.AddDllDirectory.restype = ctypes.c_void_p
if sys.version_info >= (3, 8):
os.add_dll_directory(lib_dir)
elif with_load_library_flags:
res = kernel32.AddDllDirectory(lib_dir)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
err.strerror += f' Error adding "{lib_dir}" to the DLL directories.'
raise err
kernel32.SetErrorMode(prev_error_mode)
loader_details = (
importlib.machinery.ExtensionFileLoader,
importlib.machinery.EXTENSION_SUFFIXES,
)
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
ext_specs = extfinder.find_spec("_C")
if ext_specs is None:
raise xFormersWasNotBuiltException()
cpp_lib_json = os.path.join(lib_dir, "cpp_lib.json")
with open(cpp_lib_json, "r") as fp:
build_metadata = _BuildInfo(json.load(fp))
try:
torch.ops.load_library(ext_specs.origin)
except OSError as exc:
raise xFormersInvalidLibException(build_metadata) from exc
return build_metadata
_cpp_library_load_exception = None
_build_metadata: Optional[_BuildInfo] = None
try:
_build_metadata = _register_extensions()
except (xFormersInvalidLibException, xFormersWasNotBuiltException) as e:
ENV_VAR_FOR_DETAILS = "XFORMERS_MORE_DETAILS"
if os.environ.get(ENV_VAR_FOR_DETAILS, False):
logger.warning(f"WARNING[XFORMERS]: {e}", exc_info=e)
else:
logger.warning(
f"WARNING[XFORMERS]: {e}\n Set {ENV_VAR_FOR_DETAILS}=1 for more details"
)
_cpp_library_load_exception = e
_built_with_cuda = (
_build_metadata is not None and _build_metadata.cuda_version is not None
)
| EXA-1-master | exa/libraries/xformers/xformers/_cpp_lib.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict
import torch
from . import (
__version__,
_cpp_lib,
_is_functorch_available,
_is_opensource,
_is_triton_available,
ops,
)
from .ops.common import OPERATORS_REGISTRY
def get_features_status() -> Dict[str, str]:
features = {}
for op in OPERATORS_REGISTRY:
status_str = "available" if op.is_available() else "unavailable"
features[f"{op.OPERATOR_CATEGORY}.{op.NAME}"] = status_str
for k, v in ops.swiglu_op._info().items():
features[f"swiglu.{k}"] = v
features["is_triton_available"] = str(_is_triton_available())
features["is_functorch_available"] = str(_is_functorch_available)
return features
def print_info():
features = get_features_status()
print(f"xFormers {__version__}")
features["pytorch.version"] = torch.__version__
if torch.cuda.is_available():
features["pytorch.cuda"] = "available"
device = torch.cuda.current_device()
cap = torch.cuda.get_device_capability(device)
features["gpu.compute_capability"] = ".".join(str(ver) for ver in cap)
features["gpu.name"] = torch.cuda.get_device_name(device)
else:
features["pytorch.cuda"] = "not available"
build_info = _cpp_lib._build_metadata
if build_info is None and isinstance(
_cpp_lib._cpp_library_load_exception, _cpp_lib.xFormersInvalidLibException
):
build_info = _cpp_lib._cpp_library_load_exception.build_info
if build_info is not None:
features["build.info"] = "available"
features["build.cuda_version"] = build_info.cuda_version
features["build.python_version"] = build_info.python_version
features["build.torch_version"] = build_info.torch_version
for k, v in build_info.build_env.items():
features[f"build.env.{k}"] = v
else:
features["build.info"] = "none"
try:
features["build.nvcc_version"] = ".".join(
str(v) for v in torch.ops.xformers._nvcc_build_version()
)
except (RuntimeError, AttributeError):
pass
if _is_opensource:
features["source.privacy"] = "open source"
else:
features["source.privacy"] = "fairinternal"
for name, status in features.items():
print("{:<50} {}".format(f"{name}:", status))
if __name__ == "__main__":
print_info()
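# ---------------------------------------------------------------------------
# Usage note: because `print_info()` is guarded by `__main__`, the feature table
# can be printed from the command line with
#
#   python -m xformers.info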
| EXA-1-master | exa/libraries/xformers/xformers/info.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Generates combination of kernels - implementations and registry
# Kernels are ordered (see `sort_index`), and when dispatching,
# we select the first kernel in the list that supports the inputs
import collections
import itertools
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Optional, Tuple, TypeVar
DTYPES = {
"f32": "float",
"f16": "cutlass::half_t",
"bf16": "cutlass::bfloat16_t",
}
SM = [50, 70, 75, 80]
KERNEL_IMPL_TEMPLATE = """__global__ void __launch_bounds__(
{CPP_CLASS}::kNumThreads,
{CPP_CLASS}::kMinBlocksPerSm)
{NAME}(typename {CPP_CLASS}::Params p) {{
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= {SM}0
#if __CUDA_ARCH__ < {SM_MAX}0
if (!p.advance_to_block()) {{
return;
}}
{CPP_CLASS}::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `{NAME}` is for sm{SM}-sm{SM_MAX}, but was built for sm%d\\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}}
"""
@dataclass(order=True)
class FwdKernel:
sort_index: Tuple[int, ...] = field(init=False, repr=False)
aligned: bool
dtype: str
sm_range: Tuple[int, int]
q: int
k: int
max_k: int
supports_dropout: bool = True
supports_bias: bool = True
dispatch_cond: Optional[str] = None
def __post_init__(self) -> None:
# Set kernel selection priority
# The lowest value that matches inputs
# will be selected
self.sort_index = (
# First select aligned kernel
0 if self.aligned else 1,
# Then keep output in RF
self.max_k,
self.k,
# Prefer kernels without dropout/bias if available
1 if self.supports_dropout else 0,
1 if self.supports_bias else 0,
)
@property
def _aligned_suffix(self) -> str:
return "aligned" if self.aligned else "notaligned"
@property
def name(self) -> str:
acc = "rf" if self.max_k <= self.k else "gmem"
return f"fmha_cutlassF_{self.dtype}_{self._aligned_suffix}_{self.q}x{self.k}_{acc}_sm{self.sm_range[0]}"
@property
def cpp_class(self) -> str:
template_args = ", ".join(
[
DTYPES[self.dtype],
f"cutlass::arch::Sm{self.sm_range[0]}",
"true" if self.aligned else "false",
str(self.q),
str(self.k),
str(self.max_k),
"true" if self.supports_dropout else "false",
"true" if self.supports_bias else "false",
]
)
return f"AttentionKernel<{template_args}>"
@property
def impl_group(self) -> str:
# Maps to file which will contain the implementation
return f"{self.dtype}_{self._aligned_suffix}"
@property
def cpp_impl(self) -> str:
return KERNEL_IMPL_TEMPLATE.format(
CPP_CLASS=self.cpp_class,
NAME=self.name,
SM=self.sm_range[0],
SM_MAX=self.sm_range[1],
)
@classmethod
def get_all(cls) -> List["FwdKernel"]:
kernels: List[FwdKernel] = []
for aligned, dtype, (sm, sm_max) in itertools.product(
[True, False], DTYPES.keys(), zip(SM, SM[1:] + [90])
):
# Remove some kernels we don't use
if dtype == "bf16" and sm < 80:
continue
if not aligned and sm >= 80:
continue
for q, k, max_k in [
(64, 64, 64),
# We get better perf with 64x128 on A100
(64 if sm > 75 else 32, 128, 128),
(32, 128, 2**16),
]:
kernels.append(
cls(
aligned=aligned,
dtype=dtype,
sm_range=(sm, sm_max),
q=q,
k=k,
max_k=max_k,
)
)
return kernels
@dataclass(order=True)
class BwdKernel:
sort_index: Tuple[int, ...] = field(init=False, repr=False)
sm_range: Tuple[int, int]
dtype: str
aligned: bool
apply_dropout: bool
preload_mmas: bool
block_i: int
block_j: int
max_k: int
dispatch_cond: Optional[str] = None
keys_queries_aligned_to_blocksizes: bool = False
def __post_init__(self) -> None:
# Set kernel selection priority
# The lowest value that matches inputs
# will be selected
self.sort_index = (
# First select aligned kernel
0 if self.aligned else 1,
# Take a kernel without dropout if possible
1 if self.apply_dropout else 0,
# Then take the smallest maxK
self.max_k,
# .. and the highest block_i
-self.block_i,
# and finally avoid bounds-checks if possible
0 if self.keys_queries_aligned_to_blocksizes else 1,
)
@property
def _aligned_suffix(self) -> str:
return "aligned" if self.aligned else "notaligned"
@property
def name(self) -> str:
dropout_suffix = "_dropout" if self.apply_dropout else ""
seqlen_aligned_suffix = (
"_seqaligned" if self.keys_queries_aligned_to_blocksizes else ""
)
return (
f"fmha_cutlassB_{self.dtype}_{self._aligned_suffix}"
f"_{self.block_i}x{self.block_j}_k{self.max_k}{dropout_suffix}{seqlen_aligned_suffix}_sm{self.sm_range[0]}"
)
@property
def cpp_class(self) -> str:
template_args = ", ".join(
[
f"cutlass::arch::Sm{self.sm_range[0]}",
DTYPES[self.dtype],
"true" if self.aligned else "false",
"true" if self.apply_dropout else "false",
"true" if self.preload_mmas else "false",
str(self.block_i),
str(self.block_j),
str(self.max_k),
]
)
if self.keys_queries_aligned_to_blocksizes:
template_args += ", true"
return f"AttentionBackwardKernel<{template_args}>"
@property
def impl_group(self) -> str:
# Maps to file which will contain the implementation
dropout_suffix = "_dropout" if self.apply_dropout else ""
return f"{self.dtype}_{self._aligned_suffix}_k{self.max_k}{dropout_suffix}"
@property
def cpp_impl(self) -> str:
return KERNEL_IMPL_TEMPLATE.format(
CPP_CLASS=self.cpp_class,
NAME=self.name,
SM=self.sm_range[0],
SM_MAX=self.sm_range[1],
)
@classmethod
def get_all(cls) -> List["BwdKernel"]:
kernels: List[BwdKernel] = []
for aligned, dtype, (sm, sm_max), apply_dropout, max_k in itertools.product(
[True, False],
DTYPES.keys(),
zip(SM, SM[1:] + [90]),
[True, False],
[32, 64, 128, 2**16],
):
if dtype == "bf16" and sm < 80:
continue
if not aligned and sm >= 80:
continue
is_half = dtype in ["bf16", "f16"]
bi_values = [64]
# Some architectures have more shmem and can use 128
# We still need fallback to 64 for GPUs with less shmem
# (Sm75, Sm86 ...)
if sm >= 80 or (sm >= 70 and is_half):
if max_k > 64:
bi_values.append(128)
for bi in bi_values:
output_in_rf = is_half and max_k <= bi
preload_mmas = is_half and sm >= 80 and output_in_rf
bj = 128 if (preload_mmas and max_k > 64) else 64
kernels.append(
cls(
aligned=aligned,
dtype=dtype,
sm_range=(sm, sm_max),
apply_dropout=apply_dropout,
preload_mmas=preload_mmas,
block_i=bi,
block_j=bj,
max_k=max_k,
)
)
# A few specialized kernels that are faster
if apply_dropout or max_k > 128 or not is_half or not aligned:
continue
if sm not in [70, 80]:
continue
kernels.append(
cls(
aligned=aligned,
dtype=dtype,
sm_range=(sm, sm_max),
apply_dropout=apply_dropout,
preload_mmas=preload_mmas,
block_i=bi,
block_j=bj,
max_k=max_k,
keys_queries_aligned_to_blocksizes=True,
)
)
# Add some specialized kernels for stable diffusion BW (K=80)
# This is the only kernel that can keep the outputs on RF on
# Sm86/Sm89, so it's much faster than the 64x64 one
for dtype in ["f16", "bf16"]:
kernels.append(
cls(
aligned=True,
dtype=dtype,
sm_range=(80, 90),
apply_dropout=False,
preload_mmas=True,
block_i=128,
block_j=64,
max_k=96,
# Sm80 has a faster kernel for this case
dispatch_cond="cc == 86 || cc == 89",
)
)
return kernels
T = TypeVar("T", FwdKernel, BwdKernel)
def write_decl_impl(
kernels: List[T], family_name: str, impl_file: str, disable_def: str
) -> None:
cpp_file_header = """/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
"""
kernels.sort()
implfile_to_kernels: Dict[str, List[T]] = collections.defaultdict(list)
cat_to_kernels: Dict[Tuple[str, int, int], List[T]] = collections.defaultdict(list)
dispatch_all = ""
declarations = cpp_file_header + "#pragma once\n"
declarations += f"#ifndef {disable_def}\n"
declarations += f"""#include "../{impl_file}"\n"""
# Declaration of kernel functions
for k in kernels:
implfile_to_kernels[k.impl_group].append(k)
cat_to_kernels[(k.dtype, k.sm_range[0], k.sm_range[1])].append(k)
for (cat_dt, cat_sm, cat_sm_max), kernels in cat_to_kernels.items():
declarations += f"// ======== {cat_dt} / sm{cat_sm} ========\n"
declarations += "\n".join(
k.cpp_impl.split("{")[0].rstrip() + ";" for k in kernels
)
dispatch_category_fn = f"dispatch_{family_name}_{cat_dt}_sm{cat_sm}"
declarations += (
f"\n\ntemplate <typename T> void {dispatch_category_fn}(T cb, int cc) {{\n"
)
for k in kernels:
_call = f"cb({k.cpp_class}(), {k.name});\n"
if k.dispatch_cond is not None:
_call = f"if ({k.dispatch_cond}) {_call}"
declarations += f" {_call}"
declarations += "}\n\n"
dispatch_all += f"""
if (std::is_same<DT, {DTYPES[cat_dt]}>::value && {cat_sm} <= cc && cc < {cat_sm_max}) {{
{dispatch_category_fn}(cb, cc);
}}"""
declarations += f"""
template <typename DT, typename T>
void dispatch_{family_name}(T cb, int cc = 0) {{
{dispatch_all}
}}
"""
declarations += f"#endif // {disable_def}\n"
autogen_dir = Path(__file__).parent / "autogen"
(autogen_dir / f"{family_name}.h").write_text(declarations)
for f, f_kernels in implfile_to_kernels.items():
impl_cu = cpp_file_header
impl_cu += f"#ifndef {disable_def}\n"
impl_cu += f"""#include "../../{impl_file}"\n"""
for k in f_kernels:
impl_cu += k.cpp_impl
impl_cu += f"#endif // {disable_def}\n"
(autogen_dir / "impl" / f"{family_name}_{f}.cu").write_text(impl_cu)
write_decl_impl(
FwdKernel.get_all(),
"cutlassF",
impl_file="kernel_forward.h",
disable_def="XFORMERS_MEM_EFF_ATTENTION_DISABLE_FORWARD",
)
write_decl_impl(
BwdKernel.get_all(),
"cutlassB",
impl_file="kernel_backward.h",
disable_def="XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD",
)
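# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original generator): each kernel dataclass
# maps deterministically to a kernel name and a C++ template instantiation. The
# instance below is one of the combinations emitted by FwdKernel.get_all().
_example = FwdKernel(aligned=True, dtype="f16", sm_range=(80, 90), q=64, k=64, max_k=64)
assert _example.name == "fmha_cutlassF_f16_aligned_64x64_rf_sm80"
assert _example.impl_group == "f16_aligned"  # i.e. grouped into the f16_aligned .cu file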
| EXA-1-master | exa/libraries/xformers/xformers/csrc/attention/cuda/fmha/generate_kernels.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Optional
import torch
import torch.nn as nn
from torch.cuda.amp import custom_bwd, custom_fwd
from xformers.components.activations import Activation
from xformers.triton.k_activations import get_triton_activation_index
from xformers.triton.k_fused_matmul_bw import fused_matmul_backward
from xformers.triton.k_fused_matmul_fw import fused_matmul
class _fused_linear_triton(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(
ctx,
x,
weight,
bias,
activation,
trainable_weight,
trainable_bias,
save_activation_inputs,
):
# Kick the fused Triton kernel, handling bias and activation in one go
y, activation_inputs = fused_matmul(
x, weight, bias, activation, save_activation_inputs
)
ctx.activation = activation
ctx.trainable_weight = trainable_weight
ctx.trainable_bias = trainable_bias
# Micro-optimization: saving these is not always needed (?)
if x.requires_grad or ctx.trainable_weight or ctx.trainable_bias:
ctx.save_for_backward(weight, activation_inputs, x)
return y
@staticmethod
@custom_bwd
def backward(
ctx: Any, grad_out: torch.Tensor
) -> Any: # pragma: no cover # this is covered, but called directly from C++
"""
Compute the derivative with respect to x, other tensors were not trainable inputs.
"""
(weight, activation_inputs, x) = ctx.saved_tensors
grad_input, grad_weight, grad_bias = fused_matmul_backward(
grad_out=grad_out,
inputs=x,
act_in=activation_inputs,
weight=weight,
trainable_weight=ctx.trainable_weight,
trainable_bias=ctx.trainable_bias,
activation_grad=ctx.activation,
)
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
class FusedLinear(nn.Module):
"""
Handle a linear transform, like torch.nn.Linear_, and a given activation, in a single kernel.
The whole transform: is :math:`y = activation(xA^T + b)`.
This is typically significantly faster than PyTorch while using fp16 and non-sigmoid activations,
as of September 2021.
.. _torch.nn.Linear: https://pytorch.org/docs/stable/generated/torch.nn.Linear.html
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = False,
activation: Optional[Activation] = None,
**_,
):
super().__init__()
self.weight = nn.Parameter(
torch.empty(out_features, in_features), requires_grad=True
)
self.bias = (
nn.Parameter(torch.empty(out_features), requires_grad=True)
if bias
else None
)
self._activation_index = get_triton_activation_index(activation)
self.reset_parameters()
def reset_parameters(self) -> None:
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
torch.nn.init.uniform_(self.bias, -bound, bound)
def forward(self, x):
return _fused_linear_triton.apply(
x,
self.weight,
self.bias,
self._activation_index,
self.weight.requires_grad,
self.bias.requires_grad if self.bias is not None else False,
self.training and x.requires_grad and self._activation_index > 0,
)
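# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): FusedLinear stands in for
# `nn.Linear` followed by an activation, with both fused into one Triton kernel.
# It needs a CUDA device with Triton installed; shapes and the GeLU choice below
# are arbitrary.
if __name__ == "__main__" and torch.cuda.is_available():
    layer = FusedLinear(
        in_features=512, out_features=2048, bias=True, activation=Activation.GeLU
    ).cuda()
    x = torch.randn(8, 512, device="cuda", requires_grad=True)
    y = layer(x)  # bias and activation handled inside the fused matmul
    assert y.shape == (8, 2048)
    y.sum().backward()  # gradients flow through the custom autograd Function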
| EXA-1-master | exa/libraries/xformers/xformers/triton/fused_linear_layer.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
import triton
from xformers.triton.k_sum import k_sum_0
def sum_2d_dim_0(x: torch.Tensor):
"""
Sum a 2D tensor across the first dimension
"""
out = torch.empty(x.shape[1], device=x.device, dtype=x.dtype)
assert (
x.ndim == 2
), "This is a very specific kernel, only for 2-dim tensors and summing along dim 0"
M, N = x.shape
# This kernel is not competitive for these sizes
if M > 2048 or M < 8:
return x.sum(dim=0)
assert (
M >= 4
), "This is a very specific kernel, requires the reduction dimension to be bigger than 4"
assert x.stride(1) == 1, (
"We're expecting x to be contiguous along dim 1, and non contiguous along dim 0.\n"
" You would probably be better served with torch.sum()"
)
BLOCK_M = min(triton.next_power_of_2(M), 2048)
BLOCK_N = 32
if BLOCK_M > 256:
BLOCK_N = 16
if BLOCK_M > 1024:
BLOCK_N = 8
def grid(meta):
return (triton.cdiv(N, meta["BLOCK_N"]),)
# fmt: off
k_sum_0[grid](
out, x,
x.stride(0),
M, N,
x.dtype == torch.float16,
BLOCK_M=BLOCK_M,
BLOCK_N=BLOCK_N,
num_stages=4,
)
# fmt: on
return out
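# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the kernel expects a 2D
# CUDA tensor that is contiguous along dim 1 and falls back to `x.sum(dim=0)`
# outside its sweet spot (M < 8 or M > 2048). The shape below is arbitrary.
if __name__ == "__main__" and torch.cuda.is_available():
    x = torch.randn(1024, 256, device="cuda", dtype=torch.float16)
    reference = x.sum(dim=0)
    fused = sum_2d_dim_0(x)
    assert fused.shape == reference.shape
    assert torch.allclose(fused, reference, rtol=1e-2, atol=1e-2)  # fp16 tolerance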
| EXA-1-master | exa/libraries/xformers/xformers/triton/sum_strided.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Optional
import triton
import triton.language as tl
from xformers.components import Activation
_kAlpha = math.sqrt(2.0 / math.pi)
def get_triton_activation_index(activation: Optional[Activation]) -> int:
return (
{
Activation.ReLU: 1,
Activation.LeakyReLU: 2,
Activation.GeLU: 3,
Activation.SquaredReLU: 4,
Activation.SmeLU: 5,
Activation.StarReLU: 6,
}[activation]
if activation is not None
else 0
)
@triton.jit
def tanh(x):
# Tanh is just a scaled sigmoid
return 2 * tl.sigmoid(2 * x) - 1
@triton.jit
def cosh(x):
exp_x = tl.exp(x)
return (exp_x + 1.0 / exp_x) * 0.5
# a Triton implementation of the most used activations
# See for instance http://arxiv.org/abs/1606.08415 for an overview
# ReLU
@triton.jit
def relu(x):
"""
ReLU_ activation function
.. _ReLU: https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html
"""
return tl.where(x >= 0, x, 0.0)
@triton.jit
def relu_grad(x):
# ReLU is different from other activations
# in that it does not require the input to retrospectively compute its gradient
# here the input is the downstream gradient, and we return the upstream gradient directly
return tl.where(x >= 0, 1.0, 0.0)
@triton.jit
def squared_relu(x):
"""
Squared ReLU activation, as proposed in the Primer_ paper.
.. _Primer: https://arxiv.org/abs/2109.08668
"""
x_sq = x * x
return tl.where(x > 0.0, x_sq, 0.0)
@triton.jit
def squared_relu_grad(x):
return tl.where(x >= 0.0, 2 * x, 0.0)
@triton.jit
def star_relu(x):
"""
Star ReLU activation, as proposed in the "MetaFormer Baselines for Vision"_ paper.
.. _ "MetaFormer Baselines for Vision": https://arxiv.org/pdf/2210.13452.pdf
"""
x_sq = x * x
return 0.8944 * tl.where(x > 0.0, x_sq, 0.0) - 0.4472
@triton.jit
def star_relu_grad(x):
return tl.where(x >= 0.0, 1.7888 * x, 0.0)
# Leaky ReLU
@triton.jit
def leaky_relu(x):
"""
LeakyReLU_ activation
.. _LeakyReLU: https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html
"""
return tl.where(x >= 0.0, x, 0.01 * x)
@triton.jit
def leaky_relu_grad(x):
return tl.where(x >= 0.0, 1.0, 0.01)
@triton.jit
def gelu(x):
"""
GeLU_ activation - Gaussian error linear unit
.. _GeLU: https://arxiv.org/pdf/1606.08415.pdf
"""
return 0.5 * x * (1 + tanh(_kAlpha * (x + 0.044715 * x * x * x)))
@triton.jit
def gelu_grad(x):
# CREDITS: Fast implementation proposed in
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/fused_bias_gelu.py#L30
tanh_out = tanh(0.79788456 * x * (1 + 0.044715 * x * x))
return 0.5 * x * (
(1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)
) + 0.5 * (1 + tanh_out)
@triton.jit
def smelu(x):
"""
SmeLU_ activation - Smooth ReLU with beta=2.0
.. _SmeLU: https://arxiv.org/pdf/2202.06499.pdf
"""
beta = 2.0
relu = tl.where(x >= beta, x, 0.0)
return tl.where(tl.abs(x) <= beta, (x + beta) * (x + beta) / (4.0 * beta), relu)
@triton.jit
def smelu_grad(x):
beta = 2.0
relu_grad = tl.where(x >= beta, 1.0, 0.0)
return tl.where(tl.abs(x) <= beta, (beta + x) / (2.0 * beta), relu_grad)
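# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the integer returned by
# `get_triton_activation_index` is what the fused kernels branch on, with 0 meaning
# "no activation". Requires Triton to be importable.
if __name__ == "__main__":
    assert get_triton_activation_index(None) == 0
    assert get_triton_activation_index(Activation.GeLU) == 3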
| EXA-1-master | exa/libraries/xformers/xformers/triton/k_activations.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
_triton_available = torch.cuda.is_available()
if _triton_available:
try:
from .dropout import FusedDropoutBias, dropout # noqa
from .fused_linear_layer import FusedLinear # noqa
from .layer_norm import FusedLayerNorm, layer_norm # noqa
from .softmax import log_softmax, softmax # noqa
__all__ = [
"dropout",
"softmax",
"log_softmax",
"FusedDropoutBias",
"FusedLinear",
"FusedLayerNorm",
"layer_norm",
]
except ImportError:
__all__ = []
| EXA-1-master | exa/libraries/xformers/xformers/triton/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: This comes almost as-is from the Triton layer norm tutorial
# https://github.com/openai/triton/blob/master/python/tutorials/05-layer-norm.py
import triton
import triton.language as tl
# fmt: off
@triton.jit
def layer_norm_fw(X, Y, W, B, M, V, stride, N, eps, affine: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
# fmt: on
"""
Fused layernorm kernel over a 3d tensor.
The layer norm is applied over the last dimension.
Compute
y = (x - E(x))/(sqrt(var(x) + epsilon)) * gamma + beta
"""
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE_N)
mask = cols < N
# Move to this row
x_ptrs = X + row * stride + cols
x = tl.load(x_ptrs, mask=mask, other=0.0).to(tl.float32)
# Compute mean and variance
mean = tl.sum(x, axis=0) / N
x_zm = tl.where(mask, x - mean, 0.0)
tl.store(M + row, mean)
x_var = tl.sum(x_zm * x_zm, axis=0) / N
rstd = 1.0 / tl.sqrt(x_var + eps)
# Normalize, optionally affine
y = x_zm * rstd
tl.store(V + row, rstd)
mask = cols < N
if affine:
w = tl.load(W + cols, mask=mask, other=1.0)
b = tl.load(B + cols, mask=mask, other=0.0)
y = y * w + b
y_ptrs = Y + row * stride + cols
tl.store(y_ptrs, y, mask=mask)
# Backward pass (DX + partial DW + partial DB)
# fmt: off
@triton.jit
def layer_norm_bwd_dx_fused(
DX, DY, DW, DB,
X, W, M, V,
Lock, stride, N,
# META-parameters
affine: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
):
# fmt: on
# position of elements processed by this program
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE_N)
mask = cols < N
# offset data pointers to start at the row of interest
x_ptrs = X + row * stride + cols
dy_ptrs = DY + row * stride + cols
# load data to SRAM
x = tl.load(x_ptrs, mask=mask, other=0)
dy = tl.load(dy_ptrs, mask=mask, other=0)
mean = tl.load(M + row)
rstd = tl.load(V + row)
# compute dx
xhat = (x - mean) * rstd
if affine:
w = tl.load(W + cols, mask=mask, other=0)
wdy = w * dy
else:
wdy = dy
xhat = tl.where(mask, xhat, 0.)
wdy = tl.where(mask, wdy, 0.)
mean1 = tl.sum(xhat * wdy, axis=0) / N
mean2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * mean1 + mean2)) * rstd
# write-back dx
cols = tl.arange(0, BLOCK_SIZE_N)
mask = cols < N # re-materialize the mask to save registers
dx_ptrs = DX + row * stride + cols
tl.store(dx_ptrs, dx, mask=mask)
if affine:
# accumulate partial sums for dw/db
partial_dw = (dy * xhat).to(w.dtype)
partial_db = dy.to(w.dtype)
# offset locks and weight/bias gradient pointer
# each kernel instance accumulates partial sums for
# DW and DB into one of GROUP_SIZE_M independent buffers
# these buffers stay in L2, which allows this kernel
# to be fast
lock_id = row % GROUP_SIZE_M
Lock += lock_id
Count = Lock + GROUP_SIZE_M
# - wait for a lock on the accumulated dw/db
while tl.atomic_cas(Lock, 0, 1) == 1:
pass
count = tl.load(Count)
# - we got the lock, accumulate this kernel's results with
# the stored values.
dw_ptrs = DW + lock_id * N + cols
db_ptrs = DB + lock_id * N + cols
if count == 0:
# first store doesn't accumulate
tl.atomic_xchg(Count, 1)
else:
partial_dw += tl.load(dw_ptrs, mask=mask, other=0.)
partial_db += tl.load(db_ptrs, mask=mask, other=0.)
tl.store(dw_ptrs, partial_dw, mask=mask)
tl.store(db_ptrs, partial_db, mask=mask)
# release lock
tl.atomic_xchg(Lock, 0)
# Backward pass (total DW + total DB)
# fmt: off
@triton.jit
def layer_norm_bwd_dwdb(
DW, DB, FINAL_DW, FINAL_DB,
M, N,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr
):
# fmt: on
pid = tl.program_id(0)
cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
mask_cols = cols < N
dw = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
db = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for i in range(0, M, BLOCK_SIZE_M):
rows = i + tl.arange(0, BLOCK_SIZE_M)
offs = rows[:, None] * N + cols[None, :]
mask_rm = rows < M
dw += tl.load(DW + offs, mask=mask_rm[:, None] & mask_cols[None, :], other=0.0)
db += tl.load(DB + offs, mask=mask_rm[:, None] & mask_cols[None, :], other=0.0)
sum_dw = tl.sum(dw, axis=0)
sum_db = tl.sum(db, axis=0)
cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
mask_cols = cols < N
tl.store(FINAL_DW + cols, sum_dw, mask=mask_cols)
tl.store(FINAL_DB + cols, sum_db, mask=mask_cols)
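# NOTE: illustrative reference, not part of the original kernels. The dx formula used in
# layer_norm_bwd_dx_fused can be written row-wise in plain PyTorch, which makes the kernel
# easier to reason about. The helper below is only a sketch.
def _reference_layer_norm_dx(x, dy, w, eps=1e-5):
    import torch

    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    rstd = 1.0 / torch.sqrt(var + eps)
    xhat = (x - mean) * rstd
    wdy = w * dy
    mean1 = (xhat * wdy).mean(dim=-1, keepdim=True)
    mean2 = wdy.mean(dim=-1, keepdim=True)
    # same expression as the kernel: dx = (wdy - (xhat * mean1 + mean2)) * rstd
    return (wdy - (xhat * mean1 + mean2)) * rstd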
|
EXA-1-master
|
exa/libraries/xformers/xformers/triton/k_layer_norm.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import triton
import triton.language as tl
# fmt: off
@triton.jit
def k_sum_0(
Y, X,
stride_xm,
M, N,
is_fp16,
# META-params
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
# fmt: on
"""
Sum a 2d tensor over the first (strided) dimension.
Some speed is gained by running the partial sums across the second dimension in parallel
"""
# partial row indices. We'll reduce over this dimension
m = tl.arange(0, BLOCK_M)
# To get some extra parallelization, we handle several columns in the same thread block
rn = tl.program_id(axis=0) * BLOCK_N + tl.arange(0, BLOCK_N)
# the memory address of all the elements that we want to load can be computed as follows
x_ptrs = X + m[:, None] * stride_xm + rn[None, :]
x_sum = tl.zeros((BLOCK_N,), dtype=tl.float32)
tiles = M // BLOCK_M
if M % BLOCK_M > 0:
tiles += 1
col_mask = (rn[None, :] < N)
for _ in range(tiles):
# load input data; pad out-of-bounds elements with 0
# NOTE: make sure to accumulate in fp32 to prevent a trivial overflow
mask = (m[:, None] < M) & col_mask
x = tl.load(x_ptrs, mask=mask, other=0.0)
x_sum += tl.sum(x, 0)
# move the load pointer
x_ptrs += BLOCK_M * stride_xm
m += BLOCK_M # update the mask check
tl.store(Y + rn, x_sum, mask=rn < N)
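# NOTE: hypothetical host-side launch, not part of the original file. It only sketches how
# k_sum_0 could be driven from PyTorch (the real call sites live elsewhere in xformers);
# `x` is assumed to be a 2D CUDA tensor. The reference is a plain torch.sum over dim 0.
def _sum_dim0_sketch(x):
    import torch

    M, N = x.shape
    y = torch.empty((N,), device=x.device, dtype=torch.float32)
    BLOCK_N = 64
    grid = (triton.cdiv(N, BLOCK_N),)
    k_sum_0[grid](
        y, x,
        x.stride(0),
        M, N,
        x.dtype == torch.float16,
        BLOCK_M=32,
        BLOCK_N=BLOCK_N,
    )
    assert torch.allclose(y, x.float().sum(dim=0), atol=1e-3)
    return y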
|
EXA-1-master
|
exa/libraries/xformers/xformers/triton/k_sum.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Optional
import torch
logger = logging.getLogger("xformers")
_gpu_is_old: Optional[bool] = None
def gpu_capabilities_older_than_70() -> bool:
"""Return True if the GPU's compute capability is older than SM70."""
global _gpu_is_old
if _gpu_is_old is None:
for i in range(torch.cuda.device_count()):
major, _ = torch.cuda.get_device_capability(f"cuda:{i}")
if major < 7:
_gpu_is_old = True
if _gpu_is_old is None:
_gpu_is_old = False
return _gpu_is_old
SUPPORTED_CUDA_DEVICES = ["V100", "A100", "T4"]
def get_current_cuda_device():
current_device = str(torch.cuda.get_device_properties(torch.cuda.current_device()))
for device_str in SUPPORTED_CUDA_DEVICES:
if current_device.find(device_str) > 0:
return device_str
logger.warning("Unsupported device, Triton code generation may fail")
return "P100" # default to an old GPU
|
EXA-1-master
|
exa/libraries/xformers/xformers/triton/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
import triton
import triton.language as tl
from xformers.triton.k_activations import (
gelu,
leaky_relu,
relu,
smelu,
squared_relu,
star_relu,
)
# CREDITS: Initially inspired by the Triton tutorial on matrix multiplications
def get_configs(block_k):
return [
triton.Config(
{"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": block_k},
num_stages=4,
num_warps=2,
),
triton.Config(
{"BLOCK_M": 32, "BLOCK_N": 64, "BLOCK_K": block_k},
num_stages=4,
num_warps=2,
),
triton.Config(
{"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": block_k},
num_stages=3,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": block_k},
num_stages=3,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": block_k},
num_stages=3,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": block_k},
num_stages=3,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": block_k},
num_stages=3,
num_warps=4,
),
# Fails on small GPUS
# triton.Config(
# {"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": block_k},
# num_stages=3,
# num_warps=8,
# ),
# triton.Config(
# {"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": block_k},
# num_stages=3,
# num_warps=8,
# ),
]
# fmt: off
@triton.autotune(
configs=[c for block_k in [32, 64] for c in get_configs(block_k)],
key=["M", "N", "K"],
)
@triton.heuristics({
'EVEN_N': lambda args: args["N"] % (args['BLOCK_N']) == 0,
})
@triton.jit
def kernel_fma(
# Pointers to matrices
OUT, ACT_INPUTS, INPUT, WEIGHT, bias,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. stride_am is how much to increase a_ptr
# by to get the element one row down (A has M rows)
stride_om, stride_im,
stride_wn,
# Meta-parameters
BLOCK_M: tl.constexpr, GROUP_M: tl.constexpr,
BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr,
EVEN_N: tl.constexpr,
BIAS: tl.constexpr,
SAVE_ACT_INPUTS: tl.constexpr,
ACTIVATION: tl.constexpr,
is_fp16: tl.constexpr, # autotune
):
# fmt: on
"""
Kernel for computing Out = activation(A x W + C)
- Input has shape (M, K)
- Weight has shape (K, N)
- Bias has shape (N,)
- Output has shape (M, N)
- ActInputs (optional) has shape (M, N)
'ActInputs' optionally saves the A x W + C intermediate for backward computations
This kernel will consolidate over K
"""
# programs are grouped together to improve L2 hit rate
# the logic is that we'll consolidate over K. If the programs were not grouped,
# then multiple cols/rows in the result would end up pulling in the same rows and columns
# from the inputs. By grouping the computation we ensure some data reuse, which the hardware
# covers via the L2 cache
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_M) # number of program ids along the M axis
num_pid_n = tl.cdiv(N, BLOCK_N)  # number of program ids along the N axis
num_pid_in_group = GROUP_M * num_pid_n # number of programs in group
group_id = pid // num_pid_in_group # id of the group this program is in
first_pid_m = group_id * GROUP_M # row-id of the first program in the group
GROUP_M = min(
num_pid_m - first_pid_m, GROUP_M
) # if `num_pid_m` isn't divisible by `GROUP_M`, the last group is smaller
# *within groups*, programs are ordered in a column-major order
# row-id /col-id of the program in the *launch grid*
pid_m = first_pid_m + (pid % GROUP_M)
pid_n = (pid % num_pid_in_group) // GROUP_M
# now compute the block that each program will go through
# rm (resp. rn) denotes a range of indices
# for rows (resp. col) of C
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
rk = tl.arange(0, BLOCK_K)
# the memory addresses of elements can follow numpy broadcasting
input_ptrs = INPUT + rm[:, None] * stride_im
weight_ptrs = WEIGHT + rn[None, :] * stride_wn
# initialize and iteratively update accumulator
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
if BIAS:
if EVEN_N:
bias = tl.load(bias + rn).to(tl.float32)
else:
bias = tl.load(bias + rn, mask=rn < N, other=0.0).to(tl.float32)
acc += bias[None, :]
# block-level matrix multiplication.
# We fetch a memory block from both inputs, matmul and accumulate, then repeat
mask_rn = rn < N
mask_rm = rm < M
for i in range(0, K, BLOCK_K):
rk = tl.arange(0, BLOCK_K) + i
a = tl.load(input_ptrs + rk[None, :], mask=((rk[None, :] < K) & mask_rm[:, None]), other=0.0)
w = tl.load(weight_ptrs + rk[:, None], mask=((rk[:, None] < K) & mask_rn[None, :]), other=0.0)
acc += tl.dot(a, w)
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
# optional: save the activation inputs
if SAVE_ACT_INPUTS:
act_in_ptrs = ACT_INPUTS + rm[:, None] * stride_om + rn[None, :]
tl.store(act_in_ptrs, acc, mask=mask_rm[:, None] & mask_rn[None, :])
# optional: fused activation (while the data is in shared memory)
if ACTIVATION == 1:
acc = relu(acc)
elif ACTIVATION == 2:
acc = leaky_relu(acc)
elif ACTIVATION == 3:
acc = gelu(acc)
elif ACTIVATION == 4:
acc = squared_relu(acc)
elif ACTIVATION == 5:
acc = smelu(acc)
elif ACTIVATION == 6:
acc = star_relu(acc)
# write back result
out_ptrs = OUT + rm[:, None] * stride_om + rn[None, :]
tl.store(out_ptrs, acc, mask=mask_rm[:, None] & mask_rn[None, :])
# Activation needs to be a triton kernel
def fused_matmul(
x: torch.Tensor,
weight: torch.Tensor,
bias: Optional[torch.Tensor],
activation=0,
save_act_inputs: bool = False
):
"""
Compute e = activation(x @ weight + bias).
This wrapper kicks the `kernel_fma` Triton kernel
"""
if not x.is_contiguous():
x = x.contiguous()
x_ = x if x.ndim == 2 else x.flatten(0, 1)
assert (
x_.shape[1] == weight.shape[1]
), f"Incompatible dimensions in between inputs and weight, {x_.shape} - {weight.shape}"
assert bias is None or bias.is_contiguous()
assert (
bias is None or bias.shape[0] == weight.shape[0]
), "Incompatible dimensions in between weight and bias"
assert weight.is_contiguous()
M, K = x_.shape
N, K = weight.shape
outputs = torch.empty((M, N), device=x.device, dtype=x.dtype)
act_inputs = torch.empty_like(outputs) if save_act_inputs else x # will not be used in that case
# 1D launch kernel where each block gets its own program.
grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),) # noqa
# fmt: off
kernel_fma[grid](
outputs, act_inputs, x_, weight, # data ptrs
bias if bias is not None else x, # auto skip bias if not present
M, N, K, # shapes
outputs.stride(0), x_.stride(0), # strides
weight.stride(0),
ACTIVATION=activation, # optional fused activation
BIAS=bias is not None, # optional fused bias
GROUP_M=8, # speed optimization: group the programs
SAVE_ACT_INPUTS=save_act_inputs,
is_fp16=x_.dtype == torch.float16
)
# fmt: on
outputs = outputs if x.ndim == 2 else outputs.reshape(x.shape[0], -1, N)
return outputs, act_inputs if save_act_inputs else None
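# NOTE: hedged usage sketch, not part of the original file. With activation=0 (no fused
# activation), fused_matmul should match a plain torch.nn.functional.linear call, since the
# kernel computes input @ weight.T + bias.
def _fused_matmul_check_sketch():
    x = torch.randn(128, 256, device="cuda", dtype=torch.float16)
    weight = torch.randn(512, 256, device="cuda", dtype=torch.float16)  # (N, K) layout
    bias = torch.randn(512, device="cuda", dtype=torch.float16)
    out, _ = fused_matmul(x, weight, bias, activation=0)
    ref = torch.nn.functional.linear(x, weight, bias)
    assert torch.allclose(out, ref, atol=1e-2, rtol=1e-2)
    return out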
|
EXA-1-master
|
exa/libraries/xformers/xformers/triton/k_fused_matmul_fw.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: This is heavily inspired by the Triton dropout tutorial
# https://raw.githubusercontent.com/openai/triton/master/python/tutorials/04-low-memory-dropout.py
from typing import Optional
import torch
import triton
from torch.cuda.amp import custom_bwd, custom_fwd
from xformers.components.activations import Activation, build_activation
from xformers.triton.k_activations import get_triton_activation_index
from xformers.triton.k_dropout import k_dropout_bw, k_dropout_fw
BLOCK_M = 32
BLOCK_N = 64 # NOTE: This should ideally be GPU dependent, big impact on perf
# Helper to handle the SPMD launch grid and error cases
class _dropout(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, x, p, bias, activation, trainable_bias):
# Soft-flatten a hypothetical 3rd dimension
x_ = x.reshape(-1, x.shape[-1]).contiguous()
y = torch.empty_like(x_)
M, N = x_.shape
assert bias is None or (bias.dtype == x.dtype and bias.shape[0] == N)
assert p > 0.0
def grid(meta):
return (
triton.cdiv(M, meta["BLOCK_M"]),
triton.cdiv(N, meta["BLOCK_N"]),
)
N_BLOCK_N = triton.cdiv(N, BLOCK_N)
# Generate one seed per column block
# seeds are drawn in [0, 2**16), well within the int32 range
seeds = torch.randint(65536, (N_BLOCK_N,), device=x.device, dtype=torch.int32)
# fmt: off
bias_ptr = bias if bias is not None else x_ # Possibly not being used
k_dropout_fw[grid](
y, x_,
bias_ptr,
seeds,
y.stride(0),
M, N,
p,
x.dtype == torch.float16,
USE_BIAS=bias is not None,
ACTIVATION=activation,
BLOCK_M=BLOCK_M,
BLOCK_N=BLOCK_N,
)
# fmt: on
if activation is not None:
ctx.save_for_backward(seeds, bias, x)
else:
ctx.save_for_backward(seeds, bias, None)
ctx.trainable_bias = bias is not None and trainable_bias
ctx.activation = activation
ctx.p = p
return y.reshape_as(x)
@staticmethod
@custom_bwd
def backward(
ctx, grad_out
): # pragma: no cover # This is covered, but called from C++ and not tracked
(seeds, bias, inputs) = ctx.saved_tensors
# Soft-flatten a hypothetical 3rd dimension
grad_out_ = grad_out.reshape(-1, grad_out.shape[-1]).contiguous()
grad_in = torch.empty_like(grad_out_)
M, N = grad_out_.shape
# Optional inputs to compute the activation contribution to the gradient
assert inputs is not None or ctx.activation is None
if inputs is None:
inputs = grad_out_
elif inputs.ndim > 2:
inputs = inputs.reshape(-1, N)
# We split the problem in tiles:
# - over M there will be a follow up reduction
# - over N we compromise between using as much memory parallelism as possible
# (fill the warps, there are 32 threads per warp, and 4 warps by default), and not being too
# big because of register spilling
N_BLOCKS_M = triton.cdiv(M, BLOCK_M)
if ctx.trainable_bias:
grad_bias = torch.empty(
(
N_BLOCKS_M,
N,
),
device=grad_in.device,
dtype=grad_in.dtype,
)
else:
grad_bias = grad_in # will not be used
def grid(meta):
# NOTE: We use Triton Philox random number generator, which optimally generates 4 blocks for
# a given seed and offsets. "BLOCK_M" here describes the size of one of these blocks
# but we need to take this factor of 4 into account when scheduling all the kernels
return (
N_BLOCKS_M,
triton.cdiv(N, meta["BLOCK_N"]),
)
# fmt: off
k_dropout_bw[grid](
grad_in, grad_bias, grad_out_,
inputs, bias if bias is not None else inputs,
seeds,
grad_out_.stride(0), inputs.stride(0),
M, N,
ctx.p,
grad_in.dtype == torch.float16,
USE_BIAS=bias is not None,
ACTIVATION=ctx.activation,
TRAINABLE_BIAS=ctx.trainable_bias,
BLOCK_M=BLOCK_M,
BLOCK_N=BLOCK_N,
)
# fmt: on
return (
grad_in.reshape_as(grad_out),
None,
torch.sum(grad_bias, dim=0) if ctx.trainable_bias else None,
None,
None,
None,
)
def dropout(
x: torch.Tensor,
p: float,
bias: Optional[torch.Tensor] = None,
activation: Optional[Activation] = None,
):
"""
Apply dropout on the input tensor.
Optionally add a bias, the computation will be fused.
"""
assert p <= 1.0 and p >= 0.0
if p == 1.0:
return torch.zeros_like(x)
# Micro optim, skip dropout
if p == 0.0:
x = x + bias if bias is not None else x
if activation is not None:
activation_fn = build_activation(activation)
return activation_fn(x)
return x
# The normal triton enabled codepath
activation_index = get_triton_activation_index(activation)
return _dropout.apply(
x,
float(p),
bias,
activation_index,
bias is not None and bias.requires_grad,
)
class FusedDropoutBias(torch.nn.Module):
"""
A layer which fuses the computation of Dropout(Activation(x))
in a single GPU kernel
"""
def __init__(
self,
p: float,
bias_shape: Optional[int],
activation: Optional[Activation] = None,
) -> None:
super().__init__()
self.p = float(p)
assert (
self.p < 1.0
), f"We don't want to drop all the values, most probably p={p} is not properly set"
self.activation_type = activation
self.bias = (
torch.zeros(bias_shape, requires_grad=True)
if bias_shape is not None
else None
)
self.activation = get_triton_activation_index(self.activation_type)
self.activation_pytorch = build_activation(self.activation_type)
def init_weights(self, *args, **kwargs):
with torch.no_grad():
if self.bias is not None:
self.bias.fill_(0.0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Convenience, catch a possible type or device mismatch
if self.bias is not None:
self.bias = self.bias.to(dtype=x.dtype, device=x.device) # type: ignore
# Train/inference
p = self.p if self.training else 0.0
# This kernel is slower than pytorch for small buffers, bypassing it in that case
perf_check = x.shape[-1] > 512
# Catch a non-cuda setup, fallback to pytorch
if not x.is_cuda or not perf_check or p == 0.0:
x = x + self.bias if self.bias is not None else x
x = self.activation_pytorch(x)
return torch.nn.functional.dropout(x, p) if p > 0.0 else x
# The normal, Triton-backed path
return _dropout.apply(x, p, self.bias, self.activation, True)
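# NOTE: hedged usage sketch, not part of the original file. Both the functional form and the
# module wrapper fuse bias + activation + dropout in a single kernel; the snippet below only
# illustrates the intended call patterns.
def _fused_dropout_usage_sketch():
    x = torch.randn(16, 1024, device="cuda", dtype=torch.float16)
    bias = torch.randn(1024, device="cuda", dtype=torch.float16, requires_grad=True)
    # functional: fused bias + GeLU + dropout
    y = dropout(x, p=0.1, bias=bias, activation=Activation.GeLU)
    # module: same fusion, owning its bias
    layer = FusedDropoutBias(p=0.1, bias_shape=1024, activation=Activation.GeLU)
    z = layer(x)
    return y, z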
|
EXA-1-master
|
exa/libraries/xformers/xformers/triton/dropout.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: This is heavily inspired by the Triton dropout tutorial
# https://raw.githubusercontent.com/openai/triton/master/python/tutorials/04-low-memory-dropout.py
import triton
import triton.language as tl
from xformers.triton.k_activations import (
gelu,
gelu_grad,
leaky_relu,
leaky_relu_grad,
relu,
relu_grad,
smelu,
smelu_grad,
squared_relu,
squared_relu_grad,
)
_configs = [
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
]
# fmt: off
@triton.heuristics({"SIZE_RAND_BLOCK": lambda args: args["BLOCK_N"] * args["BLOCK_M"]})
@triton.autotune(
configs=_configs,
key=["M", "N", "is_fp16"],
)
@triton.jit
def k_dropout_fw(
Y, X, BIAS, SEEDS,
stride,
M, N,
p: tl.constexpr,
is_fp16: tl.constexpr, # autotune
ACTIVATION: tl.constexpr,
# Meta-parameters
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
SIZE_RAND_BLOCK: tl.constexpr,
USE_BIAS: tl.constexpr,
):
"""
Apply dropout on an input tensor
Y : Output (M, N)
X : Input (M, N)
BIAS (N,)
SEEDS (M,)
p : dropout probability
"""
# fmt: on
row_id = tl.program_id(axis=0)
rows = row_id * BLOCK_M + tl.arange(0, BLOCK_M)
col_id = tl.program_id(axis=1)
cols = col_id * BLOCK_N + tl.arange(0, BLOCK_N)
# pointers starting point
x_ptrs = X + rows[:, None] * stride + cols[None, :]
y_ptrs = Y + rows[:, None] * stride + cols[None, :]
# good to go, start the layer computations
col_mask = cols[None, :] < N
p_scale = 1. / (1. - p)
if USE_BIAS:
b_ptrs = BIAS + cols[None, :]
bias = tl.load(b_ptrs, mask=cols[None, :] < N, other=0.)
else:
bias = x_ptrs # will not be used
block_mask = (rows[:, None] < M) & col_mask
x = tl.load(x_ptrs, mask=block_mask, other=0.0)
# optionally apply a fused bias
if USE_BIAS:
x += bias
# optional: fused activation (while the data is in shared memory)
if ACTIVATION == 1:
x = relu(x)
elif ACTIVATION == 2:
x = leaky_relu(x)
elif ACTIVATION == 3:
x = gelu(x)
elif ACTIVATION == 4:
x = squared_relu(x)
elif ACTIVATION == 5:
x = smelu(x)
# get the random keep mask
rand_offsets = tl.arange(0, SIZE_RAND_BLOCK)
seed_int = tl.load(SEEDS + col_id)
r = tl.rand(seed_int, rand_offsets)
keep_mask = r > p
# prune and normalize in one go
keep = tl.view(keep_mask, x.shape)
output = tl.where(keep, (x * p_scale).to(x.dtype), 0.)
tl.store(y_ptrs, output, mask=block_mask) # output
# fmt: off
@triton.heuristics({"SIZE_RAND_BLOCK": lambda args: args["BLOCK_N"] * args["BLOCK_M"]})
@triton.autotune(
configs=_configs,
key=["M", "N", "is_fp16"],
)
@triton.jit
def k_dropout_bw(
GRAD_IN, GRAD_BIAS, GRAD_OUT,
INPUTS, BIAS, SEEDS,
stride_grad, stride_inputs,
M, N,
p: tl.constexpr,
is_fp16: tl.constexpr, # autotune
ACTIVATION: tl.constexpr,
# Meta-parameters
BLOCK_M: tl.constexpr, # heuristics
BLOCK_N: tl.constexpr,
SIZE_RAND_BLOCK: tl.constexpr,
TRAINABLE_BIAS: tl.constexpr,
USE_BIAS: tl.constexpr,
):
"""
Apply dropout on an input tensor
GRAD_OUT (M, N)
GRAD_BIAS (N,)
GRAD_IN (M, N)
BIAS (N,)
SEEDS (N,)
p : dropout probability
"""
# fmt: on
row_id = tl.program_id(axis=0)
rows = row_id * BLOCK_M + tl.arange(0, BLOCK_M)
col_id = tl.program_id(axis=1)
cols = col_id * BLOCK_N + tl.arange(0, BLOCK_N)
# pointers starting point
grad_out_ptrs = GRAD_OUT + rows[:, None] * stride_grad + cols[None, :]
grad_in_ptrs = GRAD_IN + rows[:, None] * stride_grad + cols[None, :]
input_ptrs = INPUTS + rows[:, None] * stride_inputs + cols[None, :]
# now go over the tiles
grad_bias = tl.zeros((BLOCK_N,), dtype=tl.float32)
col_mask = cols[None, :] < N
p_scale = 1. / (1. - p)
if USE_BIAS:
b_ptrs = BIAS + cols[None, :]
bias = tl.load(b_ptrs, mask=col_mask, other=0.)
block_mask = (rows[:, None] < M) & col_mask
grad_out = tl.load(grad_out_ptrs, mask=block_mask, other=0.)
# optional: fused activation (while the data is in shared memory)
if ACTIVATION:
inputs = tl.load(input_ptrs, mask=block_mask, other=0.)
# optionally apply a fused bias
if USE_BIAS:
inputs += bias
if ACTIVATION == 1:
act_grad = relu_grad(inputs)
elif ACTIVATION == 2:
act_grad = leaky_relu_grad(inputs)
elif ACTIVATION == 3:
act_grad = gelu_grad(inputs)
elif ACTIVATION == 4:
act_grad = squared_relu_grad(inputs)
elif ACTIVATION == 5:
act_grad = smelu_grad(inputs)
grad_out *= act_grad
# randomly prune (and scale) the resulting buffer, possibly a no-op
# note that even if we did not save the mask from the FW pass, it is generated
# from the same seeds, so the same drop mask is applied here
rand_offsets = tl.arange(0, SIZE_RAND_BLOCK)
seed_int = tl.load(SEEDS + col_id)
r = tl.rand(seed_int, rand_offsets)
r = tl.view(r, grad_out.shape)
output = tl.where(r > p, (grad_out * p_scale).to(grad_out.dtype), 0.)
# write-back
tl.store(grad_in_ptrs, output, mask=block_mask)
# optionally accumulate the bias gradient
if TRAINABLE_BIAS:
grad_bias += tl.sum(output, axis=0)
if TRAINABLE_BIAS:
grad_bias_ptr = GRAD_BIAS + row_id * N + cols
tl.store(grad_bias_ptr, grad_bias, mask=cols < N)
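# NOTE: illustrative analogy in eager PyTorch, not part of the original file. The backward
# kernel never stores the dropout mask: it regenerates it from the per-block seeds, in the
# spirit of the low-memory dropout tutorial. The helper name below is hypothetical.
def _seeded_mask_sketch(shape, p, seed, device="cuda"):
    import torch

    g = torch.Generator(device=device).manual_seed(seed)
    # rerunning with the same seed reproduces exactly the same keep mask
    return torch.rand(shape, generator=g, device=device) > p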
|
EXA-1-master
|
exa/libraries/xformers/xformers/triton/k_dropout.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Optional
import torch
import triton
from torch.cuda.amp import custom_bwd, custom_fwd
from xformers.triton.k_softmax import _softmax, _softmax_backward
# CREDITS: This is adapted from the vanilla Triton example. See https://openai.com/blog/triton/
# and https://triton-lang.org/getting-started/tutorials/02-fused-softmax.html
logger = logging.getLogger("xformers")
_triton_softmax_fp16_enabled = False # NOTE: PyTorch keeps softmax as fp32
_triton_registered_warnings = False
# Helper to handle the SPMD launch grid and error cases
class _softmax_triton(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16 if _triton_softmax_fp16_enabled else None)
def forward(ctx, x, mask, log_outputs, causal):
"""
Fused softmax implementation, using the Triton programming model.
This only supports a reduction over the last dimension for now
"""
# Handle 2D/3D tensors
x_ = x.unsqueeze(0) if x.ndim == 2 else x
x_ = x_.flatten(0, -3)
if not x_.is_contiguous():
x_ = x_.contiguous()
y = torch.empty_like(x_)
assert (
y.stride(2) == 1 and x_.stride(2) == 1
), f"{x.shape} - {x_.shape} - {x_.stride()}"
# SPMD launch grid
grid_2d = (
x_.shape[0],
x_.shape[1],
)
# enqueue GPU kernel
use_mask = True
if mask is None:
# placeholder, will not be used
mask = x_
use_mask = False
else:
# The mask is additive, so it must share the input dtype
assert mask.dtype == x.dtype, "An additive mask is requested"
_softmax[grid_2d](
y,
x_,
mask,
y.stride(0),
y.stride(1),
x_.stride(0),
x_.stride(1),
mask.stride(0),
x_.shape[2],
log=log_outputs,
use_mask=use_mask,
causal=causal,
)
ctx.save_for_backward(y)
ctx.log_outputs = log_outputs
ctx.causal = causal
return y.reshape_as(x)
@staticmethod
@custom_bwd
def backward(
ctx, grad_out
): # pragma: no cover # this is covered, but called directly from C++
(out,) = ctx.saved_tensors
# Handle 2D/3D tensors
grad_out_ = grad_out.unsqueeze(0) if grad_out.ndim == 2 else grad_out
grad_out_ = grad_out_.flatten(0, -3)
# SPMD launch grid
grid_2d = (
grad_out_.shape[0],
grad_out_.shape[1],
)
depth = triton.next_power_of_2(grad_out_.shape[2])
grad_in = torch.empty_like(
out
) # torch.zeros is measurably slower, we'll zero out in the kernel
# Make sure that the tensor are contiguous
grad_in, grad_out, out = map(lambda x: x.contiguous(), [grad_in, grad_out, out])
# fmt: off
_softmax_backward[grid_2d](
grad_in, grad_out_, out,
grad_in.stride(0), grad_in.stride(1),
grad_out_.stride(0), grad_out_.stride(1),
out.stride(0), out.stride(1),
out.shape[2],
depth=depth,
log=ctx.log_outputs,
causal=ctx.causal
)
# fmt: on
return grad_in.reshape_as(grad_out), None, None, None
def softmax(
x: torch.Tensor, mask: Optional[torch.Tensor] = None, causal: bool = False
) -> torch.Tensor:
r"""Applies the Softmax function to an 3-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range [0,1] and sum to 1.
Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
.. warning: softmax is computed on the last dimension of the input tensor.
Args:
x: input tensor.
mask: optional mask, its application will be fused to the softmax computation if triton is used
causal: optional performance optimization, if triton is used and the attention is causal
Returns:
a Tensor of the same dimension and shape as the input, with
values in the range [0, 1] that sum to 1
"""
return _softmax_dispatch(x, log=False, mask=mask, causal=causal)
def log_softmax(
x: torch.Tensor, mask: Optional[torch.Tensor] = None, causal: bool = False
) -> torch.Tensor:
r"""Applies the :math:`\log(\text{Softmax}(x))` function to an 3-dimensional
input Tensor. The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Args:
x: input tensor.
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [-inf, 0)
"""
return _softmax_dispatch(x, log=True, mask=mask, causal=causal)
def _softmax_dispatch(
x: torch.Tensor, log: bool, mask: Optional[torch.Tensor], causal: bool = False
) -> torch.Tensor:
# Triton is used if
# - CUDA
# - there's enough data to make it faster than pytorch. This could change over time, Triton is improving
# - there was no previous failure
global _triton_registered_warnings
try:
if torch.cuda.is_available() and x.is_cuda and not _triton_registered_warnings:
return _softmax_triton.apply(x, mask, log, causal)
except RuntimeError as e:
# Catch cases where the current GPU does not have enough registers to hold a full tensor line
# fallback to PyTorch's implementation, which streams the tensor in and out
_triton_registered_warnings = True
logger.warning(
"Triton softmax kernel register spillover or invalid image caught."
"Deactivating this kernel, please file an issue int the xFormers repository"
)
logger.warning(e)
if mask is not None:
x = x + mask
if causal:
x = x + torch.triu(torch.full_like(x, float("-inf")), diagonal=1)
if log:
return torch.log_softmax(x, dim=-1)
else:
return torch.softmax(x, dim=-1)
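# NOTE: hedged usage sketch, not part of the original file. The fused softmax matches
# torch.softmax over the last dimension, with the optional additive mask folded into the
# same kernel.
def _softmax_usage_sketch():
    x = torch.randn(8, 128, 128, device="cuda")
    mask = torch.zeros(128, 128, device="cuda")
    mask[:, 64:] = float("-inf")  # additive mask, same dtype as the input
    y = softmax(x, mask=mask, causal=False)
    ref = torch.softmax(x + mask, dim=-1)
    assert torch.allclose(y, ref, atol=1e-4)
    return y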
|
EXA-1-master
|
exa/libraries/xformers/xformers/triton/softmax.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: the underlying kernel comes straight from the Triton tutorials
# see https://github.com/openai/triton/blob/master/python/tutorials/05-layer-norm.py
import logging
from typing import Optional
import torch
import torch.nn as nn
import triton
from torch.cuda.amp import custom_bwd, custom_fwd
from xformers.triton.k_layer_norm import (
layer_norm_bwd_dwdb,
layer_norm_bwd_dx_fused,
layer_norm_fw,
)
logger = logging.getLogger("xformers")
_triton_layernorm_fp16_enabled = False # NOTE: PyTorch keeps layernorm as fp32
_triton_registered_warnings = False
class _LayerNorm(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16 if _triton_layernorm_fp16_enabled else None)
def forward(ctx, x, weight, bias, eps):
# catch eps being too small if the tensors are fp16
if x.dtype == torch.float16:
eps = max(eps, 1.6e-5)
# allocate output
y = torch.empty_like(x)
# reshape input data into 2D tensor
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
# allocate mean and std, they'll be used in the backward pass
mean = torch.empty((M,), dtype=torch.float32, device="cuda")
rstd = torch.empty((M,), dtype=torch.float32, device="cuda")
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_SIZE_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_SIZE_N:
raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
if not x_arg.is_contiguous() or not y.is_contiguous():
global _triton_registered_warnings
if not _triton_registered_warnings:
logger.warning(
"Non-contiguous input tensor found. Making it contiguous,"
+ " but could have perf or trainer implications"
)
_triton_registered_warnings = True
x_arg = x_arg.contiguous()
y = y.contiguous()
# heuristics for number of warps.
num_warps = min(max(BLOCK_SIZE_N // 256, 1), 16)
# enqueue kernel
# fmt: off
layer_norm_fw[(M,)](
x_arg, y, weight, bias, mean, rstd,
x_arg.stride(0),
N,
eps,
num_warps=num_warps,
BLOCK_SIZE_N=BLOCK_SIZE_N,
affine=weight is not None
)
# fmt: on
ctx.save_for_backward(x, mean, rstd, weight)
ctx.BLOCK_SIZE_N = BLOCK_SIZE_N
ctx.num_warps = num_warps
return y.reshape_as(x)
@staticmethod
@custom_bwd
def backward(
ctx, dy
): # pragma: no cover # this is covered, but called directly from C++
x, mean, rstd, weight = ctx.saved_tensors
# flatten the batch dimension, if any.
# We're interested in 'samples' x norm_dimension
x = x.reshape(-1, x.size(-1))
M, N = x.size()
# heuristics for amount of parallel reduction stream for DG/DB
GROUP_SIZE_M = 32
if N <= 8192:
GROUP_SIZE_M = 64
if N <= 4096:
GROUP_SIZE_M = 96
if N <= 2048:
GROUP_SIZE_M = 128
if N <= 1024:
GROUP_SIZE_M = 256
if dy.dtype == torch.float32:
GROUP_SIZE_M = GROUP_SIZE_M // 2
# allocate output
locks = torch.zeros(2 * GROUP_SIZE_M, dtype=torch.int32, device="cuda")
t_args = {"dtype": x.dtype, "device": x.device}
_dw = torch.empty((GROUP_SIZE_M, x.size(-1)), **t_args)
_db = torch.empty_like(_dw)
dw = torch.empty((x.size(-1),), **t_args)
db = torch.empty_like(dw)
dy = dy.contiguous()
dx = torch.empty_like(dy)
# Check the tensor shapes and layouts
# we suppose in the kernel that they have the same size and are contiguous
assert (
dy.numel() == x.numel()
), "Something is wrong in the backward graph, possibly because of an inplace operation after the layernorm"
# enqueue kernel using forward pass heuristics
# also compute partial sums for DW and DB
num_warps = min(max(ctx.BLOCK_SIZE_N // 256, 1), 16)
# fmt: off
layer_norm_bwd_dx_fused[(M,)](
dx, dy, _dw, _db, x,
weight if weight is not None else x,
mean, rstd,
locks,
x.stride(0),
N,
affine=weight is not None,
GROUP_SIZE_M=GROUP_SIZE_M,
BLOCK_SIZE_N=ctx.BLOCK_SIZE_N,
num_warps=num_warps
)
# fmt: on
def grid(meta):
return [triton.cdiv(N, meta["BLOCK_SIZE_N"])]
# accumulate partial sums in separate kernel
# fmt: off
layer_norm_bwd_dwdb[grid](
_dw, _db, dw, db,
GROUP_SIZE_M,
N,
BLOCK_SIZE_M=32,
BLOCK_SIZE_N=64
)
# fmt: on
dx = dx.reshape_as(dy)
return dx, dw, db, None
class FusedLayerNorm(nn.Module):
"""
Handles layer normalization, like torch.nn.LayerNorm_.
This implementation should be measurably faster than the default PyTorch layernorm (as of PyTorch 1.9),
both for training and inference workloads.
.. NOTE: Computations under Torch AMP are kept as float32 by default, one can change this to be float16
by setting the flag `xformers.triton.layer_norm._triton_layernorm_fp16_enabled = True`
.. _torch.nn.LayerNorm: https://pytorch.org/docs/stable/generated/torch.nn.LayerNorm.html
"""
def __init__(self, normalized_shape, affine=True, eps=1e-06):
super().__init__()
if affine:
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
else:
self.weight = self.bias = None
self.epsilon = eps
def forward(self, x):
return layer_norm(x, self.weight, self.bias, self.epsilon)
def init_weights(self, *args, **kwargs):
with torch.no_grad():
if self.weight is not None:
self.weight.fill_(1.0)
if self.bias is not None:
self.bias.fill_(0.0)
def layer_norm(
x: torch.Tensor,
weight: Optional[torch.Tensor] = None,
bias: Optional[torch.Tensor] = None,
eps: float = 1e-06,
) -> torch.Tensor:
r"""Applies layer normalization over a mini-batch of inputs"""
global _triton_registered_warnings
try:
if (
not _triton_registered_warnings
and torch.cuda.is_available()
and x.is_cuda
and weight is not None
and bias is not None
):
return _LayerNorm.apply(x, weight, bias, eps)
except RuntimeError as e:
# Catch cases where the current GPU does not have enough registers to hold a full tensor line
# fallback to PyTorch's implementation, which streams the tensor in and out
_triton_registered_warnings = True
logger.warning(
"Triton layernorm kernel register spillover or invalid image caught. "
"Deactivating this kernel, please file an issue in the xFormers repository"
)
logger.warning(e)
return torch.nn.functional.layer_norm(
x, [x.shape[-1]], weight=weight, bias=bias, eps=eps
)
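# NOTE: hedged usage sketch, not part of the original file. Both the module and the
# functional form are meant to mirror torch layer norm over the last dimension.
def _layer_norm_usage_sketch():
    x = torch.randn(4, 197, 384, device="cuda")
    fused = FusedLayerNorm(384).to("cuda")
    y = fused(x)
    ref = torch.nn.functional.layer_norm(x, [384], fused.weight, fused.bias, fused.epsilon)
    assert torch.allclose(y, ref, atol=1e-4)
    return y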
|
EXA-1-master
|
exa/libraries/xformers/xformers/triton/layer_norm.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
import triton
import triton.language as tl
from xformers.triton.k_activations import (
gelu_grad,
leaky_relu_grad,
relu_grad,
smelu_grad,
squared_relu_grad,
star_relu_grad,
)
# fmt: off
@triton.autotune(
configs=[
triton.Config({"BLOCK_N": 64}, num_stages=4, num_warps=2),
triton.Config({"BLOCK_N": 128}, num_stages=3, num_warps=2),
triton.Config({"BLOCK_N": 256}, num_stages=3, num_warps=4),
triton.Config({"BLOCK_N": 512}, num_stages=3, num_warps=4),
triton.Config({"BLOCK_N": 1024}, num_stages=3, num_warps=4),
],
key=["N"],
)
@triton.heuristics({
'EVEN_N': lambda args: args["N"] % (args['BLOCK_N']) == 0,
})
@triton.jit
def kernel_bw(
# Pointers to matrices
GRAD_ACT, GRAD_OUT, ACT_INPUTS,
# Matrix dimensions
N,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. stride_am is how much to increase a_ptr
# by to get the element one row down (A has M rows)
stride_gom, stride_aim,
# Meta-parameters
BLOCK_N: tl.constexpr,
EVEN_N: tl.constexpr,
ACTIVATION_GRAD: tl.constexpr,
):
# fmt: on
"""
Go over all the activation inputs, compute the corresponding gradient
"""
# this kernel is relatively simple in terms of scheduling:
# - per row (pid_m)
# - each program handles a given chunk on the col axis,
# since that is more effective memory- and occupancy-wise
pid_m, pid_n = tl.program_id(axis=0), tl.program_id(axis=1)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
# the memory addresses of elements in the first block of
# A and W can be computed using numpy-style broadcasting
act_input_ptrs = ACT_INPUTS + pid_m * stride_aim + rn
# compute the gradient which is related to this activation
if EVEN_N:
act_in = tl.load(act_input_ptrs)
else:
act_in = tl.load(act_input_ptrs, mask=rn < N, other=0.0)
if ACTIVATION_GRAD == 1:
grad_act = relu_grad(act_in)
elif ACTIVATION_GRAD == 2:
grad_act = leaky_relu_grad(act_in)
elif ACTIVATION_GRAD == 3:
grad_act = gelu_grad(act_in)
elif ACTIVATION_GRAD == 4:
grad_act = squared_relu_grad(act_in)
elif ACTIVATION_GRAD == 5:
grad_act = smelu_grad(act_in)
elif ACTIVATION_GRAD == 6:
grad_act = star_relu_grad(act_in)
else:
grad_act = act_in
# now read the incoming gradient, the backpropagated one is the multiple of both
grad_out_ptrs = GRAD_OUT + pid_m * stride_gom + rn
if EVEN_N:
grad_out = tl.load(grad_out_ptrs)
else:
grad_out = tl.load(grad_out_ptrs, mask=rn < N)
grad_act *= grad_out
# write back result
grad_act_ptrs = GRAD_ACT + pid_m * stride_gom + rn
tl.store(grad_act_ptrs, grad_act, mask=rn < N)
def fused_matmul_backward(
grad_out: torch.Tensor,
inputs: torch.Tensor,
act_in: Optional[torch.Tensor],
weight: torch.Tensor,
trainable_weight: bool,
trainable_bias: bool,
activation_grad: int = 0,
):
"""
Compute grad_in = activation^-1(grad_out) @ weight.transpose()
.. note: The weight buffer is transposed on the fly
.. note: Activation gradient needs to be a Triton kernel
"""
# Make sure that we don't have to handle the stride over cols
if not grad_out.is_contiguous():
grad_out = grad_out.contiguous()
grad_out_ = grad_out if grad_out.ndim == 2 else grad_out.flatten(0, 1)
inputs_ = inputs if inputs.ndim == 2 else inputs.flatten(0, 1)
assert grad_out_.shape[1] == weight.shape[0], "Incompatible dimensions between grad_out and weight"
M, N = grad_out_.shape
N, _ = weight.shape
# Compute the gradient for the activation
if activation_grad > 0:
grad_act = torch.empty_like(grad_out_)
# Some activation gradients do not need the activation inputs,
# the downstream gradient is enough
if act_in is None:
act_in = grad_out_
grid = lambda META: (M, triton.cdiv(N, META["BLOCK_N"])) # noqa
# fmt: off
kernel_bw[grid](
grad_act, grad_out_, act_in, # data ptrs
N, # shapes
grad_act.stride(0), act_in.stride(0), # strides
ACTIVATION_GRAD=activation_grad, # optional fused activation
)
# fmt: on
# Backpropagation going up, the reference gradient is now
# just before the activation
grad_out_ = grad_act
# The following ops can also be handled by pytorch
grad_in = triton.ops.matmul(grad_out_, weight)
grad_weight = grad_out_.transpose(1, 0) @ inputs_ if trainable_weight else None
grad_bias = torch.sum(grad_out_, dim=0) if trainable_bias else None
return grad_in.reshape_as(inputs), grad_weight, grad_bias
|
EXA-1-master
|
exa/libraries/xformers/xformers/triton/k_fused_matmul_bw.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import triton
import triton.language as tl
# CREDITS: This is adapted from the vanilla Triton example. See https://openai.com/blog/triton/
# and https://triton-lang.org/getting-started/tutorials/02-fused-softmax.html
def get_depth(args):
return triton.next_power_of_2(args["K"])
# autotune: Triton will test out these configurations, and automatically pick the fastest one.
# heuristic: add arguments to the kernel call automatically given some heuristics. These arguments are passed in "meta"
# fmt: off
@triton.autotune(
configs=[
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
triton.Config({}, num_warps=32),
],
key=["K"],
)
@triton.heuristics(values={"depth": get_depth})
@triton.jit
def _softmax(
Y, X, M,
stride_ym, stride_yn,
stride_xm, stride_xn,
stride_mn,
K,
# Meta-params
depth: tl.constexpr,
causal: tl.constexpr,
use_mask: tl.constexpr,
log: tl.constexpr,
):
# fmt: on
"""
Fused softmax kernel over a 3d tensor.
The softmax is applied over the last dimension, meaning that this is equivalent to torch.softmax(tensor, dim=-1)
Note: if the last dimension is large, say 128K elements, the kernel compile time can shoot up to many minutes when
the kernel is run for the first time.
"""
m = tl.program_id(0)
n = tl.program_id(1)
# col indices
k = tl.arange(0, depth)
# the memory address of all the elements that we want to load can be computed as follows
x_ptrs = X + m * stride_xm + n * stride_xn + k
# load input data; pad out-of-bounds elements with 0
io_mask = k < K
# Causal - 1: skip on the loads directly
if causal:
io_mask = io_mask & (k <= n)
x = tl.load(x_ptrs, mask=io_mask, other=float("-inf")).to(tl.float32)
# Causal - 2: enforce correctness over a couple of misloaded values
if causal:
off = float("-inf")
off = off.to(x.dtype) # type: ignore
x = tl.where(k > n, off, x)
if use_mask:
mask_ptrs = M + n * stride_mn + k
add_mask = tl.load(mask_ptrs, io_mask, other=float("-inf")).to(tl.float32)
x += add_mask
# compute numerically-stable softmax
z = x - tl.max(x, axis=0)
num = tl.exp(z)
denom = tl.sum(num, axis=0)
if log:
y = z - tl.log(denom)
else:
y = num / denom
# write back to Y.
# we only write once, hence the "fused" softmax naming
y_ptrs = Y + m * stride_ym + n * stride_yn + k
# technically we could write only the lower triangular matrix in the causal case
# but this is deemed too error prone
tl.store(y_ptrs, y, mask=k < K)
# fmt: off
@triton.autotune(
configs=[
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
],
key=["K"],
)
@triton.jit
def _softmax_backward(
GradIn, GradOut, Out,
stride_bm, stride_bn,
stride_gm, stride_gn,
stride_om, stride_on,
K,
# meta-params
depth: tl.constexpr,
causal: tl.constexpr,
log: tl.constexpr,
):
# fmt: on
"""
Compute the softmax gradients.
..Note: Not autotuning for now because this would lead to broken accumulated gradients
"""
m = tl.program_id(0)
n = tl.program_id(1)
# col indices
k = tl.arange(0, depth)
# the memory address of all the elements that we want to load can be computed as follows
grad_out_ptrs = GradOut + m * stride_gm + n * stride_gn + k
out_ptrs = Out + m * stride_om + n * stride_on + k
# load input data; pad out-of-bounds elements with 0
io_mask = k < K
# Causal - 1: skip on the loads directly
if causal:
io_mask = io_mask & (k <= n)
g = tl.load(grad_out_ptrs, mask=io_mask, other=float(0)).to(tl.float32)
o = tl.load(out_ptrs, mask=io_mask, other=float(0)).to(tl.float32)
# Causal - 2: enforce correctness over a couple of misloaded values
if causal:
zero = float(0)
zero = zero.to(g.dtype) # type: ignore
g = tl.where(k > n, zero, g)
o = tl.where(k > n, zero, o)
if log:
s = tl.sum(g, 0)
grad_in = g - tl.exp(o) * s
else:
# Step 1: Compute the intermediate sum used for the gradient
s = tl.sum(g * o, 0)
# Step 2: Compute the gradients
grad_in = o * (g - s)
# write back to the input gradients
# technically we could write only the lower triangular matrix in the causal case
# but this is deemed too error prone
grad_in_ptrs = GradIn + m * stride_bm + n * stride_bn + k
tl.store(grad_in_ptrs, grad_in, mask=k < K)
|
EXA-1-master
|
exa/libraries/xformers/xformers/triton/k_softmax.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import asdict, dataclass
from typing import Optional, Type, TypeVar
import torch
from xformers import _is_triton_available
Self = TypeVar("Self", bound="SimplicialEmbedding")
@dataclass
class SimplicialEmbeddingConfig:
L: int
temperature: float
class SimplicialEmbedding(torch.nn.Module):
"""
An implementation of the "Simplicial Embeddings"_, as proposed by Lavoie et. al
Arguments:
- L: the number of embedding chunks
- temperature: optional scaling parameter for the softmax operation.
A small (<1.) temperature will lead to a sparse representation (up to one-hot),
while a large (>1.) temperature will make the vector more uniform
_"Simplicial Embeddings": https://arxiv.org/pdf/2204.00616.pdf
"""
def __init__(self, L: int, temperature: Optional[float] = None) -> None:
super().__init__()
self.L = L
self.temperature = temperature
def forward(self, x: torch.Tensor) -> torch.Tensor:
assert (
x.shape[-1] % self.L == 0
), f"The embedding dimension {x.shape[-1]} is not divisible by the chosen L parameter {self.L}"
# Separate the input tensor into L chunks of size V
B, C, E = x.shape
V = E // self.L
Vs = x.reshape(B, C, self.L, V)
# Softmax normalize them, with the proposed temperature
# This is done over the last dimension, so only within Vs
if self.temperature is not None:
Vs /= self.temperature
if _is_triton_available():
from xformers.triton.softmax import softmax as triton_softmax
Vs = triton_softmax(
Vs, mask=None, causal=False
) # the softmax is on the last dimension
else:
Vs = torch.nn.functional.softmax(Vs, dim=-1)
# Concatenate back and return
return Vs.reshape(B, C, E)
@classmethod
def from_config(cls: Type[Self], config: SimplicialEmbeddingConfig) -> Self:
# Generate the class inputs from the config
fields = asdict(config)
return cls(**fields)
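# NOTE: hedged usage sketch, not part of the original file. Each embedding of size E is split
# into L chunks of size E // L, and every chunk is softmax-normalized independently
# (falling back to torch softmax when Triton is not available).
def _simplicial_embedding_usage_sketch():
    se = SimplicialEmbedding(L=4, temperature=0.5)
    x = torch.randn(2, 16, 64)  # (batch, context, embedding)
    y = se(x)
    # every chunk of size 64 // 4 = 16 now sums to 1
    chunks = y.reshape(2, 16, 4, 16)
    assert torch.allclose(chunks.sum(dim=-1), torch.ones(2, 16, 4), atol=1e-5)
    return y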
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/simplicial_embedding.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
from xformers import _is_triton_available
if _is_triton_available():
from xformers.triton.layer_norm import FusedLayerNorm
from collections import namedtuple
class ResidualNormStyle(str, Enum):
"""Support different residual path and norm styles.
See "On Layer Normalization in the Transformer Architecture",
Xiong et al., https://arxiv.org/pdf/2002.04745v1.pdf
"""
Pre = "pre"
Post = "post"
DeepNorm = "deepnorm"
class NormalizationType(str, Enum):
LayerNorm = "layernorm"
Skip = "skip"
# TODO: BatchNorm = "batchnorm"
# TODO: GroupNorm = "groupnorm"
def get_normalization_layer(normalization_type: NormalizationType):
class Skip(nn.Module):
def __init__(self, *_, **__) -> None:
super().__init__()
def forward(self, x: torch.Tensor, **_):
return x
return {
NormalizationType.LayerNorm: nn.LayerNorm,
NormalizationType.Skip: Skip,
}[normalization_type]
class RequiresWrappedInputs:
"""Used to mark, through inheritance,
the fact that this class will require inputs to be passed as a single list"""
pass
# CREDITS: the following is inspired by FastAI's Transformer implementation
class Residual(nn.Module, RequiresWrappedInputs):
"""
Object-oriented handling of the residual path
This supports scaling of the residual path, as proposed by DeepNet_
.. _DeepNet: https://arxiv.org/pdf/2203.00555v1.pdf
.. Note: the wrapped layers must accept all the inputs as a single list
"""
def __init__(self, layer: nn.Module, scale: Optional[float] = None):
super().__init__()
self.layer = layer
self.scale = scale
# PreNorm and PostNorm require all the tensors to be passed as a list
self.wrap_inputs = isinstance(layer, RequiresWrappedInputs)
def forward(self, inputs: List[torch.Tensor], **kwargs):
if self.scale is not None:
residue = inputs[0] * self.scale
else:
residue = inputs[0]
if self.wrap_inputs:
return residue + self.layer(inputs=inputs, **kwargs)
else:
return residue + self.layer(*inputs, **kwargs)
class PreNorm(nn.Module, RequiresWrappedInputs):
"""Adds a normalization before computing attention
..Note: If a list of inputs is passed, all of them get normalized"""
def __init__(
self,
d_norm: int,
sublayer: nn.Module,
normalization: NormalizationType,
use_triton: bool = True,
):
super().__init__()
if (
_is_triton_available()
and use_triton
and normalization == NormalizationType.LayerNorm
):
self.norm: Union[nn.LayerNorm, FusedLayerNorm] = FusedLayerNorm(d_norm)
else:
self.norm = get_normalization_layer(normalization)(d_norm)
self.sublayer = sublayer
self.wrap_inputs = isinstance(sublayer, RequiresWrappedInputs)
def forward(self, inputs: List[torch.Tensor], **kwargs):
assert len(inputs) > 0
# Perf improvement: if the inputs are all the same, only norm once
ids = [id(x) for x in inputs]
if ids.count(ids[0]) == len(ids):
# The same tensor is passed multiple times
x_norm = self.norm(inputs[0])
inputs_normed = [x_norm for _ in inputs]
else:
# The inputs differ, norm them all
inputs_normed = [self.norm(x_) for x_ in inputs]
if self.wrap_inputs:
return self.sublayer(inputs=inputs_normed, **kwargs)
else:
return self.sublayer(*inputs_normed, **kwargs)
class PostNorm(nn.Module, RequiresWrappedInputs):
"""Adds LayerNorm after computing attention"""
def __init__(
self,
d_norm: int,
sublayer: nn.Module,
normalization: NormalizationType,
use_triton: bool = True,
):
super().__init__()
if (
_is_triton_available()
and use_triton
and normalization == NormalizationType.LayerNorm
):
self.norm: Union[nn.LayerNorm, FusedLayerNorm] = FusedLayerNorm(d_norm)
else:
self.norm = get_normalization_layer(normalization)(d_norm)
self.sublayer = sublayer
self.wrap_inputs = isinstance(sublayer, RequiresWrappedInputs)
def forward(self, inputs: List[torch.Tensor], **kwargs):
if self.wrap_inputs:
x = self.sublayer(inputs=inputs, **kwargs)
else:
x = self.sublayer(*inputs, **kwargs)
return self.norm(x)
DeepNormCoefficients = namedtuple("DeepNormCoefficients", ["alpha", "beta"])
def get_deepnorm_coefficients(
encoder_layers: int, decoder_layers: int
) -> Tuple[Optional[DeepNormCoefficients], Optional[DeepNormCoefficients]]:
"""
See DeepNet_.
Returns alpha and beta depending on the number of encoder and decoder layers,
first tuple is for the encoder and second for the decoder
.. _DeepNet: https://arxiv.org/pdf/2203.00555v1.pdf
"""
N = encoder_layers
M = decoder_layers
if decoder_layers == 0:
# Encoder only
return (
DeepNormCoefficients(alpha=(2 * N) ** 0.25, beta=(8 * N) ** -0.25),
None,
)
elif encoder_layers == 0:
# Decoder only
return None, DeepNormCoefficients(alpha=(2 * M) ** 0.25, beta=(8 * M) ** -0.25)
else:
# Encoder/decoder
encoder_coeffs = DeepNormCoefficients(
alpha=0.81 * ((N**4) * M) ** 0.0625, beta=0.87 * ((N**4) * M) ** -0.0625
)
decoder_coeffs = DeepNormCoefficients(
alpha=(3 * M) ** 0.25, beta=(12 * M) ** -0.25
)
return (encoder_coeffs, decoder_coeffs)
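# NOTE: hedged usage sketch, not part of the original file. It shows a typical pre-norm
# wrapping of a sublayer on the residual path, plus the DeepNorm coefficients for an
# encoder-only stack.
def _residual_usage_sketch():
    d_model = 64
    sublayer = PreNorm(
        d_model, nn.Linear(d_model, d_model), NormalizationType.LayerNorm, use_triton=False
    )
    block = Residual(sublayer)
    x = torch.randn(2, 16, d_model)
    y = block([x])  # wrapped layers take their inputs as a single list
    encoder_coeffs, _ = get_deepnorm_coefficients(encoder_layers=12, decoder_layers=0)
    return y, encoder_coeffs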
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/residual.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
from xformers.components import RequiresWrappedInputs
# CREDITS: Code adapted from
# https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py
# https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py,
# https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
# pyre-fixme[13]: `cpu_state` is not initialized in the constructor.
class Deterministic(nn.Module):
def __init__(self, net: nn.Module):
super().__init__()
self.net = net
self.cpu_state: torch.Tensor = torch.get_rng_state()
self.cuda_in_fwd: bool = False
self.gpu_devices: List[int] = []
self.gpu_states: List[torch.Tensor] = []
self.wrap_inputs = isinstance(net, RequiresWrappedInputs)
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng: bool = False, set_rng: bool = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
# Normal FW run
if self.wrap_inputs:
return self.net(inputs=args, **kwargs)
else:
return self.net(*args, **kwargs)
else: # pragma: no cover # this is called in the backward pass, not picked up
# This is analogous to checkpointing, reset the original random state
rng_devices: List[int] = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
if self.wrap_inputs:
return self.net(inputs=args, **kwargs)
else:
return self.net(*args, **kwargs)
class ReversibleBlock(nn.Module):
def __init__(self, f: nn.Module, g: nn.Module, split_dim: int = -1):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
self.split_dim = split_dim
def forward(self, x: torch.Tensor, f_args={}, g_args={}):
x1, x2 = torch.chunk(x, 2, dim=-1)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=self.split_dim)
def backward_pass(
self, y: torch.Tensor, dy: torch.Tensor, f_args={}, g_args={}
): # pragma: no cover # this is covered, but called directly from C++
y1, y2 = torch.chunk(y, 2, dim=self.split_dim)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=self.split_dim)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=self.split_dim)
dx = torch.cat([dx1, dx2], dim=self.split_dim)
return x, dx
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, kwargs):
ctx.kwargs = kwargs
for block in blocks:
x = block(x, **kwargs)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(
ctx, dy
): # pragma: no cover # this is covered, but called directly from C++
y = ctx.y
kwargs = ctx.kwargs
for block in ctx.blocks[::-1]:
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class ReversibleSequence(nn.Module):
def __init__(self, blocks: nn.ModuleList):
super().__init__()
# pyre-fixme[23]: Unable to unpack `torch.nn.Module` into 2 values.
self.blocks = nn.ModuleList([ReversibleBlock(f, g) for f, g in blocks])
def forward(self, x, arg_route=(True, False), **kwargs):
f_args, g_args = map(lambda route: kwargs if route else {}, arg_route)
block_kwargs = {"f_args": f_args, "g_args": g_args}
return _ReversibleFunction.apply(x, self.blocks, block_kwargs)
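# NOTE: hedged usage sketch, not part of the original file. The input is chunked in two along
# the last dimension, so the feature size fed to the sequence is twice the width handled by
# each f/g sub-block.
def _reversible_usage_sketch():
    width = 64
    blocks = nn.ModuleList(
        [nn.ModuleList([nn.Linear(width, width), nn.Linear(width, width)]) for _ in range(3)]
    )
    seq = ReversibleSequence(blocks)
    x = torch.randn(4, 16, 2 * width)
    return seq(x)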
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/reversible.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Optional
import torch
from torch import nn
class Activation(str, Enum):
SquaredReLU = "squared_relu"
GeLU = "gelu"
LeakyReLU = "leaky_relu"
ReLU = "relu"
SmeLU = "smelu"
StarReLU = "star_relu"
# For unit testing / parity comparisons, probably not the fastest way
class SquaredReLU(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x: torch.Tensor) -> torch.Tensor:
x_ = torch.nn.functional.relu(x)
return x_ * x_
class StarReLU(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x: torch.Tensor) -> torch.Tensor:
x_ = torch.nn.functional.relu(x)
return 0.8944 * x_ * x_ - 0.4472
class SmeLU(nn.Module):
def __init__(self, beta: float = 2.0) -> None:
super().__init__()
self.beta = beta
def forward(self, x: torch.Tensor) -> torch.Tensor:
relu = torch.where(
x >= self.beta,
x,
torch.tensor([0.0], device=x.device, dtype=x.dtype),
)
return torch.where(
torch.abs(x) <= self.beta,
((x + self.beta) ** 2).type_as(x) / (4.0 * self.beta),
relu,
)
def build_activation(activation: Optional[Activation]):
if not activation:
return nn.Identity()
return {
Activation.ReLU: nn.ReLU,
Activation.GeLU: nn.GELU,
Activation.LeakyReLU: nn.LeakyReLU,
Activation.SquaredReLU: SquaredReLU,
Activation.StarReLU: StarReLU,
Activation.SmeLU: SmeLU,
}[activation]()
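# --- Usage sketch (not part of the original file) ---------------------------
# The activations above are plain nn.Modules, so they can be built from the
# enum and applied to any tensor. The shapes below are arbitrary.
def _activation_sketch() -> torch.Tensor:
    act = build_activation(Activation.SquaredReLU)
    x = torch.randn(2, 3)
    return act(x)  # equivalent to relu(x) ** 2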
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/activations.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import asdict, dataclass
from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch.nn.init import constant_
from xformers.components.attention import Attention
from xformers.components.input_projection import InputProjection, InputProjectionConfig
from xformers.components.positional_embedding import RotaryEmbedding
logger = logging.getLogger("xformers")
@dataclass
class MultiHeadDispatchConfig:
dim_model: int
num_heads: int
attention: Attention
bias: bool
residual_dropout: float
dim_key: Optional[int]
dim_value: Optional[int]
in_proj_container: Optional[InputProjection]
use_separate_proj_weight: Optional[bool]
use_rotary_embeddings: Optional[bool]
out_proj: Optional[nn.Module]
def __getitem__(self, item):
return getattr(self, item)
# Move head forward and fold into batch dim. dimensions become (B * nh, S, hs)
def _fold_heads(t: torch.Tensor, B: int, S: int, H: int, Hs: int):
return t.view(B, S, H, Hs).transpose(1, 2).flatten(start_dim=0, end_dim=1)
# Move head forward and fold into batch dim. dimensions become (B, nh, S, hs)
def _split_heads(t: torch.Tensor, B: int, S: int, H: int, Hs: int):
return t.view(B, S, H, Hs).transpose(1, 2)
class MultiHeadDispatch(nn.Module):
"""
A multi-head masked self-attention dispatch mechanism, with a projection at the end,
following the architecture proposed in `Attention is all you need`_, Vaswani et al.
The actual attention mechanism can vary, as well as the projections.
This can be used to wrap the proposed attention mechanisms and make them multi-head aware,
but it is optional.
Args:
dim_model: The model/embedding dimension
num_heads: The number of heads being used
attention: The attention mechanism (needs to be registered to the xformers library)
bias: Whether to use bias for the projections : (Q, K, V, Output)
residual_dropout: Amount of dropout on the residual path
use_separate_proj_weight: Use different weights for the Q, K, V projections
dim_key: Optionally use a different dimension for the key
dim_value: Optionally use a different dimension for the value
in_proj_container: Optionally provide the input projection module
use_rotary_embeddings: Use rotary embeddings
out_proj: Optionally provide the output projection module
.. _`Attention is all you need`: https://arxiv.org/abs/1706.03762v5
"""
def __init__(
self,
dim_model: int,
num_heads: int,
attention: Attention,
bias: Tuple[bool, bool, bool, bool] = (True, True, True, True),
residual_dropout: float = 0.0,
use_separate_proj_weight: bool = True,
dim_key: Optional[int] = None,
dim_value: Optional[int] = None,
in_proj_container: Optional[InputProjection] = None,
use_rotary_embeddings: Optional[bool] = False,
out_proj: Optional[nn.Module] = None,
*args,
**kwargs,
):
super().__init__()
if isinstance(bias, bool):
logger.warning(
"Single bias value provided for the MHA projections."
+ f" Assuming the same parameter ({bias}) is to be used everywhere"
)
bias = (bias, bias, bias, bias)
assert (
dim_model % num_heads == 0
) # static preset for now, each head works on 1/d the embeddings, could be relaxed
assert num_heads > 0
# Popular default is that all latent dimensions are the same
dim_key, dim_value = map(lambda x: x if x else dim_model, (dim_key, dim_value))
self.num_heads = num_heads
self.dim_key_head = dim_key // num_heads
self.dim_value_head = dim_value // num_heads
self.dim_model = dim_model
self.attention = attention
# key, query, value projections for all heads
# critical options are
# - are we sharing weights ?
# - are we adding biases ?
if attention.requires_input_projection:
self.in_proj_container = (
in_proj_container
if in_proj_container is not None
else InputProjection(
query_proj_params=InputProjectionConfig(
dim_model, dim_key, bias=bias[0]
),
key_proj_params=InputProjectionConfig(
dim_model, dim_key, bias=bias[1]
),
value_proj_params=InputProjectionConfig(
dim_model, dim_value, bias=bias[2]
),
use_separate_proj_weight=use_separate_proj_weight,
)
)
# Optional rotary embeddings
self.rotary_embeddings = (
RotaryEmbedding(self.dim_key_head) if use_rotary_embeddings else None
)
# Regularization
self.resid_drop = nn.Dropout(residual_dropout, inplace=False)
# Output projection
self.proj = (
out_proj if out_proj else nn.Linear(dim_model, dim_model, bias=bias[3])
)
if isinstance(self.proj, nn.Linear) and self.proj.bias is not None:
constant_(self.proj.bias, 0.0)
def forward(
self,
query: torch.Tensor,
key: Optional[torch.Tensor] = None,
value: Optional[torch.Tensor] = None,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Expected input dimensions are [batch size, sequence length, embed dim]
Output dimensions are [batch size, sequence length, embed dim]
"""
if key is None:
key = query
if value is None:
value = query
if query.shape[0] != key.shape[0] or query.shape[0] != value.shape[0]:
max_batch = max((query.shape[0], key.shape[0], value.shape[0]))
query, key, value = map(
lambda x: x.expand(max_batch, -1, -1), [query, key, value]
)
B, S_Q, _ = query.size() # Batch x Sequence x Embedding (latent)
_, S_K, _ = key.size() # K, Q's sequence length could differ
# Catch different query and key length but a causal attention
if S_Q != S_K:
assert (
not self.attention.requires_same_k_q_dimensions
), "This attention mechanism requires query and key to have the same sequence (context) lengths"
if hasattr(self.attention, "causal"):
assert not self.attention.causal, (
"Causal attention is not supported when key and query have different sequence lengths.\n"
+ "In that case causality is ill-determined. Please pad your sequences accordingly"
)
kw_mask_args = {}
if att_mask is not None:
assert (
self.attention.supports_attention_mask
), "This attention does not support attention masks"
kw_mask_args["att_mask"] = att_mask
if key_padding_mask is not None:
assert (
self.attention.supports_key_padding_mask
), "This attention does not support key padding masks"
kw_mask_args["key_padding_mask"] = key_padding_mask
if self.attention.requires_skip_multi_head:
return self.attention(query, key, value, **kw_mask_args)
# Calculate query, key, values for all heads in batch
if self.attention.requires_input_projection:
q, k, v = self.in_proj_container(query=query, key=key, value=value)
else:
k, q, v = key, query, value
# Check the dimensions properly
def check(t, name):
assert (
t.shape[2] % self.num_heads == 0
), f"the {name} embeddings need to be divisible by the number of heads"
check(q, "projected query")
check(v, "projected value")
check(k, "projected key")
# Optional: rotary embedding, add relative positioning information
if self.rotary_embeddings:
# rotary requires the head dimension
q = _split_heads(q, B, S_Q, self.num_heads, self.dim_key_head)
k = _split_heads(k, B, S_K, self.num_heads, self.dim_key_head)
v = _split_heads(v, B, S_K, self.num_heads, self.dim_value_head)
q, k = self.rotary_embeddings(q=q, k=k)
if not self.attention.requires_head_dimension:
q, k, v = q.flatten(0, 1), k.flatten(0, 1), v.flatten(0, 1)
else:
# Reshape k/q/v to either expose the heads, or fold the head dimension into the batch
reshape_fn = (
_split_heads if self.attention.requires_head_dimension else _fold_heads
)
q = reshape_fn(q, B, S_Q, self.num_heads, self.dim_key_head)
k = reshape_fn(k, B, S_K, self.num_heads, self.dim_key_head)
v = reshape_fn(v, B, S_K, self.num_heads, self.dim_value_head)
# Self-attend
y = self.attention(q, k, v, **kw_mask_args)
# Re-assemble all head outputs side by side
y = (
y.view(B, self.num_heads, S_Q, self.dim_value_head)
.transpose(1, 2)
.flatten(start_dim=2, end_dim=3)
)
# Output projection, dropout and good to go
y = self.resid_drop(self.proj(y))
# Return the same sequence size as the input
return y
@classmethod
def from_config(cls, config: MultiHeadDispatchConfig):
# Generate the class inputs from the config
fields = asdict(config)
# Skip all Nones so that default values are used
fields = {k: v for k, v in fields.items() if v is not None}
        return cls(**fields)
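# --- Usage sketch (not part of the original file) ---------------------------
# Wrapping a registered attention with MultiHeadDispatch. ScaledDotProduct and
# all numeric values below are assumptions picked for illustration only.
def _multi_head_dispatch_sketch() -> torch.Tensor:
    from xformers.components.attention import ScaledDotProduct

    mha = MultiHeadDispatch(
        dim_model=64,
        num_heads=4,
        attention=ScaledDotProduct(dropout=0.0, causal=False),
    )
    x = torch.randn(2, 16, 64)  # [batch, sequence, embedding]
    return mha(query=x, key=x, value=x)  # same shape as the input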
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/multi_head_dispatch.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import fields
from pathlib import Path
from typing import Any, Dict, Union
from xformers.utils import import_all_modules
from .activations import Activation, build_activation # noqa
from .attention import Attention, build_attention # noqa
from .input_projection import InputProjection, InputProjectionConfig # noqa
from .multi_head_dispatch import MultiHeadDispatch # noqa
from .multi_head_dispatch import MultiHeadDispatchConfig
from .patch_embedding import PatchEmbeddingConfig # noqa
from .patch_embedding import build_patch_embedding # noqa
from .residual import NormalizationType # noqa
from .residual import PostNorm # noqa
from .residual import PreNorm # noqa
from .residual import RequiresWrappedInputs # noqa
from .residual import Residual # noqa
from .residual import ResidualNormStyle # noqa
# automatically import any Python files in the directory
import_all_modules(str(Path(__file__).parent), "xformers.components")
def build_multi_head_attention(
multi_head_config: Union[MultiHeadDispatchConfig, Dict[str, Any]],
):
"""Builds a multihead attention from a config.
This assumes a 'name' key in the config which is used to determine what
attention class to instantiate. For instance, a config `{"name": "my_attention",
"foo": "bar"}` will find a class that was registered as "my_attention"
(see :func:`register_attention`) and call .from_config on it."""
if not isinstance(multi_head_config, MultiHeadDispatchConfig):
# Extract the required fields
field_names = list(map(lambda x: x.name, fields(MultiHeadDispatchConfig)))
# The missing fields get Noned
for k in field_names:
if k not in multi_head_config.keys():
multi_head_config[k] = None
# Could be that the attention needs to be instantiated
if not isinstance(multi_head_config["attention"], Attention):
# Convenience: fill in possible missing fields
if "num_heads" not in multi_head_config["attention"]:
multi_head_config["attention"]["num_heads"] = multi_head_config[
"num_heads"
]
if "dim_model" not in multi_head_config["attention"]:
multi_head_config["attention"]["dim_model"] = multi_head_config[
"dim_model"
]
if (
"dim_features" not in multi_head_config["attention"]
or multi_head_config["attention"]["dim_features"] is None
):
multi_head_config["attention"]["dim_features"] = (
multi_head_config["dim_model"] // multi_head_config["num_heads"]
)
multi_head_config["attention"] = build_attention(
multi_head_config["attention"]
)
multi_head_config = MultiHeadDispatchConfig(**multi_head_config)
return MultiHeadDispatch.from_config(multi_head_config)
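# --- Usage sketch (not part of the original file) ---------------------------
# Building a multi-head block purely from a dict, mirroring the keys consumed
# above. The "scaled_dot_product" name is assumed to be registered elsewhere in
# the library; all numeric values are illustrative.
def _build_multi_head_attention_sketch():
    config = {
        "dim_model": 64,
        "num_heads": 4,
        "residual_dropout": 0.0,
        "attention": {"name": "scaled_dot_product", "dropout": 0.0, "causal": False},
    }
    return build_multi_head_attention(config)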
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: Inspired by https://github.com/pytorch/text/blob/master/torchtext/nn/modules/multiheadattention.py
# and the MultiHeadAttention implementation from PyTorch
import logging
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
logger = logging.getLogger("xformers")
@dataclass
class InputProjectionConfig:
in_features: int
out_features: int
bias: bool
class InputProjection(nn.Module):
"""
Handle all the input projections in one go, opportunistically fuse some operations.
"""
def __init__(
self,
query_proj_params: InputProjectionConfig,
key_proj_params: Optional[InputProjectionConfig],
value_proj_params: Optional[InputProjectionConfig],
use_separate_proj_weight: bool = True,
):
super().__init__()
self.out_features = query_proj_params.out_features
        # Each input gets a separate projection
self.q_proj = nn.Linear(
query_proj_params.in_features,
query_proj_params.out_features,
query_proj_params.bias,
)
if key_proj_params is not None:
self.k_proj = nn.Linear(
key_proj_params.in_features,
key_proj_params.out_features,
key_proj_params.bias,
)
else:
logger.info(
"No Key projection parameters were passed, assuming that the weights"
+ " are shared with the query projection"
)
self.k_proj = self.q_proj
if value_proj_params is not None:
self.v_proj = nn.Linear(
value_proj_params.in_features,
value_proj_params.out_features,
value_proj_params.bias,
)
else:
logger.info(
"No Value projection parameters were passed, assuming that the weights"
+ " are shared with the query projection"
)
self.v_proj = self.q_proj
if not use_separate_proj_weight:
# Compute optimization used at times, share the parameters in between Q/K/V
with torch.no_grad():
self.k_proj.weight = self.q_proj.weight
self.v_proj.weight = self.q_proj.weight
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
# One projection per input tensor
# NOTE: Would it make sense to catch self attention + shared weights, to skip a projection step ?
q, k, v = map(
lambda fn, x: fn(x),
[self.q_proj, self.k_proj, self.v_proj],
[query, key, value],
)
return q, k, v
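# --- Usage sketch (not part of the original file) ---------------------------
# Projecting query/key/value with separate weights, configured through
# InputProjectionConfig. The feature sizes are arbitrary example values.
def _input_projection_sketch():
    proj = InputProjection(
        query_proj_params=InputProjectionConfig(in_features=64, out_features=64, bias=True),
        key_proj_params=InputProjectionConfig(in_features=64, out_features=64, bias=True),
        value_proj_params=InputProjectionConfig(in_features=64, out_features=64, bias=True),
    )
    x = torch.randn(2, 16, 64)
    q, k, v = proj(query=x, key=x, value=x)
    return q.shape, k.shape, v.shape  # all torch.Size([2, 16, 64])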
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/input_projection.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
from enum import Enum
import torch
class PoolType(str, Enum):
Conv2D = "CONV_2D"
# ...
# TODO: Support more cases ?
@dataclass
class PatchEmbeddingConfig:
"""
The configuration for the patch embedding layer, which takes the raw token passed in
and returns a pooled representation along a given embedding dimension.
    This typically trades the spatial (context length) representation for the embedding size.
    This is canonically used by ViT, but other papers (like MetaFormer or other hierarchical transformers)
    propose a more general use of this block.
"""
in_channels: int
out_channels: int
kernel_size: int
stride: int
padding: int = 0
pool_type: PoolType = PoolType.Conv2D
class ConditionalReshape(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
if x.ndim == 3:
B, HW, C = x.shape
# NOTE: We're assuming a square sample here
H = int(math.sqrt(HW))
assert H * H == HW, f"{H, HW}"
x = x.transpose(1, 2).reshape(B, C, H, H)
return x
class PatchToSequence(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
return x.flatten(2, 3).transpose(1, 2).contiguous() # B HW C
def build_patch_embedding(config: PatchEmbeddingConfig):
if not isinstance(config, PatchEmbeddingConfig):
config = PatchEmbeddingConfig(**config)
if config.pool_type == PoolType.Conv2D:
pool = torch.nn.Conv2d(
config.in_channels,
config.out_channels,
kernel_size=config.kernel_size,
stride=config.stride,
padding=config.padding,
)
else:
raise NotImplementedError
# The patch embedding supposes that the input really is 2D in essence
# If this block is in the middle of a stack, we need to reshape
return torch.nn.Sequential(ConditionalReshape(), pool, PatchToSequence())
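# --- Usage sketch (not part of the original file) ---------------------------
# Turning a batch of 32x32 RGB images into a sequence of patch tokens. The
# patch size and embedding dimension are illustrative assumptions.
def _patch_embedding_sketch():
    embed = build_patch_embedding(
        PatchEmbeddingConfig(in_channels=3, out_channels=96, kernel_size=2, stride=2)
    )
    images = torch.randn(8, 3, 32, 32)
    tokens = embed(images)  # (8, 256, 96), i.e. B x HW x C
    return tokens.shape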
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/patch_embedding.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional, Union
import torch
import torch.nn as nn
from xformers.components.attention import (
Attention,
AttentionConfig,
AttentionMask,
maybe_sparsify,
register_attention,
sparsify,
)
from xformers.components.attention.attention_patterns import (
causal_1d_pattern,
global_token_pattern,
)
from xformers.components.attention.core import scaled_dot_product_attention
@dataclass
class GlobalAttentionConfig(AttentionConfig):
attention_query_mask: torch.Tensor # Mark the queries which have global attention
causal: Optional[bool]
force_sparsity: Optional[bool]
@register_attention("global", GlobalAttentionConfig)
class GlobalAttention(Attention):
def __init__(
self,
dropout: float,
attention_query_mask: torch.Tensor,
causal: bool = False,
force_sparsity: bool = False,
*_,
**__,
):
r"""
Global attention, as proposed for instance in BigBird_ or Longformer_.
        Global means, in this case, that the queries positively labelled in the ``attention_query_mask`` can attend
        to all the other queries, while the negatively labelled queries cannot attend to
        any other query.
This implementation is sparse-aware, meaning that the empty attention parts will not be represented in memory.
Args:
dropout (float): probability of an element to be zeroed
attention_query_mask (torch.Tensor): if true, this query can attend to all the others
"""
super().__init__()
assert attention_query_mask.dtype == torch.bool, "A boolean mask is expected"
assert (
attention_query_mask.shape[1] == 1
and attention_query_mask.shape[0] > attention_query_mask.shape[1]
), "A N x 1 query mask is expected"
self.attn_drop = nn.Dropout(dropout, inplace=False)
self.attention_mask = global_token_pattern(attention_query_mask[:, 0])
self.force_sparsity = force_sparsity
if causal:
            # The query mask is N x 1, so the sequence length is its first dimension
            self.attention_mask &= causal_1d_pattern(attention_query_mask.shape[0])
self.attention_mask = (
sparsify(self.attention_mask)
if self.force_sparsity
else maybe_sparsify(self.attention_mask)
)
# Properties specific to this attention mechanism
self.requires_same_k_q_dimensions = True
self.supports_attention_mask = False
self.supports_key_padding_mask = False
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[Union[torch.Tensor, AttentionMask]] = None,
*_,
**__,
):
# Make sure that the mask is on the right device
if self.attention_mask.device != q.device:
self.attention_mask = self.attention_mask.to(q.device)
# Mask-aware attention
if att_mask is not None:
if att_mask.dtype == torch.bool and isinstance(
self.attention_mask, AttentionMask
):
if not isinstance(att_mask, AttentionMask):
att_mask = AttentionMask.from_bool(att_mask)
mask = self.attention_mask + att_mask
else:
mask = self.attention_mask & att_mask
else:
mask = self.attention_mask
# Handle q/k/v which would not fit the mask
seq_len = q.shape[-2]
q_, k_, v_ = map(lambda x: self._maybe_pad_sequence(x, mask), (q, k, v))
# Normal attention with the global tokens mask
att = scaled_dot_product_attention(
q=q_, k=k_, v=v_, att_mask=mask, dropout=self.attn_drop
)
        # Take into account a hypothetical padding added above
return att[:, :seq_len, :]
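# --- Usage sketch (not part of the original file) ---------------------------
# Marking the first two positions as global tokens. Depending on the density of
# the resulting pattern, the mask may be stored as a SparseCS object, in which
# case the xformers sparse kernels are assumed to be available. Sizes are
# illustrative only.
def _global_attention_sketch():
    seq, dim = 16, 32
    attention_query_mask = torch.zeros(seq, 1, dtype=torch.bool)
    attention_query_mask[:2] = True
    attention = GlobalAttention(dropout=0.0, attention_query_mask=attention_query_mask)
    q = torch.randn(2, seq, dim)
    return attention(q, q, q).shape  # (2, seq, dim)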
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/global_tokens.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Optional, Union
import torch
import torch.autograd.profiler as profiler
import torch.nn as nn
import torch.nn.functional as Fn
from xformers.components.attention import (
Attention,
AttentionConfig,
AttentionMask,
register_attention,
)
from xformers.components.attention.core import (
scaled_dot_product_attention,
scaled_query_key_softmax,
)
logger = logging.getLogger("xformers")
class LandmarkSelection(str, Enum):
Orthogonal = "orthogonal"
KMeans = "kmeans"
KMeans_Spherical = "kmeans_spherical"
Random = "random"
@dataclass
class OrthoformerAttentionConfig(AttentionConfig):
"""
    num_landmarks: Number of landmarks to use for the softmax approximation.
    subsample_fraction: Percentage of the q_samples matrix to sample per iteration.
    landmark_selection: Landmark selection strategy.
"""
num_landmarks: Optional[int]
subsample_fraction: Optional[float]
landmark_selection: Optional[LandmarkSelection]
@register_attention("orthoformer", OrthoformerAttentionConfig)
class OrthoFormerAttention(Attention):
def __init__(
self,
dropout: float,
num_landmarks: int = 32,
subsample_fraction: float = 1.0,
landmark_selection: LandmarkSelection = LandmarkSelection.Orthogonal,
*args,
**kwargs,
):
"""
Orthoformer_ attention mechanism.
::
"Keeping Your Eye on the Ball: Trajectory Attention in Video Transformers"
Patrick, M., Campbell, D., Asano, Y., Misra, I., Metze, F., Feichtenhofer,
C., Vedaldi, A., Henriques, J. (2021)
Reference codebase: https://github.com/facebookresearch/Motionformer
.. _Orthoformer: https://arxiv.org/abs/2106.05392
"""
super().__init__()
self.num_landmarks = num_landmarks
self.attn_drop = nn.Dropout(dropout)
self.subsample_fraction = subsample_fraction
self.landmark_selection = landmark_selection
# Properties specific to this attention mechanism
self.supports_attention_mask = True
self.supports_key_padding_mask = False
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[Union[AttentionMask, torch.Tensor]] = None,
*args,
**kwargs,
):
N = k.shape[1]
if self.num_landmarks == N:
# Default attention
x = scaled_dot_product_attention(q, k, v, att_mask)
else:
with torch.no_grad(), profiler.record_function("select landmarks"):
if self.landmark_selection == LandmarkSelection.Orthogonal:
landmarks = self._compute_orthogonal_landmarks(q)
elif self.landmark_selection == LandmarkSelection.Random:
half_L = self.num_landmarks // 2
landmarks_q = q[:, torch.randint(q.size(1), (half_L,)), :]
landmarks_k = k[:, torch.randint(k.size(1), (half_L,)), :]
landmarks = torch.cat((landmarks_q, landmarks_k), dim=-2)
elif self.landmark_selection == LandmarkSelection.KMeans:
landmarks = self._cluster_landmarks(q)
elif self.landmark_selection == LandmarkSelection.KMeans_Spherical:
landmarks = self._cluster_landmarks(q, spherical=True)
if att_mask is not None:
logger.warning(
"Orthoformer: attention mask passed alongside with using landmarks to reduce dimensions. \
The two are typically not compatible"
)
# FIXME: Should we still accept a mask in that case ?
att_mask = None
# pyre-ignore[61]: TODO(T103337542): `landmarks` mistakenly seems
# like it could be uninitialized.
kernel_1 = scaled_query_key_softmax(q, landmarks, att_mask)
# pyre-ignore[61]: TODO(T103337542): `landmarks` mistakenly seems
# like it could be uninitialized.
kernel_2 = scaled_query_key_softmax(landmarks, k, att_mask)
x = torch.matmul(kernel_1, torch.matmul(kernel_2, v))
x = self.attn_drop(x)
return x
def _cluster_landmarks(
self,
q: torch.Tensor,
spherical: bool = False,
num_iters: int = 6,
) -> torch.Tensor:
"""
        Construct a set of landmarks by k-means clustering of the
        (optionally sub-sampled) queries.
        Returns landmarks with shape (B, M, D).
"""
num_landmarks = min(self.num_landmarks, q.shape[1])
if self.subsample_fraction < 1.0:
num_samples = max(
int(self.subsample_fraction * q.size(-2)), num_landmarks
) # Need at least M/2 samples of queries and keys
q_samples = q[:, torch.randint(q.size(-2), (num_samples,)), :] # (B, N, D)
else:
q_samples = q # (B, N, D)
if spherical:
q_samples_normalized = Fn.normalize(
q_samples, p=2, dim=-1
) # may need to change default eps to eps=1e-8 for mixed precision compatibility
landmarks = self._kmeans_spherical(
q_samples_normalized, num_landmarks, num_iters
)
else:
landmarks = self._kmeans(q_samples, num_landmarks, num_iters)
return landmarks # (B, M, D)
def _kmeans(self, x: torch.Tensor, K: int, num_iters: int = 10):
"""
Arguments:
x: (B, N, D)
K: number of clusters
num_iters: the number of kmeans updates
"""
B, N, D = x.size()
assert K <= N, f"{K} > {N}"
c = x[
:, torch.randperm(N, device=x.device)[:K], :
].clone() # initialisation for the centroids
with profiler.record_function("kmeans"):
x_i = x.view(B, N, 1, D)
c_j = c.view(B, 1, K, D)
counts = c.new_zeros(B, K)
ones = x.new_ones((B, N))
for _ in range(num_iters):
# E step: assign points to the nearest cluster
D_ij = ((x_i - c_j) ** 2).sum(-1) # (B, N, K) squared distances
cl = D_ij.argmin(
dim=-1, keepdim=True
).long() # (B, N, 1) index of point to nearest cluster
# M step: update the centroids
c.zero_()
c.scatter_add_(-2, cl.repeat(1, 1, D), x) # sum of points per cluster
counts.fill_(1e-6) # avoid div0
counts.scatter_add_(
-1, cl.squeeze(-1), ones
) # number of points per cluster
c.divide_(counts.unsqueeze(-1)) # compute the average
return c
def _kmeans_spherical(self, x: torch.Tensor, K: int, num_iters=10):
"""
Arguments:
x: (B, N, D)
"""
B, N, D = x.size()
assert K <= N, f"{K} > {N}"
# initialisation for the centroids
c = x[:, torch.randperm(N, device=x.device)[:K], :].clone()
with profiler.record_function("kmeans_spherical"):
counts = c.new_zeros(B, K)
ones = x.new_ones((B, N))
for _ in range(num_iters):
# E step: assign points to the nearest cluster
D_ij = torch.matmul(
x, c.transpose(-2, -1)
) # (B, N, K) cosine similarity
cl = D_ij.argmax(
dim=-1, keepdim=True
).long() # (B, N, 1) index of point to nearest cluster
# M step: update the centroids
c.zero_()
c.scatter_add_(-2, cl.repeat(1, 1, D), x) # sum of points per cluster
counts.fill_(1e-6) # avoid div0
counts.scatter_add_(
-1, cl.squeeze(-1), ones
) # number of points per cluster
c.divide_(counts.unsqueeze(-1)) # compute the average
c = Fn.normalize(c, p=2, dim=-1) # renormalise
return c
def _compute_orthogonal_landmarks(self, q: torch.Tensor) -> torch.Tensor:
"""
Construct set of landmarks by recursively selecting new landmarks
that are maximally orthogonal to the existing set.
Returns near orthogonal landmarks with shape (B, M, D).
"""
if self.subsample_fraction < 1.0:
# Need at least M samples of queries
num_samples = max(
int(self.subsample_fraction * q.size(-2)), self.num_landmarks
)
q_samples = q[
:, torch.randint(q.size(-2), (num_samples,), device=q.device), :
]
else:
# (B, N, D)
q_samples = q
# may need to change default eps to eps=1e-8 for mixed precision compatibility
q_samples_normalized = Fn.normalize(q_samples, p=2, dim=-1)
B, N, D = q_samples_normalized.shape
selected_mask = torch.zeros((B, N, 1), device=q_samples_normalized.device)
landmark_mask = torch.ones(
(B, 1, 1), dtype=selected_mask.dtype, device=q_samples_normalized.device
)
# Get initial random landmark
random_idx = torch.randint(
q_samples_normalized.size(-2), (B, 1, 1), device=q_samples_normalized.device
)
selected_mask.scatter_(-2, random_idx, landmark_mask)
# Selected landmarks
selected_landmarks = torch.empty(
(B, self.num_landmarks, D),
device=q_samples_normalized.device,
dtype=q_samples_normalized.dtype,
)
selected_landmarks[:, 0, :] = q_samples_normalized[
torch.arange(q_samples_normalized.size(0)), random_idx.view(-1), :
].view(B, D)
# Store computed cosine similarities
cos_sims = torch.empty(
(B, N, self.num_landmarks),
device=q_samples_normalized.device,
dtype=q_samples_normalized.dtype,
)
for M in range(1, self.num_landmarks):
with profiler.record_function("find new landmark"):
# Calculate absolute cosine similarity between selected and unselected landmarks
# (B, N, D) * (B, D) -> (B, N)
cos_sims[:, :, M - 1] = torch.einsum(
"b n d, b d -> b n",
q_samples_normalized,
selected_landmarks[:, M - 1, :],
).abs()
# (B, N, M) cosine similarities of current set of landmarks wrt all queries and keys
cos_sim_set = cos_sims[:, :, :M]
# Get orthogonal landmark: landmark with smallest absolute cosine similarity:
# set cosine similarity for already selected landmarks to > 1
cos_sim_set.view(-1, M)[selected_mask.flatten().bool(), :] = 10
                # (B,) for each batch, pick the query whose largest similarity to the selected set is smallest
selected_landmark_idx = cos_sim_set.amax(-1).argmin(-1)
# Add most orthogonal landmark to selected landmarks:
selected_landmarks[:, M, :] = q_samples_normalized[
torch.arange(q_samples_normalized.size(0)), selected_landmark_idx, :
].view(B, D)
                # Remove the selected index from the non-selected mask:
selected_mask.scatter_(
-2, selected_landmark_idx.unsqueeze(-1).unsqueeze(-1), landmark_mask
)
# (B, M, D)
landmarks = torch.masked_select(q_samples, selected_mask.bool()).reshape(
B, -1, D
)
return landmarks # (B, M, D)
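# --- Usage sketch (not part of the original file) ---------------------------
# Approximating the attention matrix through a handful of orthogonal landmarks.
# The shapes are assumptions; inputs are already head-folded, [B * heads, S, D].
def _orthoformer_sketch():
    attention = OrthoFormerAttention(
        dropout=0.0,
        num_landmarks=16,
        landmark_selection=LandmarkSelection.Orthogonal,
    )
    q = torch.randn(2, 64, 32)
    return attention(q, q, q).shape  # (2, 64, 32)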
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/ortho.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from dataclasses import dataclass
import torch
from xformers import _is_triton_available
from xformers.components.attention import Attention, AttentionConfig, register_attention
logger = logging.getLogger("xformers")
_is_blocksparse_available = _is_triton_available()
if _is_blocksparse_available:
from triton.ops.blocksparse import matmul as blocksparse_matmul # type: ignore
from triton.ops.blocksparse import softmax as blocksparse_softmax # type: ignore
from xformers.triton.utils import gpu_capabilities_older_than_70
# Blocksparse requires Tensor cores
if gpu_capabilities_older_than_70():
logger.warning(
"Blocksparse is not available: the current GPU does not expose Tensor cores"
)
_is_blocksparse_available = False
if _is_blocksparse_available:
@dataclass
class BlockSparseAttentionConfig(AttentionConfig):
layout: torch.Tensor # The dimensions of the random features
block_size: int
dropout: float
num_heads: int
@register_attention("blocksparse", BlockSparseAttentionConfig)
class BlockSparseAttention(Attention):
r"""
Thin wrap over the Triton blocksparse computations. The sparsity pattern is determined through the layout.
.. warning: the layout is assumed to have the dimensions [heads, seq, seq].
If some dimensions are missing, we assume that the same layout is to be used across heads.
.. warning: for now, the sequence (context) length has to be a power of two. This constraint could
be relaxed in the future.
        .. warning: the block size has to be picked from [16, 32, 64, 128]. Some speed is gained from bigger blocks.
It is of course possible to reproduce coarser patterns given these primitives, as the user sees fit.
"""
def __init__(
self,
layout: torch.Tensor,
block_size: int = 16,
dropout: float = 0.0,
num_heads: int = 1, # optional, used to adapt the layout if in need
causal: bool = False,
*args,
**kwargs,
):
if layout.dim() == 2:
logger.warning(
"The layout passed is lacking a head dimension and a batch dimension"
)
logger.warning(
"Now assuming that the same layout is to be used across all heads"
)
layout = layout.unsqueeze(0).expand(num_heads, -1, -1)
logger.warning(f"New layout dimensions: {layout.shape}")
assert block_size in (
16,
32,
64,
128,
), "Only block sizes in [16, 32, 64, 128] are supported"
super().__init__()
self.causal = causal
self.attn_drop = torch.nn.Dropout(dropout, inplace=False)
# Pure blocksparse data
self.layout = layout
self.block_size = block_size
# make sure that the head dimension is not folded down with the batch
self.requires_head_dimension = True
# key padding mask and attention mask must be passed in separately
self.requires_same_k_q_dimensions = True
# The underlying triton op does not support per element attention mask
self.supports_attention_mask = False
self.supports_key_padding_mask = False
def create_triton_kernels(self, device):
# blocksparse operators
self.sparse_dot_sdd = blocksparse_matmul(
self.layout,
self.block_size,
"sdd",
trans_a=False,
trans_b=True,
device=device,
)
self.sparse_dot_dsd = blocksparse_matmul(
self.layout,
self.block_size,
"dsd",
trans_a=False,
trans_b=False,
device=device,
)
self.sparse_softmax = blocksparse_softmax(
self.layout,
self.block_size,
device=device,
)
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
scale: float = 1.0,
*args,
**kwargs,
) -> torch.Tensor:
assert (
"att_mask" not in kwargs.keys() and "att_mask" not in args
), "This attention does not support an attention mask, but you can specify causality."
r"""
            A thin wrap around the Triton blocksparse attention operation
.. note: Per element attention mask is not supported, but you can specify causality
"""
# Delayed triton init, to make sure that we get the right device
# Infer device from query
if not hasattr(self, "sparse_dot_sdd"):
self.create_triton_kernels(q.device)
assert (
q.shape[-2] == k.shape[-2]
), "Blocksparse requires the same dimensions for K and Q for now"
assert (
q.shape[-2] == self.layout.shape[-2] * self.block_size
), "Actual sequence size and layout are inconsistent"
assert (
k.shape[-2] == self.layout.shape[-2] * self.block_size
), "Actual sequence size and layout are inconsistent"
assert (
q.shape[-2] % self.block_size
) == 0, "Sequence length {} must be a multiple of block size {}".format(
q.shape[-2], self.block_size
)
# Self-attend: (B, nh, S, hs) x (B, nh, hs, S) -> (B, nh, S, S)
# When the computations are block sparse, the matrix types change along the way:
# - (sparse) attention matrix = (dense) Kt * (dense) Q
q = q / math.sqrt(q.size(-1))
sparse_att_mat = self.sparse_dot_sdd(q, k)
# - softmax on the sparse attention matrix
sparse_att_mat = self.sparse_softmax(
sparse_att_mat, scale=scale, is_causal=self.causal
)
sparse_att_mat = self.attn_drop(sparse_att_mat)
# - then (dense) attention is (sparse) attention matrix * dense (value)
a = self.sparse_dot_dsd(sparse_att_mat, v)
return a
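# --- Usage sketch (not part of the original file) ---------------------------
# BlockSparseAttention relies on Triton kernels, so this sketch assumes a CUDA
# device with Tensor cores. The lower-triangular layout and all sizes below are
# illustrative only.
def _blocksparse_sketch():
    if not _is_blocksparse_available or not torch.cuda.is_available():
        return None
    num_heads, seq, block = 4, 256, 16
    blocks_per_dim = seq // block
    layout = torch.tril(
        torch.ones(num_heads, blocks_per_dim, blocks_per_dim, dtype=torch.long)
    )
    attention = BlockSparseAttention(layout=layout, block_size=block, causal=True)
    q = torch.randn(2, num_heads, seq, 64, device="cuda", dtype=torch.float16)
    return attention(q, q, q).shape  # (2, num_heads, seq, 64)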
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/blocksparse.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional, Union
import torch
import torch.nn as nn
from xformers.components.attention import (
Attention,
AttentionConfig,
AttentionMask,
maybe_sparsify,
register_attention,
sparsify,
)
from xformers.components.attention.attention_patterns import (
causal_1d_pattern,
local_1d_pattern,
)
from xformers.components.attention.core import scaled_dot_product_attention
@dataclass
class LocalAttentionConfig(AttentionConfig):
causal: Optional[bool] = None
window_size: Optional[int] = None
force_sparsity: Optional[bool] = None
@register_attention("local", LocalAttentionConfig)
class LocalAttention(Attention):
def __init__(
self,
dropout: float = 0.0,
causal: bool = False,
window_size: int = 5,
force_sparsity: bool = False,
*args,
**kwargs,
):
r"""
An implementation of a sliding window attention, as proposed in RoutingTransformer_, LongFormer_ or BigBird_
Args:
dropout (float): the probability of an output to be randomly dropped at training time
causal (bool): apply a causal mask, in that the attention cannot be applied to the future
window_size (int): the overall window size for local attention.
Odd number is expected if the mask is not causal, as the window size will be evenly
distributed on both sides of each query
.. _RoutingTransformer: https://arxiv.org/pdf/2003.05997.pdf
.. _BigBird: https://arxiv.org/pdf/2007.14062.pdf
.. _Longformer: https://arxiv.org/pdf/2004.05150.pdf
"""
super().__init__()
self.attn_drop = nn.Dropout(dropout, inplace=False)
self.causal = causal
self.force_sparsity = force_sparsity
if not self.causal:
assert (
window_size % 2 == 1
), "The window size is assumed to be odd (counts self-attention + 2 wings)"
self.window_size = window_size
self.attention_mask: Optional[torch.Tensor] = None
self.requires_same_k_q_dimensions = True
# Properties specific to this attention mechanism
self.supports_attention_mask = True
self.supports_key_padding_mask = False
def _get_local_mask(self, shape: torch.Size) -> torch.Tensor:
window_size = self.window_size * 2 + 1 if self.causal else self.window_size
mask = local_1d_pattern(shape[1], window_size)
if self.causal:
mask &= causal_1d_pattern(shape[1])
mask = sparsify(mask) if self.force_sparsity else maybe_sparsify(mask)
return mask
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[Union[torch.Tensor, AttentionMask]] = None,
*args,
**kwargs,
):
# Local window attention masking
if self.attention_mask is None or self.attention_mask.shape[1] != q.shape[1]:
self.attention_mask = self._get_local_mask(q.shape).to(q.device)
# Take into account the optional user mask
if att_mask is None:
mask = self.attention_mask
else:
if isinstance(att_mask, AttentionMask):
# Needed because & op not defined for SparseCS with AttentionMask
att_mask = att_mask.to_bool()
mask = self.attention_mask & att_mask
return scaled_dot_product_attention(
q=q, k=k, v=v, att_mask=mask, dropout=self.attn_drop
)
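# --- Usage sketch (not part of the original file) ---------------------------
# Sliding-window attention over a toy sequence. With a window that is small
# relative to the context, the mask is stored as SparseCS, so the xformers
# sparse kernels are assumed to be available; all sizes are illustrative.
def _local_attention_sketch():
    attention = LocalAttention(dropout=0.0, causal=False, window_size=5)
    q = torch.randn(2, 32, 16)
    return attention(q, q, q).shape  # (2, 32, 16)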
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/local.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Credits: this is heavily inspired by the official implementation, present in
# https://github.com/sarthmit/Compositional-Attention
# Original author: Sarthak Mittal
# This is a simplified version, for the sake of clarity, and because some features could be exposed later
# via the library directly.
# In particular, code paths for TPUs, quantization and gumbel softmax have been removed
# We're also following the same dimension ordering as in the rest of the xformers library
# which is to say [Batch, Sequence, Embedding] wherever possible
import math
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from xformers.components.attention import (
Attention,
AttentionConfig,
AttentionMask,
register_attention,
)
from xformers.components.attention.core import _softmax
from xformers.components.input_projection import InputProjection, InputProjectionConfig
def _either_or(a: Optional[int], b: int) -> int:
return a if a is not None else b
@dataclass
class CompositionalAttentionConfig(AttentionConfig):
dim_model: int
num_heads: int
dim_attn: Optional[int] = None
num_rules: Optional[int] = None
dim_key: Optional[int] = None
dim_value: Optional[int] = None
dim_selection: Optional[int] = None
dropout: float
qk_rule: bool = False
nonlinear: bool = False
q_compose: bool = False
bias: bool = True
causal: Optional[bool] = False
in_proj_container: Optional[InputProjection] = None
use_separate_proj_weight: Optional[bool] = False
@register_attention("compositional", CompositionalAttentionConfig)
class CompositionalAttention(Attention):
"""Compositional Attention, as proposed in
"Compositional Attention: Disentangling search and retrieval"_, S. Mittal et al.
A key insight from this proposal is that the attention mechanism can be conceived as two steps:
a search and a retrieval operation. When queried, the model can search for the most relevant information
(Softmax(QKt)), then retrieve information given the Value.
Contrary to the original attention proposal, which does not consider interactions in between heads,
the compositional attention will consider all possible interactions and softmax over that dimension,
so that the information retrieved covers the most relevant dimensions. The number of heads and rules to
use is thus typically smaller than for a comparable traditional Transformer, and asking for the same number of heads
may not fit in memory.
Args:
dim_model: dimension of the incoming latent space
num_heads: number of heads *for the search operation*
dim_attn: dimension (embedding) of the attention
num_rules: number of rules to consider *for the retrieval operation*
dim_selection: dimension of the scoring/selection space for the retrievals
dim_key, dim_value: dimensions of K and V, if different from Q
dropout: attention dropout probability
qk_rule: QK product will drive the retrieval process
nonlinear: use a non linear method to score the retrievals
bias: use bias in the initial projection step
causal: causal computations (attend to the past only)
_"Compositional Attention: Disentangling search and retrieval": https://arxiv.org/pdf/2110.09419v1.pdf
"""
def __init__(
self,
dim_model: int,
num_heads: int,
dim_attn: Optional[int] = None,
num_rules: Optional[int] = None,
dim_selection: Optional[int] = None,
dim_key: Optional[int] = None,
dim_value: Optional[int] = None,
dropout=0.0,
qk_rule=False,
nonlinear=False,
q_compose=False,
in_proj_container: Optional[InputProjection] = None,
use_separate_proj_weight: Optional[bool] = False,
bias=True,
causal=False,
*_,
**__,
):
super().__init__()
# Define the inherited flags
self.requires_skip_multi_head = (
True # This attention owns the multi-head mechanism
)
# Handle defaults / undefined values
self.dim_model = dim_model
num_rules = _either_or(num_rules, num_heads)
dim_selection = _either_or(dim_selection, dim_model // num_heads)
# All the initial definition plumbing
dim_attn = _either_or(dim_attn, dim_model)
dim_key = _either_or(dim_key, dim_model)
dim_value = _either_or(dim_value, dim_model)
self.in_proj_container = (
in_proj_container
if in_proj_container is not None
else InputProjection(
query_proj_params=InputProjectionConfig(dim_model, dim_key, bias=bias),
key_proj_params=InputProjectionConfig(dim_model, dim_key, bias=bias)
if use_separate_proj_weight
else None,
value_proj_params=InputProjectionConfig(dim_model, dim_value, bias=bias)
if use_separate_proj_weight
else None,
)
)
self.num_heads = num_heads
self.num_rules = num_rules
self.qk_rule = qk_rule
self.dim_selection = dim_selection
self.nonlinear = nonlinear
self.q_compose = q_compose
self.dropout_module = nn.Dropout(dropout)
self.dim_head = dim_model // num_heads
self.value_dim = dim_attn // num_rules
assert (
self.value_dim * num_rules == dim_attn
), "value_dim must be divisible by num_rules"
self.scaling = self.dim_head**-0.5
self.scaling_values = self.dim_selection**-0.5
self.out_proj = nn.Linear(self.num_heads * self.value_dim, dim_model, bias=bias)
if self.qk_rule:
self.value_k = nn.Linear(self.value_dim, self.dim_selection, bias=bias)
if self.q_compose:
self.value_q = nn.Linear(self.dim_head, self.dim_selection, bias=bias)
else:
self.value_q = nn.Linear(
dim_model, self.dim_selection * self.num_heads, bias=bias
)
else:
if self.q_compose:
self.value_q = nn.Linear(self.dim_head, self.dim_selection, bias=bias)
else:
self.value_q = nn.Linear(
dim_model, self.dim_selection * self.num_heads, bias=bias
)
if self.nonlinear:
self.score_network: nn.Module = nn.Sequential(
nn.Linear(
self.dim_selection + self.value_dim,
self.dim_selection,
bias=bias,
),
nn.ReLU(),
nn.Linear(self.dim_selection, 1, bias=bias),
)
else:
self.score_network = nn.Linear(
self.dim_selection + self.value_dim, 1, bias=bias
)
self.causal = causal
# Properties specific to this attention mechanism
self.supports_attention_mask = True
self.supports_key_padding_mask = False
self._reset_parameters()
def _reset_parameters(self):
# NOTE: in_proj_container is already initialized
if self.qk_rule:
nn.init.xavier_uniform_(self.value_k.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.value_q.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.value_q.weight)
if self.nonlinear:
nn.init.xavier_uniform_(self.score_network[0].weight)
nn.init.xavier_uniform_(self.score_network[2].weight)
else:
nn.init.xavier_uniform_(self.score_network.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
def forward(
self,
q: Tensor,
k: Tensor,
v: Tensor,
att_mask: Optional[Tensor] = None,
*args,
**kwargs,
) -> Tensor:
"""
        Input shape: Batch x Sequence x Embedding, following the convention used across this library
Args:
att_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
"""
B, Sq, E = q.shape
_, Sk, _ = k.shape
assert E == self.dim_model
# First define projected query/key/values
# We keep the projected and original tensors in flight,
# depending on the options the original values could be reused
q_unprojected = q
q, k, v = self.in_proj_container(query=q, key=k, value=v)
q *= self.scaling
# Init causal mask if needed, now that we know the context length
if self.causal and (
self._causal_mask is None or self._causal_mask.shape[0] != Sk
):
self._causal_mask = AttentionMask.make_causal(Sq, Sq, device=q.device)
# Convenience, create an attention mask if a tensor was passed
# This sanitizes different mask types being passed, from now on it's additive
if isinstance(att_mask, torch.Tensor):
# By default we don't know of the causality, and a check would be expensive
att_mask_additive: Optional[AttentionMask] = (
AttentionMask.from_bool(att_mask)
if att_mask.dtype == torch.bool
else AttentionMask(att_mask, is_causal=False)
)
else:
att_mask_additive = None
# Handle the attention and key padding masks
if self._causal_mask is not None:
# Optionally add the causal mask
if att_mask_additive is not None:
att_mask_additive += self._causal_mask
else:
att_mask_additive = self._causal_mask
# Flatten the heads or the rules
q = (
q.view(B, Sq, self.num_heads, self.dim_head)
.movedim(2, 1)
.flatten(0, 1) # [B * num_heads, Sq, dim_head]
)
k = (
k.view(B, Sk, self.num_heads, self.dim_head).movedim(2, 1).flatten(0, 1)
) # [B * num_heads, Sk, dim_head]
v = v.view(B, -1, self.num_rules, self.value_dim).movedim(2, 1).flatten(0, 1)
# Compute the search: Softmax(QKt)
attn_weights = torch.bmm(q, k.transpose(1, 2)) # [B * self.num_heads, Sq, Sk]
if att_mask_additive is not None:
attn_weights += att_mask_additive.values
attn_weights = _softmax(attn_weights, causal=self.causal)
attn_weights = attn_weights.view(B, self.num_heads, Sq, Sk)
attn_probs = self.dropout_module(attn_weights)
# Now compute the information retrieval
# keep all the heads in flight, we'll score the different possibilities
# - compute all the possible retrievals
v = v.view(B, 1, self.num_rules, Sk, self.value_dim)
attn_probs = attn_probs.unsqueeze(2)
attn = torch.matmul(attn_probs, v).view(
B, self.num_heads, self.num_rules, Sq, self.value_dim
)
attn = attn.movedim(3, 1) # [B, Sq, H, Rules, Values]
# - search the most appropriate retrieval among all the values
if self.q_compose:
v_q = self.value_q(q.transpose(0, 1)).view(
B, Sq, self.num_heads, 1, self.dim_selection
)
else:
v_q = self.value_q(q_unprojected).view(
B, Sq, self.num_heads, 1, self.dim_selection
)
if self.qk_rule:
v_q *= self.scaling_values
v_k = (
self.value_k(attn)
.view(B, Sq, self.num_heads, self.num_rules, self.dim_selection)
.transpose(4, 3)
.contiguous()
)
v_score = torch.matmul(v_q, v_k).view(
B, Sq, self.num_heads, self.num_rules, 1
)
else:
v_q = v_q.expand(-1, -1, -1, self.num_rules, -1)
v_in = torch.cat([attn, v_q], dim=-1)
v_score = self.score_network(v_in).view(
B, Sq, self.num_heads, self.num_rules, 1
)
v_score = F.softmax(v_score, dim=3)
# - extracted values are the original attention (inc. all the values) weighted by value score
attn = (attn * v_score).sum(dim=3).view(B, Sq, self.num_heads * self.value_dim)
# Final attention projection, same as other mechanisms
attn = self.out_proj(attn)
return attn
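# --- Usage sketch (not part of the original file) ---------------------------
# CompositionalAttention owns its multi-head logic (requires_skip_multi_head),
# so it is fed [batch, sequence, embedding] directly. Dimensions are assumed.
def _compositional_attention_sketch():
    attention = CompositionalAttention(dim_model=64, num_heads=4, dropout=0.0)
    x = torch.randn(2, 16, 64)
    return attention(x, x, x).shape  # (2, 16, 64)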
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/compositional.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
from xformers.components.attention import Attention, AttentionConfig, register_attention
@dataclass
class PoolingAttentionConfig(AttentionConfig):
pool_size: int # dimension of the input sequence
stride: Optional[int] # dimension of the internal space
padding: Optional[int]
@register_attention("pooling", PoolingAttentionConfig)
class Pooling(Attention):
def __init__(
self,
pool_size: int = 3,
stride: int = 1,
padding: Optional[int] = None,
*_,
**__,
):
"""
Pooling token mixing mechanism, as proposed in
`Metaformer is actually what you need for vision`_, Yu et al (2021).
The original notation is kept as is.
.. _`Metaformer is actually what you need for vision` : https://arxiv.org/pdf/2111.11418v1.pdf
"""
super().__init__()
padding = padding if padding is not None else pool_size // 2
self.pool = nn.AvgPool2d(
pool_size,
stride=stride,
            padding=padding,  # use the padding computed above (defaults to pool_size // 2)
count_include_pad=False,
)
# MHA related flags:
# kq need to have the same dimension
self.requires_same_k_q_dimensions = False
# This attention does not support attention masks
self.supports_attention_mask = False
# This "attention" (token mixing) skips the multihead attention altogether
self.requires_skip_multi_head = True
self.requires_input_projection = False
# This operator does not really handle q,k,v
self.requires_same_k_q_dimensions = True
# This attention requires the 2d structure out of the context,
        # implicitly assumed to be a squared context length
self.requires_squared_context = True
def forward(self, q: torch.Tensor, *_, **__):
# Expose the 2D token structure
B, HW, C = q.shape
H = int(math.sqrt(HW))
assert H * H == HW
q = q.transpose(-2, -1).reshape(B, C, H, H)
# 2D pool
x_pool = self.pool(q) - q # compensate for the residual path
# Get back to B HW C
return x_pool.flatten(2, 3).transpose(-2, -1)
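# --- Usage sketch (not part of the original file) ---------------------------
# Token mixing over a squared context (here 14 x 14 = 196 tokens, an assumed
# size); the output keeps the B x HW x C layout of the input.
def _pooling_mixer_sketch():
    mixer = Pooling(pool_size=3, stride=1)
    tokens = torch.randn(2, 196, 128)
    return mixer(tokens).shape  # (2, 196, 128)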
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/pooling.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from pathlib import Path
from typing import Any, Callable, Dict, Set, Union
import torch
from xformers.utils import (
generate_matching_config,
get_registry_decorator,
import_all_modules,
)
from ._sputnik_sparse import SparseCS
from .attention_mask import AttentionMask
from .base import Attention, AttentionConfig # noqa
logger = logging.getLogger("xformers")
# CREDITS: Classy Vision registry mechanism
ATTENTION_REGISTRY: Dict[str, Any] = {}
ATTENTION_CLASS_NAMES: Set[str] = set()
# Arbitrary threshold for now,
# in between dense and sparse matrix algorithms for the attention mechanism
_DENSITY_THRESHOLD = 0.30  # noqa  # from the sputnik paper
_USE_SPUTNIK = True
def build_attention(config: Union[Dict[str, Any], AttentionConfig]):
"""Builds an attention from a config.
This assumes a 'name' key in the config which is used to determine what
attention class to instantiate. For instance, a config `{"name": "my_attention",
"foo": "bar"}` will find a class that was registered as "my_attention"
(see :func:`register_attention`) and call .from_config on it."""
if not isinstance(config, AttentionConfig):
try:
config_instance = generate_matching_config(
config, ATTENTION_REGISTRY[config["name"]].config
)
except KeyError as e:
name = config["name"]
logger.warning(f"{name} not available among {ATTENTION_REGISTRY.keys()}")
raise e
else:
config_instance = config
return ATTENTION_REGISTRY[config_instance.name].constructor.from_config(
config_instance
)
"""Registers an Attention subclass.
This decorator allows xFormers to instantiate a subclass of Attention
from a configuration file, even if the class itself is not part of the
xFormers library. To use it, apply this decorator to an Attention
subclass, like this:
.. code-block:: python
@dataclass
class MyConfig:
...
@register_attention('my_attention', MyConfig)
class MyAttention(Attention):
...
To instantiate an attention from a configuration file, see :func:`build_attention`."""
register_attention: Callable[[str, Any], Callable[[Any], Any]] = get_registry_decorator(
ATTENTION_REGISTRY, ATTENTION_CLASS_NAMES, Attention, AttentionConfig
)
def maybe_sparsify(matrix) -> Any:
# Sparsify if that makes sense
if torch.count_nonzero(matrix).item() / matrix.numel() > _DENSITY_THRESHOLD:
# If not sparse, then AttentionMask is the reference type
return AttentionMask.from_bool(matrix)
return sparsify(matrix)
def sparsify(matrix):
if _USE_SPUTNIK:
return SparseCS(matrix)
return matrix.to_sparse()
from .favor import FavorAttention # noqa
from .global_tokens import GlobalAttention # noqa
from .linformer import LinformerAttention # noqa
from .local import LocalAttention # noqa
from .nystrom import NystromAttention # noqa
from .ortho import OrthoFormerAttention # noqa
from .random import RandomAttention # noqa
from .scaled_dot_product import ScaledDotProduct # noqa
__all__ = [
"ScaledDotProduct",
"LocalAttention",
"LinformerAttention",
"NystromAttention",
"RandomAttention",
"OrthoFormerAttention",
"GlobalAttention",
"FavorAttention",
"Attention",
"AttentionMask",
"build_attention",
"register_attention",
]
# Optionally expose the BlockSparse attention
try:
from .blocksparse import BlockSparseAttention # noqa
__all__ += ["BlockSparseAttention"]
except ImportError:
pass
# automatically import any Python files in the directory
import_all_modules(str(Path(__file__).parent), "xformers.components.attention")
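# --- Usage sketch (not part of the original file) ---------------------------
# Instantiating a registered attention purely from a config dict; the
# "scaled_dot_product" name and the shapes are illustrative assumptions.
def _build_attention_sketch():
    attention = build_attention(
        {"name": "scaled_dot_product", "dropout": 0.0, "causal": False}
    )
    q = torch.randn(2, 16, 32)
    return attention(q, q, q).shape  # (2, 16, 32)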
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.ops import masked_matmul
from xformers.sparse import SparseCSRTensor
# TODO: this is here for BC
from xformers.sparse.utils import _csr_to_coo, _dense_to_sparse # noqa: F401
class SparseCS:
def __init__(self, matrix, device=None):
if device is None:
device = torch.device("cpu")
if matrix.ndim == 2:
matrix = matrix[None]
assert matrix.ndim == 3
self._mat = SparseCSRTensor.from_dense(matrix).to(device)
@property
def device(self):
return self._mat.device
@property
def ndim(self):
return self._mat.ndim
@property
def dtype(self):
return self._mat.dtype
@property
def is_sparse(self):
return True
@property
def shape(self):
return self._mat.shape[1:]
@property
def values(self):
return self._mat.values()
@property
def row_indices(self):
return self._mat._csr_row_indices
@property
def column_indices(self):
return self._mat._csr_column_indices
@property
def row_offsets(self):
return self._mat._csr_row_offsets
@property
def _transp_info(self):
return self._mat._csr_transp_info
@classmethod
def wrap(
cls, shape, values, row_indices, row_offsets, column_indices, _transp_info
):
matrix = cls.__new__(cls)
_shape = (values.shape[0],) + shape
csr_matrix = SparseCSRTensor._wrap(
_shape, values, row_indices, row_offsets, column_indices, _transp_info
)
matrix._mat = csr_matrix
return matrix
@classmethod
def _wrap(cls, csr_matrix):
assert isinstance(csr_matrix, SparseCSRTensor)
matrix = cls.__new__(cls)
matrix._mat = csr_matrix
return matrix
def __mul__(self, other):
assert isinstance(other, (int, float))
return type(self)._wrap(self._mat * other)
def __add__(self, other):
assert isinstance(other, type(self))
return type(self)._wrap(self._mat + other._mat)
def matmul_with_mask(self, a, b):
return type(self)._wrap(masked_matmul(a, b, self._mat))
def softmax(self):
out = torch.nn.functional.softmax(self._mat, -1)
return type(self)._wrap(out)
def spmm(self, b):
out = torch.bmm(self._mat, b)
return out
def transpose(self):
out = torch.transpose(self._mat, -2, -1)
return type(self)._wrap(out)
def to(self, device):
assert isinstance(device, torch.device)
out = self._mat.to(device)
return type(self)._wrap(out)
def to_dense(self):
return self._mat.to_dense()
def logical_and(self, other: torch.Tensor):
assert not isinstance(other, SparseCS)
out = torch.logical_and(self._mat, other)
return type(self)._wrap(out)
def __and__(self, other):
return self.logical_and(other)
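if __name__ == "__main__":
    # Minimal usage sketch (illustrative): wrap a boolean attention pattern in the
    # CSR helper above. This assumes the C++/sputnik extension is built, since
    # SparseCS is only imported when that library is available.
    import torch

    pattern = torch.rand(1, 64, 64) > 0.9  # batched, mostly-empty boolean mask
    sparse_mask = SparseCS(pattern, device=torch.device("cpu"))
    print(sparse_mask.shape, sparse_mask.ndim, sparse_mask.is_sparse)  # torch.Size([64, 64]) 3 True
    dense_again = sparse_mask.to_dense()  # back to a dense tensor for inspection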
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/_sputnik_sparse.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from contextlib import nullcontext
from functools import lru_cache
from typing import Optional, Union
import torch
from xformers import _has_cpp_library, _is_triton_available
from xformers.components.attention.attention_mask import AttentionMask
if _has_cpp_library:
from ._sputnik_sparse import SparseCS
if _is_triton_available():
from xformers.triton.softmax import softmax as triton_softmax
from xformers.triton.utils import gpu_capabilities_older_than_70
_is_blocksparse_available = (
_is_triton_available() and not gpu_capabilities_older_than_70()
)
if _is_blocksparse_available:
from xformers.components.attention.blocksparse import BlockSparseAttention
logger = logging.getLogger("xformers")
def _create_random_sparsity(matrix, sparsity, divisible_by=4):
assert matrix.ndim == 3
keep = torch.rand_like(matrix[0], dtype=torch.float32) > sparsity
nonzero = torch.nonzero(keep)
nnz = nonzero.shape[0]
# NOTE: need to make it a multiple of 4 for sputnik
nonzero = nonzero[: (nnz - nnz % divisible_by)]
i, j = nonzero.unbind(1)
output = torch.zeros_like(matrix)
bdim = torch.arange(matrix.shape[0], device=matrix.device)[:, None]
output[bdim, i, j] = matrix[bdim, i, j]
return output
def _broadcast_batch(mask, batch_size):
if mask.ndim == 3:
return mask
assert mask.ndim == 2
mask = mask.coalesce()
values = mask.values()
indices = mask.indices()
nnz = len(values)
# strategy: repeat the indices and append the extra batch dimension to the indices
indices = indices.repeat(1, batch_size)
# now create the batch indices
batch_indices = torch.arange(batch_size, device=indices.device)
batch_indices = batch_indices[:, None].expand(batch_size, nnz).flatten()
# put them together
indices = torch.cat([batch_indices[None, :], indices], dim=0)
# now repeat the values
values = values.repeat(batch_size)
size = (batch_size,) + mask.shape
return torch.sparse_coo_tensor(indices, values, size)
def _matmul_with_mask(
a: torch.Tensor,
b: torch.Tensor,
mask: Optional[Union[torch.Tensor, "SparseCS"]],
) -> torch.Tensor:
if mask is None:
return a @ b
if _has_cpp_library and mask.dtype == torch.bool:
if isinstance(mask, SparseCS):
return mask.matmul_with_mask(a, b)
if mask.is_sparse:
# perform broadcasting if needed
mask = _broadcast_batch(mask, a.shape[0])
# coalesced is not implemented for bool tensors, so need to cast
mask = mask.to(dtype=a.dtype) # type: ignore # mypy is missing the catch above
return torch.ops.xformers.matmul_with_mask(a, b, mask)
# Non optimized codepath
if _has_cpp_library:
assert not isinstance(mask, SparseCS)
att = a @ b
if mask.dtype == torch.bool:
assert not isinstance(mask, SparseCS)
if mask.ndim == 2:
mask = mask.unsqueeze(0).expand(att.shape[0], -1, -1)
# mask is presumed false == ignore
att[~mask] = float("-inf")
else:
# mask is presumed additive
# repeat if batch sizes don't match
if (
not isinstance(mask, SparseCS)
and mask.ndim == 3
and mask.shape[0] != att.shape[0]
and (att.shape[0] % mask.shape[0]) == 0
):
repeat_factor = att.shape[0] // mask.shape[0]
mask = mask.repeat([repeat_factor, 1, 1])
logger.info("Mismatched batch dimensions for mask, repeating mask.")
att += mask
return att
def _softmax(a: torch.Tensor, causal: bool = False) -> torch.Tensor:
if _has_cpp_library and isinstance(a, SparseCS):
return a.softmax()
if a.is_sparse:
return torch.sparse.softmax(a, dim=a.ndim - 1)
if _is_triton_available():
return triton_softmax(a, mask=None, causal=causal)
else:
return torch.softmax(a, dim=a.ndim - 1)
if _has_cpp_library:
class SparseBMM(torch.autograd.Function):
@staticmethod
def forward(ctx, a, b):
a = a.coalesce()
r = torch.bmm(a, b)
ctx.save_for_backward(a, b)
return r
@staticmethod
def backward(ctx, grad):
a, b = ctx.saved_tensors
# gradients w.r.t. a
ga = None
if ctx.needs_input_grad[0]:
ga = torch.ops.xformers.matmul_with_mask(grad, b.transpose(-2, -1), a)
# gradients w.r.t. b
gb = None
if ctx.needs_input_grad[1]:
gb = a.transpose(1, 2).bmm(grad)
return ga, gb
def _sparse_bmm(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
"""
Batch matrix multiply between a sparse matrix and a dense matrix
"""
assert a.ndim == b.ndim == 3
assert a.shape[0] == b.shape[0]
assert a.shape[2] == b.shape[1]
return SparseBMM.apply(a, b)
def bmm(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
if _has_cpp_library:
if isinstance(a, SparseCS):
return a.spmm(b)
if a.is_sparse:
return _sparse_bmm(a, b)
return a @ b
def _apply_dropout(att, dropout):
if dropout is None:
return att
# Dropout chokes on sparse tensors
if _has_cpp_library:
if isinstance(att, SparseCS):
values = att.values.clone()
values = dropout(values)
att = SparseCS.wrap(
att.shape,
values,
att.row_indices,
att.row_offsets,
att.column_indices,
att._transp_info,
)
elif att.is_sparse:
att = att.coalesce()
values = att.values().clone() # protect against in-place dropout
values = dropout(values)
att = torch.sparse_coo_tensor(att.indices(), values, att.shape)
else:
# Simple dense case
att = dropout(att)
return att
# Non optimized vanilla dropout
att = dropout(att)
return att
def scaled_query_key_softmax(
q: torch.Tensor,
k: torch.Tensor,
att_mask: Optional[Union[AttentionMask, "SparseCS", torch.Tensor]],
) -> torch.Tensor:
# TODO assume we have (N, S, hs) instead of (B, nh, S, hs), with N = B x nh
# this is needed due to limitations in sparse_bmm for now
# Self-attend: (N, S, hs) x (N, hs, S) -> (N, S, S)
q = q / math.sqrt(k.size(-1))
# Matmul with mask
if att_mask is not None and isinstance(att_mask, AttentionMask):
# Additive mask
mask: Optional[Union[SparseCS, torch.Tensor]] = att_mask.values
else:
mask = att_mask
att = _matmul_with_mask(q, k.transpose(-2, -1), mask)
# Softmax to get the attention probabilities
is_causal = isinstance(att_mask, AttentionMask) and att_mask.is_causal
att = _softmax(att, causal=is_causal)
return att
if _is_blocksparse_available:
# 128 is default maxsize
@lru_cache(maxsize=128)
def _retrieve_blocksparse(
num_heads: int, seq_len: int, block_size: int
) -> BlockSparseAttention:
# Checks if blocksparse object exists in cache
blocks = seq_len // block_size
layout_fill = torch.ones((num_heads, blocks, blocks), dtype=torch.long)
return BlockSparseAttention(
layout=layout_fill, block_size=block_size, causal=True
)
def blocksparse_attention(
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
dropout: Optional[torch.nn.Module] = None,
block_size: int = 128,
) -> torch.Tensor:
orig_dim = q.dim()
seq_len = q.shape[-2]
# Layout head dimension: 1 or batch size (q.shape[0])
layout_heads = 1
# TODO perhaps add functionality to pad qkv if sequence length is not divisible by block size?
assert seq_len % block_size == 0, "Sequence length must be divisible by block size"
if orig_dim == 3:
# Reshape from (N, S, hs) to (B, nh, S, hs) where N = B x nh, hs = D / nh
# Assuming num_heads = 1, (N, S, hs) to (B, 1, S, hs)
if layout_heads == 1:
q = q.unsqueeze(1)
k = k.unsqueeze(1)
v = v.unsqueeze(1)
else:
q = q.unsqueeze(0)
k = k.unsqueeze(0)
v = v.unsqueeze(0)
blocksparse_attention = _retrieve_blocksparse(layout_heads, seq_len, block_size)
# Dropout is a no-op in evaluation mode
if isinstance(dropout, torch.nn.Dropout):
blocksparse_attention.attn_drop = dropout
else:
blocksparse_attention.attn_drop = torch.nn.Dropout(0.0)
att = blocksparse_attention(q, k, v)
# Reshape attention (B, nh, S, hs) back to (N, S, hs)
if orig_dim == 3:
return att.flatten(0, 1)
return att
def scaled_dot_product_attention(
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[Union[AttentionMask, "SparseCS", torch.Tensor]],
dropout: Optional[torch.nn.Module] = None,
block_size: int = 128,
) -> torch.Tensor:
autocast_disabled = (
_has_cpp_library
and isinstance(att_mask, SparseCS)
or (att_mask is not None and att_mask.is_sparse)
)
seq_len = q.shape[-2]
# switch if:
# causal is required but mask is not sparse
# fp16 or under amp context
# sequence length is divisible by block size
# same seq len for K and Q
switch_to_blocksparse = (
_is_blocksparse_available
and (att_mask is not None and not att_mask.is_sparse)
and (isinstance(att_mask, AttentionMask) and att_mask.is_causal)
and (q.dtype == torch.float16 or torch.is_autocast_enabled())
and not seq_len % block_size
and q.shape[-2] == k.shape[-2]
)
if switch_to_blocksparse:
logger.info("Switching causal attention to Triton blocksparse...")
return blocksparse_attention(q, k, v, dropout, block_size)
with torch.cuda.amp.autocast(enabled=False) if autocast_disabled else nullcontext(): # type: ignore
if autocast_disabled:
q, k, v = q.float(), k.float(), v.float()
att = scaled_query_key_softmax(q, k, att_mask=att_mask)
# Optional dropout, could be part of the masking in the future
att = _apply_dropout(att, dropout)
# Get to the predicted values, for all heads
# y = att @ v # (N, S, S) x (N, S, hs) -> (N, S, hs)
y = bmm(att, v)
return y
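if __name__ == "__main__":
    # Minimal usage sketch (illustrative): the dense code path of
    # scaled_dot_product_attention with a causal additive AttentionMask.
    q = k = v = torch.randn(4, 16, 32)  # (batch * heads, seq, head_dim)
    causal_mask = AttentionMask.make_causal(seq_len=16)
    out = scaled_dot_product_attention(q, k, v, att_mask=causal_mask, dropout=None)
    print(out.shape)  # torch.Size([4, 16, 32])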
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/core.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
import torch
from xformers.components.attention import Attention, AttentionConfig, register_attention
def calc_rel_pos(n: int):
# Adapted from LucidRains
# https://github.com/lucidrains/lambda-networks/blob/main/lambda_networks/lambda_networks.py
rel_pos = torch.arange(n)[None, :] - torch.arange(n)[:, None] # [n, n]
rel_pos += n - 1 # shift value range from [-n+1, n-1] to [0, 2n-2]
return rel_pos
@dataclass
class LambdaLayerConfig(AttentionConfig):
seq_len: int # dimension of the input sequence
dim_head: int
@register_attention("lambda", LambdaLayerConfig)
class LambdaLayer(Attention):
def __init__(self, dropout: float, seq_len: int, dim_head: int, *_, **__):
"""
Attention approximation using Lambda layers, from
"Lambda networks: modeling long-range interactions without attention.", Bello, I. (2021).
"""
super().__init__()
# Possible extensions:
# - support different dimensions for key and queries
# - support varying dimensions in between inputs and outputs
# - support u hyperparam
self.rel_pos_emb = torch.nn.Parameter(
torch.randn(2 * seq_len - 1, int(dim_head))
)
self.rel_pos = calc_rel_pos(seq_len)
self.attn_drop = torch.nn.Dropout(dropout, inplace=True)
# Properties specific to this attention mechanism
self.requires_same_k_q_dimensions = True
self.supports_attention_mask = False
self.supports_key_padding_mask = False
def forward(
self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, *args, **kwargs
):
"""..NOTE: We're reusing the einsum notation suggested by the paper, changed in that
heads are folded in the batch dimension"""
content_lambda = torch.einsum("bnk,bnv->bkv", torch.softmax(k, dim=-1), v)
content_output = torch.einsum("bnk,bkv->bnv", q, content_lambda)
rel_pos_emb = self.rel_pos_emb[self.rel_pos]
# Handle real sequence length being possibly smaller
seq_len = q.shape[1]
rel_pos_emb = rel_pos_emb[:seq_len, :seq_len, :]
# Compute the position lambda for every possible combination in one go, then compute the
# position related contribution
position_lambdas = torch.einsum(
"mnk,bnv->bnkv", rel_pos_emb, v
) # one lambda per position
position_output = (q.unsqueeze(2) @ position_lambdas).squeeze()
att = content_output + position_output
att = self.attn_drop(att)
return att
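if __name__ == "__main__":
    # Minimal usage sketch (illustrative): the lambda layer is built for a fixed
    # seq_len and head dimension; q/k/v are (batch * heads, seq, head_dim), with
    # the heads already folded into the batch dimension.
    layer = LambdaLayer(dropout=0.0, seq_len=32, dim_head=16)
    q = k = v = torch.randn(2, 32, 16)
    out = layer(q, k, v)
    print(out.shape)  # torch.Size([2, 32, 16])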
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/lambda_layer.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional, Union
import torch
import torch.nn as nn
from xformers.components.attention import (
Attention,
AttentionConfig,
AttentionMask,
maybe_sparsify,
register_attention,
sparsify,
)
from xformers.components.attention.attention_patterns import (
causal_1d_pattern,
random_pattern,
)
from xformers.components.attention.core import scaled_dot_product_attention
@dataclass
class RandomAttentionConfig(AttentionConfig):
r: Optional[
float
] # the ratio of keys that the query can attend to. 1.0 means dense attention
constant_masking: Optional[
bool
] # whether the randomness is per query or defined at construction time
force_sparsity: Optional[bool] # use sparsity in any case (potentially slower)
@register_attention("random", RandomAttentionConfig)
class RandomAttention(Attention):
def __init__(
self,
dropout: float,
causal: bool = False,
r: float = 0.01,
constant_masking: bool = True,
force_sparsity: bool = False,
*args,
**kwargs,
):
"""
"Random" attention, as proposed for instance in BigBird_.
Random means in that case that each query can attend to a random set of keys.
This implementation is sparse-aware, meaning that the empty attention parts will not be represented in memory.
Args:
r (float): the ratio in [0,1] of keys that the query can attend to
constant_masking (bool): if true, keep the same random set for all queries.
.. _BigBird: https://arxiv.org/pdf/2007.14062.pdf
"""
super().__init__()
self.attn_drop = nn.Dropout(dropout, inplace=False)
self.causal = causal
self.r = r
self.rand_attention_mask: Optional[torch.Tensor] = None
self.constant_masking = constant_masking
self.force_sparsity = force_sparsity
# Properties specific to this attention mechanism
self.supports_attention_mask = True
self.supports_key_padding_mask = False
self.requires_same_k_q_dimensions = True
def _get_rand_mask(self, shape: torch.Size) -> torch.Tensor:
sparsity = 1 - self.r
mask = random_pattern(shape[1], sparsity=sparsity)
if self.causal:
mask &= causal_1d_pattern(shape[1])
mask = sparsify(mask) if self.force_sparsity else maybe_sparsify(mask)
return mask
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[Union[torch.Tensor, AttentionMask]] = None,
*args,
**kwargs,
):
# Rand masking
if not self.constant_masking or self.rand_attention_mask is None:
self.rand_attention_mask = self._get_rand_mask(q.shape).to(q.device)
# Mask-aware attention
if att_mask is not None:
if att_mask.dtype == torch.bool and isinstance(
self.rand_attention_mask, AttentionMask
):
mask = self.rand_attention_mask + AttentionMask.from_bool(att_mask)
else:
if isinstance(att_mask, AttentionMask):
# Needed because & op not defined for SparseCS with AttentionMask
att_mask = att_mask.to_bool()
mask = self.rand_attention_mask & att_mask
else:
mask = self.rand_attention_mask
# Handle q/k/v which would not fit the mask
seq_len = q.shape[-2]
q_, k_, v_ = map(lambda x: self._maybe_pad_sequence(x, mask), (q, k, v))
# Normal attention with the random mask
att = scaled_dot_product_attention(
q=q_, k=k_, v=v_, att_mask=mask, dropout=self.attn_drop
)
        # Take into account any hypothetical padding
return att[:, :seq_len, :]
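if __name__ == "__main__":
    # Minimal usage sketch (illustrative): with a fairly high ratio r the random
    # pattern is likely kept dense (as an AttentionMask), which keeps this example
    # runnable on installs without the optional sparse kernels.
    attn = RandomAttention(dropout=0.0, causal=False, r=0.5)
    q = k = v = torch.randn(2, 64, 32)
    out = attn(q, k, v)
    print(out.shape)  # torch.Size([2, 64, 32])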
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/random.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.cuda.amp import autocast
from xformers.components.attention import Attention, AttentionConfig, register_attention
@register_attention("fourier_mix", AttentionConfig)
class FourierMix(Attention):
def __init__(self, dropout: float, *_, **__):
"""
FFT-based pseudo-attention mechanism, from
"
"FNet: Mixing Tokens with Fourier Transforms"
Lee-Thorp et al., 2021, https://arxiv.org/pdf/2105.03824.pdf
"""
super().__init__()
self.attn_drop = torch.nn.Dropout(dropout, inplace=False)
# Properties specific to this attention mechanism
self.supports_attention_mask = False
self.requires_input_projection = False
def forward(self, q: torch.Tensor, *_, **__):
# Guard against autocast / fp16, not supported by torch.fft.fft2
with autocast(enabled=False):
att = torch.fft.fft2(q).real
att = self.attn_drop(att)
return att
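if __name__ == "__main__":
    # Minimal usage sketch (illustrative): FNet-style token mixing needs no mask
    # and no input projection; it simply mixes tokens with a 2D FFT.
    mix = FourierMix(dropout=0.0)
    x = torch.randn(2, 64, 32)  # (batch, seq, dim)
    out = mix(x)
    print(out.shape)  # torch.Size([2, 64, 32])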
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/fourier_mix.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from typing import Optional, Union
import torch
from torch import nn
from xformers.components.attention import (
Attention,
AttentionConfig,
AttentionMask,
register_attention,
)
from xformers.components.attention.core import scaled_dot_product_attention
logger = logging.getLogger("xformers")
@dataclass
class ScaledDotProductConfig(AttentionConfig):
causal: Optional[bool]
seq_len: Optional[int]
to_seq_len: Optional[int]
@register_attention("scaled_dot_product", ScaledDotProductConfig)
class ScaledDotProduct(Attention):
r"""
Implementing the Scaled Dot-Product attention proposed in
`Attention is all you need`_, Vaswani et al.
.. _`Attention is all you need`: https://arxiv.org/abs/1706.03762v5
"""
mask: Optional[AttentionMask]
def __init__(
self,
dropout: float = 0.0,
causal: bool = False,
seq_len: Optional[int] = None,
to_seq_len: Optional[int] = None,
*args,
**kwargs,
):
super().__init__()
self.attn_drop = nn.Dropout(dropout, inplace=False)
self.causal = causal
self.seq_len = seq_len
if causal and seq_len is not None:
self.mask = AttentionMask.make_causal(seq_len, to_seq_len)
else:
self.mask = None
# Properties specific to this attention mechanism
self.supports_attention_mask = True
self.supports_key_padding_mask = False
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[Union[AttentionMask, torch.Tensor]] = None,
*args,
**kwargs,
) -> torch.Tensor:
r"""
att_mask A 2D or 3D mask which ignores attention at certain positions.
- If the mask is boolean, a value of True will keep the value,
while a value of False will mask the value.
Key padding masks (dimension: batch x sequence length) and attention masks
(dimension: sequence length x sequence length OR batch x sequence length x sequence length)
can be combined and passed in here. Method maybe_merge_masks provided in the utils can be
used for that merging.
- If the mask has the float type, then an additive mask is expected (masked values are -inf)
"""
# Convenience, create an attention mask if a tensor was passed
if att_mask is not None and isinstance(att_mask, torch.Tensor):
# By default we don't know of the causality, and a check would be expensive
att_mask = (
AttentionMask.from_bool(att_mask)
if att_mask.dtype == torch.bool
else AttentionMask(att_mask, is_causal=False)
)
# Handle a possibly deferred causal mask handling
mask = self.mask
if self.causal and self.mask is None:
mask = AttentionMask.make_causal(
seq_len=q.shape[-2],
to_seq_len=q.shape[-2],
device=q.device,
dtype=q.dtype,
)
# Merge the optional causal mask and the user-provided mask
if mask is not None:
mask = mask.to(dtype=q.dtype, device=q.device)
att_mask = att_mask + mask if att_mask is not None else mask
# Try to handle a case where the sequence is smaller than the mask
if (
att_mask is not None
and q.shape[-2] == k.shape[-2]
and q.shape[-2] < att_mask.shape[1]
):
if isinstance(att_mask, AttentionMask):
att_mask = att_mask.make_crop(seq_len=q.shape[-2])
else:
logger.error(
"Mismatching sparse attention mask and sequence length."
+ " Please pad the inputs or adjust the attention mask"
)
raise NotImplementedError
# Attend: (B x nh, S, hs) x (B x nh, hs, S) -> (B x nh, S, S)
y = scaled_dot_product_attention(
q=q, k=k, v=v, att_mask=att_mask, dropout=self.attn_drop
)
return y
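if __name__ == "__main__":
    # Minimal usage sketch (illustrative): a boolean mask (False == masked out)
    # is converted internally into an additive AttentionMask.
    attn = ScaledDotProduct(dropout=0.0, causal=False)
    q = k = v = torch.randn(2, 16, 32)
    keep = torch.tril(torch.ones(16, 16, dtype=torch.bool))  # keep the lower triangle
    out = attn(q, k, v, att_mask=keep)
    print(out.shape)  # torch.Size([2, 16, 32])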
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/scaled_dot_product.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
# Reshapes the key padding mask from (batch_size, src_len) -> (batch_size * num_heads, 1, src_len)
def reshape_key_padding_mask(
key_padding_mask: torch.Tensor, batched_dim: int
) -> torch.Tensor:
assert key_padding_mask.ndim == 2
batch_size, src_len = key_padding_mask.size()
num_heads = batched_dim // batch_size
return _reshape_key_padding_mask(key_padding_mask, batch_size, src_len, num_heads)
def _reshape_key_padding_mask(
key_padding_mask: torch.Tensor, batch_size: int, src_len: int, num_heads: int
) -> torch.Tensor:
assert key_padding_mask.shape == (batch_size, src_len)
key_padding_mask = (
key_padding_mask.view(batch_size, 1, 1, src_len)
.expand(-1, num_heads, -1, -1)
.reshape(batch_size * num_heads, 1, src_len)
)
return key_padding_mask
# Combine the attention mask and key padding mask into a single mask
# Taken from https://github.com/pytorch/pytorch/blob/master/torch/nn/functional.py
# Additive masking not yet supported
def maybe_merge_masks(
att_mask: Optional[torch.Tensor],
key_padding_mask: Optional[torch.Tensor],
batch_size: int,
src_len: int,
num_heads: int,
tgt_len: Optional[int] = None,
) -> Optional[torch.Tensor]:
if tgt_len is None:
tgt_len = src_len
if key_padding_mask is not None:
assert key_padding_mask.shape == (batch_size, src_len)
key_padding_mask = _reshape_key_padding_mask(
key_padding_mask, batch_size, src_len, num_heads
)
if att_mask is None:
# make sure dimensions of key padding mask are the same as those expected for att_mask
att_mask = key_padding_mask.expand(-1, tgt_len, -1)
# Assumption is that False means to mask.
elif att_mask.dtype == torch.bool:
att_mask = att_mask.logical_and(key_padding_mask)
else:
att_mask = att_mask.masked_fill(~key_padding_mask, float("-inf"))
return att_mask
# Assumes that matrix passed in has had softmax applied to it.
def iterative_pinv(softmax_mat: torch.Tensor, n_iter=6, pinverse_original_init=False):
"""
Computing the Moore-Penrose inverse.
Use an iterative method from (Razavi et al. 2014) to approximate the Moore-Penrose inverse via efficient
matrix-matrix multiplications.
"""
i = torch.eye(
softmax_mat.size(-1), device=softmax_mat.device, dtype=softmax_mat.dtype
)
k = softmax_mat
# The entries of K are positive and ||K||_{\infty} = 1 due to softmax
if pinverse_original_init:
# This original implementation is more conservative to compute coefficient of Z_0.
v = 1 / torch.max(torch.sum(k, dim=-2)) * k.transpose(-1, -2)
else:
# This is the exact coefficient computation, 1 / ||K||_1, of initialization of Z_0, leading to faster
# convergence.
v = (
1
/ torch.max(torch.sum(k, dim=-2), dim=-1).values[:, None, None]
* k.transpose(-1, -2)
)
for _ in range(n_iter):
kv = torch.matmul(k, v)
v = torch.matmul(
0.25 * v,
13 * i - torch.matmul(kv, 15 * i - torch.matmul(kv, 7 * i - kv)),
)
return v
def bool_mask_to_additive(
mask: torch.Tensor, dtype: Optional[torch.dtype] = torch.float32
) -> torch.Tensor:
assert (
mask.dtype == torch.bool
), "This util is meant to convert in between bool masks and additive ones"
mask_ = torch.zeros_like(mask, dtype=dtype)
mask_[~mask] = float("-inf")
return mask_
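if __name__ == "__main__":
    # Minimal usage sketch (illustrative): merge a boolean attention mask with a
    # key padding mask, then convert the result to an additive mask.
    batch_size, num_heads, seq = 2, 4, 8
    att_mask = torch.tril(torch.ones(seq, seq, dtype=torch.bool))  # causal, boolean
    key_padding_mask = torch.ones(batch_size, seq, dtype=torch.bool)
    key_padding_mask[:, -2:] = False  # last two tokens are padding
    merged = maybe_merge_masks(att_mask, key_padding_mask, batch_size, seq, num_heads)
    print(merged.shape)  # torch.Size([8, 8, 8]) == (batch * heads, seq, seq)
    additive = bool_mask_to_additive(merged)  # 0. where kept, -inf where masked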
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Type, TypeVar
import torch
Self = TypeVar("Self", bound="AttentionMask")
class AttentionMask:
"""
Holds an attention mask, along with a couple of helpers and attributes.
.. note: this is an additive mask, meaning that coefficients which should be computed hold the '0.' value,
and coefficients which should be skipped hold the '-inf' value. Any other value is possible if the purpose
is to bias the attention computation for instance
.. note: the attention mask dimensions are expected to be `[batch, to_sequence, from_sequence]`,
`[to_sequence, from_sequence]`, or anything broadcastable in between
"""
def __init__(self, additive_mask: torch.Tensor, is_causal: bool = False):
assert additive_mask.is_floating_point(), additive_mask.dtype
assert not additive_mask.requires_grad
if additive_mask.ndim == 2:
additive_mask = additive_mask.unsqueeze(0)
self.values = additive_mask
self.is_causal = is_causal
self.seq_len = additive_mask.shape[1]
self.to_seq_len = additive_mask.shape[0]
def to_bool(self) -> torch.Tensor:
"""
.. warning: we assume here that True implies that the value should be computed
"""
return self.values != float("-inf")
@classmethod
def from_bool(cls: Type[Self], x: torch.Tensor) -> Self:
"""
Create an AttentionMask given a boolean pattern.
.. warning: we assume here that True implies that the value should be computed
"""
assert x.dtype == torch.bool
additive_mask = torch.empty_like(x, dtype=torch.float, device=x.device)
additive_mask.masked_fill_(x, 0.0)
additive_mask.masked_fill_(~x, float("-inf"))
return cls(additive_mask)
@classmethod
def from_multiplicative(cls: Type[Self], x: torch.Tensor) -> Self:
"""
Create an AttentionMask given a multiplicative attention mask.
"""
assert not x.dtype == torch.bool
additive_mask = torch.empty_like(x, dtype=torch.float, device=x.device)
x = x.bool()
additive_mask.masked_fill_(x, 0.0)
additive_mask.masked_fill_(~x, float("-inf"))
return cls(additive_mask)
@classmethod
def make_causal(
cls: Type[Self],
seq_len: int,
to_seq_len: Optional[int] = None,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
) -> Self:
if not to_seq_len:
to_seq_len = seq_len
additive_mask = torch.triu(
torch.ones(seq_len, to_seq_len, device=device, dtype=dtype) * float("-inf"),
diagonal=1,
)
return cls(additive_mask=additive_mask, is_causal=True)
def make_crop(
self, seq_len: int, to_seq_len: Optional[int] = None
) -> "AttentionMask":
"""
Return a cropped attention mask, whose underlying tensor is a view of this one
"""
if not to_seq_len:
to_seq_len = seq_len
return AttentionMask(
self.values[:, :seq_len, :to_seq_len], is_causal=self.is_causal
)
def __repr__(self):
return f"AttentionMask - causal {self.is_causal} - mask " + str(self.values)
@property
def device(self):
return self.values.device
@property
def is_sparse(self):
return False
@property
def ndim(self):
return len(self.values.shape)
@property
def dtype(self):
return self.values.dtype
@property
def shape(self):
return self.values.shape
def __add__(self, other):
return AttentionMask(self.values + other.values, is_causal=False)
def to(
self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None
) -> "AttentionMask":
assert device is None or isinstance(device, torch.device)
assert dtype is None or isinstance(dtype, torch.dtype)
assert device is not None or dtype is not None
# Noop if we don't need to create another instance
if ((device and device == self.device) or not device) and (
(dtype and dtype == self.dtype) or not dtype
):
return self
return AttentionMask(self.values.to(device=device, dtype=dtype), self.is_causal)
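if __name__ == "__main__":
    # Minimal usage sketch (illustrative): build additive masks and combine them.
    causal = AttentionMask.make_causal(seq_len=8)  # upper triangle set to -inf
    keep = torch.rand(8, 8) > 0.5
    extra = AttentionMask.from_bool(keep)  # False entries become -inf
    combined = causal + extra  # still an additive AttentionMask
    print(combined.shape, combined.to_bool().dtype)  # torch.Size([1, 8, 8]) torch.bool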
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/attention_mask.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
from xformers.components.attention import Attention, AttentionConfig, register_attention
from xformers.components.attention.core import scaled_dot_product_attention
@dataclass
class LinformerSelfAttentionConfig(AttentionConfig):
seq_len: int # dimension of the input sequence
k: Optional[int] # dimension of the internal space
@register_attention("linformer", LinformerSelfAttentionConfig)
class LinformerAttention(Attention):
def __init__(
self, dropout: float, seq_len: int, k: Optional[int] = None, *args, **kwargs
):
"""
Linformer attention mechanism,
from `Linformer: Self-Attention with Linear Complexity`_, Wang et al (2020).
The original notation is kept as is.
.. _`Linformer: Self-Attention with Linear Complexity` : https://arxiv.org/abs/2006.04768v2
"""
super().__init__()
if k is None:
k = seq_len // 4
self.k = k
self.E = nn.Linear(seq_len, k, bias=False)
self.F = nn.Linear(seq_len, k, bias=False)
self.attn_drop = nn.Dropout(dropout, inplace=False)
self.seq_len = seq_len
# MHA related flags:
# kq need to have the same dimension
self.requires_same_k_q_dimensions = True
# This attention does not support attention masks
self.supports_attention_mask = False
def forward(
self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, *args, **kwargs
):
# Handle a smaller dimension than expected
padding = 0
if q.shape[1] < self.seq_len:
padding = self.seq_len - q.shape[1]
pad_dims = (0, 0, 0, padding)
q = torch.nn.functional.pad(q, pad_dims)
k = torch.nn.functional.pad(k, pad_dims)
v = torch.nn.functional.pad(v, pad_dims)
k_projected = self.E(k.transpose(-2, -1)).transpose(-2, -1)
v_projected = self.F(v.transpose(-2, -1)).transpose(-2, -1)
y = scaled_dot_product_attention(
q=q, k=k_projected, v=v_projected, att_mask=None, dropout=self.attn_drop
)
y = self.attn_drop(y)
return y[:, :-padding, :] if padding > 0 else y
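if __name__ == "__main__":
    # Minimal usage sketch (illustrative): keys and values are projected down to
    # k positions along the sequence axis before the usual attention.
    attn = LinformerAttention(dropout=0.0, seq_len=128, k=32)
    q = k = v = torch.randn(2, 128, 64)
    out = attn(q, k, v)
    print(out.shape)  # torch.Size([2, 128, 64])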
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/linformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import List
import numpy as np
import torch
from xformers.components.attention.sparsity_config import (
BigBirdSparsityConfig,
BSLongformerSparsityConfig,
FixedSparsityConfig,
VariableSparsityConfig,
)
# generic nd cases
def _generate_nd_grid(*sizes):
coords = [torch.arange(s) for s in sizes]
return torch.meshgrid(*coords)
def local_nd_distance(*sizes, p=2.0, weights=None):
if weights is None:
weights = (1,) * len(sizes)
assert len(sizes) == len(weights)
grid = _generate_nd_grid(*sizes)
grid = [i.flatten() * w for i, w in zip(grid, weights)]
grid = torch.stack(grid, dim=1).float()
d = torch.cdist(grid, grid, p=p)
return d
def local_nd_gaussian_distribution(*sizes, sigma=1):
d = local_nd_distance(*sizes, p=2.0) ** 2
d = torch.exp(-0.5 * sigma ** (-2.0) * d)
return d
def local_nd_pattern(*sizes, distance, p=2.0):
d = local_nd_distance(*sizes, p=p)
return d < distance
def axial_nd_pattern(*sizes):
# axial is a special case with p=0 and distance=2
d = local_nd_distance(*sizes, p=0)
return d < 2
def random_pattern_from_probability_matrix(dist_matrix, nnz):
att = torch.zeros_like(dist_matrix, dtype=torch.bool)
# PyTorch multinomial wrongly doesn't support sampling when number of categories
# is > 2^24, arguing that it's because it's the max representable consecutive element
# in fp32 and that the kernels use float32. This is actually not true, and the kernels
# should work fine if double tensor is passed on CPU. This is a bug that was introduced
# in https://github.com/pytorch/pytorch/commit/bf04c2ca2f591d98ce57816f0ef0cd20a21bbf66
# when unifying the checks between CPU and CUDA. For now, just fall-back to numpy
if dist_matrix.numel() > 2**24:
dist_matrix = dist_matrix.double()
dist_matrix /= dist_matrix.sum()
idxs = np.random.choice(
dist_matrix.numel(), nnz, p=dist_matrix.flatten(), replace=False
)
idxs = torch.as_tensor(idxs)
else:
idxs = torch.multinomial(dist_matrix.flatten(), nnz, replacement=False)
att.view(-1)[idxs] = True
return att
def global_token_pattern(attention_query_mask: torch.Tensor) -> torch.Tensor:
assert attention_query_mask.ndim == 1
assert attention_query_mask.dtype == torch.bool
attention_query_mask = attention_query_mask[None, :]
mask = attention_query_mask | attention_query_mask.transpose(1, 0)
return mask
def random_pattern(attn_size: int, sparsity: float) -> torch.Tensor:
assert 0 < sparsity < 1
mask = torch.rand(attn_size, attn_size) > sparsity
return mask
# 1d-specific cases
def local_1d_pattern(attn_size: int, window_size: int) -> torch.Tensor:
assert (
window_size % 2 == 1
), "The window size is assumed to be odd (counts self-attention + 2 wings)"
h_win_size = window_size // 2 + 1
return local_nd_pattern(attn_size, distance=h_win_size, p=1.0)
def causal_1d_pattern(attn_size: int) -> torch.Tensor:
mask = torch.tril(torch.ones(attn_size, attn_size, dtype=torch.bool))
return mask
# 2d-specific cases
def horizontal_axial_2d_distance(H, W, p=2.0):
d = local_nd_distance(H, W, p=p, weights=(1, 0))
return d
def vertical_axial_2d_distance(H, W, p=2.0):
d = local_nd_distance(H, W, p=p, weights=(0, 1))
return d
def local_2d_distance(H, W, p=2.0):
return local_nd_distance(H, W, p=p)
def local_2d_gausian_distribution(H, W, sigma=1):
return local_nd_gaussian_distribution(H, W, sigma=sigma)
def local_2d_pattern(H, W, distance, p=2.0):
return local_nd_pattern(H, W, distance=distance, p=p)
def axial_2d_pattern(H, W):
return axial_nd_pattern(H, W)
def swin_attention_pattern(H, W, window_size, shift_size=0):
assert H % window_size == 0
assert W % window_size == 0
    assert 0 <= shift_size < window_size, "shift_size must be in [0, window_size)"
# input grid
i, j = _generate_nd_grid(H, W)
i, j = i + 0.5, j + 0.5
# anchors grid
# if shift is present, add extra element to the grid
# to account for the uneven partitioning
extra = int(shift_size % window_size != 0)
grid_h = H // window_size + extra
grid_w = W // window_size + extra
ii, jj = _generate_nd_grid(grid_h, grid_w)
# convert shift to be compatible with the paper representation
s = (-shift_size) % window_size
offset = window_size / 2 - s
ii = ii * window_size + offset
jj = jj * window_size + offset
input_coords = torch.stack([i.flatten(), j.flatten()], 1).float()
anchors_coords = torch.stack([ii.flatten(), jj.flatten()], 1).float()
anchor_id = torch.cdist(input_coords, anchors_coords, p=2).argmin(1)
mask = anchor_id[:, None] == anchor_id[None, :]
return mask
def dilated_2d_pattern(H, W, k=2):
"""
Returns a 2d pattern that samples 1 every k elements in the attention mask.
Can be seen as a form of downsampling, where every pixel attends to a downsampled
version of the input.
"""
d_h = local_nd_distance(H, W, p=1, weights=(1, 0))
d_w = local_nd_distance(H, W, p=1, weights=(0, 1))
d = (d_h.floor() % k == 0) & (d_w.floor() % k == 0)
return d
# Block sparse utils
def block_sparsify_tensor(x, mask, block_size):
"""
Block sparsify a tensor, given a mask and block size
"""
ret = torch.empty(
(x.size(0), mask.sum(), block_size, block_size), dtype=x.dtype, device=x.device
)
for idx, (h, i, j) in enumerate(zip(*mask.nonzero(as_tuple=True))):
ret[:, idx, :, :] = x[
:,
h,
i * block_size : (i + 1) * block_size,
j * block_size : (j + 1) * block_size,
]
return ret
def pattern_to_layout(mask: torch.Tensor, block_size: int) -> torch.Tensor:
r"""
Given a mask pattern and blocksize, return the corresponding layout
which makes sure that all the positives in the mask are covered
"""
assert mask.ndim >= 2, "We're expecting [Heads, Seq, Seq] or [Seq, Seq]"
_should_squeeze = False
if mask.ndim == 2:
mask = mask.unsqueeze(0)
_should_squeeze = True
assert (
mask.shape[1] % block_size == 0 and mask.shape[2] % block_size == 0
), "We're only handling masks divisible by block_size"
# Now mark the mask
layout = torch.nn.functional.max_pool2d(
mask.to(torch.float), kernel_size=block_size, stride=block_size
)
layout = layout.to(torch.long)
if _should_squeeze:
layout.squeeze_(0)
return layout
def alibi_pattern(threshold: float, mask_shape: torch.Size) -> torch.Tensor:
r"""
Use the additive bias computation from ALiBi_ to generate a mask.
Note that this mask can in turn be used to generate a blocksparse attention computation layout
.. note: mask_shape is expected to hold the [heads, seq, seq] dimensions
.. _ALiBi: https://arxiv.org/pdf/2108.12409.pdf
"""
# CREDITS: code snippet from Ofir Press, one of the authors
def get_slopes(n: int):
def get_slopes_power_of_2(n: int) -> List[float]:
start = 2 ** (-(2 ** -(math.log2(n) - 3)))
ratio = start
return [start * ratio**i for i in range(n)]
# In the paper, we only train models that have 2^a heads for some a. This function has
# some good properties that only occur when the input is a power of 2. To maintain that even
# when the number of heads is not a power of 2, we use this workaround.
if math.log2(n).is_integer():
return get_slopes_power_of_2(n)
else:
closest_power_of_2 = 2 ** math.floor(math.log2(n))
return (
get_slopes_power_of_2(closest_power_of_2)
+ get_slopes(2 * closest_power_of_2)[0::2][: n - closest_power_of_2]
)
maxpos = mask_shape[1]
attn_heads = mask_shape[0]
slopes = torch.Tensor(get_slopes(attn_heads))
# In the next line, the part after the * is what constructs the diagonal matrix
# (right matrix in Figure 3 in the paper).
# If you run it you'll see that it doesn't exactly print out the same matrix as we have in Figure 3,
# but one where all rows are identical.
# This works because the softmax operation is invariant to translation,
# and our bias functions are always linear.
alibi = slopes.unsqueeze(1).unsqueeze(1) * torch.arange(maxpos).unsqueeze(
0
).unsqueeze(0).expand(attn_heads, -1, -1)
alibi = alibi.view(attn_heads, 1, maxpos)
# Now threshold arbitrarily, report the mask
return alibi < threshold
def quick_fixed_layout(num_heads: int, block_size: int, seq_len: int):
config = FixedSparsityConfig(num_heads=num_heads, block_size=block_size)
return config.make_layout(seq_len)
def quick_variable_layout(num_heads: int, block_size: int, seq_len: int):
config = VariableSparsityConfig(num_heads=num_heads, block_size=block_size)
return config.make_layout(seq_len)
def quick_bigbird_layout(num_heads: int, block_size: int, seq_len: int):
config = BigBirdSparsityConfig(num_heads=num_heads, block_size=block_size)
return config.make_layout(seq_len)
def quick_bslongformer_layout(num_heads: int, block_size: int, seq_len: int):
config = BSLongformerSparsityConfig(num_heads=num_heads, block_size=block_size)
return config.make_layout(seq_len)
def layout_to_pattern(layout: torch.Tensor, block_size: int):
r"""
create a pattern of shape [heads, seq, seq] out of a blocksparse
layout of shape [heads, seq/block_size, seq/block_size]
"""
return torch.kron(layout, torch.ones(block_size, block_size))
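if __name__ == "__main__":
    # Minimal usage sketch (illustrative): compose a local + causal boolean pattern,
    # turn it into a blocksparse layout, then expand the layout back to a pattern.
    seq, window, block = 256, 5, 32
    pattern = local_1d_pattern(seq, window) & causal_1d_pattern(seq)
    layout = pattern_to_layout(pattern, block)
    expanded = layout_to_pattern(layout, block)
    print(pattern.shape, layout.shape, expanded.shape)
    # torch.Size([256, 256]) torch.Size([8, 8]) torch.Size([256, 256])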
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/attention_patterns.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
import torch
import torch.nn as nn
from xformers.components.attention import Attention, AttentionConfig, register_attention
@dataclass
class VisualAttentionConfig(AttentionConfig):
dim_model: int # dimension of the input sequence
class LKA(nn.Module):
def __init__(self, dim: int):
super().__init__()
self.conv0 = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
self.conv_spatial = nn.Conv2d(
dim, dim, 7, stride=1, padding=9, groups=dim, dilation=3
)
self.conv1 = nn.Conv2d(dim, dim, 1)
def forward(self, x: torch.Tensor):
u = x.clone()
attn = self.conv0(x)
attn = self.conv_spatial(attn)
attn = self.conv1(attn)
return u * attn
@register_attention("visual", VisualAttentionConfig)
class Visual(Attention):
def __init__(
self,
dim_model: int,
*_,
**__,
):
"""
Large kernel attention mechanism, as proposed in `Visual Attention Network`_, Guo et al (2022).
The original notation is tentatively kept as is. See https://github.com/Visual-Attention-Network
for the reference implementation
.. Note: compared to the paper, this block contains the LKA (Large Kernel Attention)
and the prior and posterior transformations (Conv2d and activation)
.. _`Visual Attention Network` : https://arxiv.org/pdf/2202.09741.pdf
"""
super().__init__()
self.block = nn.Sequential(
nn.Conv2d(dim_model, dim_model, 1),
nn.GELU(),
LKA(dim_model),
nn.Conv2d(dim_model, dim_model, 1),
)
# MHA related flags:
self.requires_same_k_q_dimensions = (
True # This mechanism only really supports self attention
)
self.supports_attention_mask = False
self.requires_skip_multi_head = (
True # This mechanism skips the multihead attention altogether
)
self.requires_squared_context = (
True # Recovering the 2D structure from context assumes squared content
)
self.requires_input_projection = (
False # This mechanism does not require that the MHA projects inputs
)
def forward(self, q: torch.Tensor, *_, **__):
# Expose the 2D token structure
B, HW, C = q.shape
H = int(math.sqrt(HW))
assert H * H == HW
x = q.transpose(-2, -1).reshape(B, C, H, H)
# Large kernel attention
residual = x.clone()
x = self.block(x)
x = x + residual
# Get back to B HW C
return x.flatten(2, 3).transpose(-2, -1)
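if __name__ == "__main__":
    # Minimal usage sketch (illustrative): this block expects a squared token grid,
    # i.e. a context length that is a perfect square (here 16 x 16 tokens).
    attn = Visual(dim_model=32)
    x = torch.randn(2, 16 * 16, 32)  # (batch, H * W, channels)
    out = attn(x)
    print(out.shape)  # torch.Size([2, 256, 32])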
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/visual.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
The code has been adopted from DeepSpeed
(https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/ops/sparse_attention/sparsity_config.py)
"""
import random
import torch
class SparsityConfig:
"""Abstract Configuration class to store `sparsity configuration of a self attention layer`.
It contains shared property of different block-sparse sparsity patterns. However, each class
needs to extend it based on required property and functionality.
"""
def __init__(self, num_heads, block_size=16, different_layout_per_head=False):
"""Initialize the Sparsity Pattern Config.
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block_size: optional: an integer determining the block size. Current implementation of
sparse self-attention is based on blocked sparse matrices. In which this parameter
defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be
assigned a different sparsity layout; default is false and this will be satisfied
based on availability.
"""
self.num_heads = num_heads
self.block_size = block_size
self.different_layout_per_head = different_layout_per_head
self.num_layout_heads = num_heads if different_layout_per_head else 1
def setup_layout(self, seq_len):
"""Create layout tensor for the given sequence length
Arguments:
            seq_len: required: an integer determining the underlying sequence length of the attention layer input.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) for sparsity layout
of all head; initialized with zero
"""
if seq_len % self.block_size != 0:
raise ValueError(
f"Sequence Length, {seq_len}, needs to be dividable by Block size {self.block_size}!"
)
num_blocks = seq_len // self.block_size
# TODO Currently we allocate layout per head; needs to be updated if heads share a single layout.
layout = torch.zeros(
(self.num_heads, num_blocks, num_blocks), dtype=torch.int64
)
return layout
def check_and_propagate_first_head_layout(self, layout):
"""If all heads require same sparsity layout, it propagate first head layout to all heads
Arguments:
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing
sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity
layout of all head
"""
if not self.different_layout_per_head:
layout[1 : self.num_heads, :, :] = layout[0, :, :]
return layout
class DenseSparsityConfig(SparsityConfig):
"""Configuration class to store `Dense` configuration.
In reality, this is not sparse and all blocks are used. We keep it for the sake of comparison and
comprehension.
"""
def __init__(self, num_heads, block_size=16, different_layout_per_head=False):
"""Initialize the Dense Sparsity Pattern Config.
In reality, this is not sparse and all blocks are used. We keep it for the sake of comparison
and comprehension.
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block_size: optional: an integer determining the block size. Current implementation of
sparse self-attention is based on blocked sparse matrices. In which this parameter
defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: this is just for the sake of consistency with
other sparsity formats; can ignore it for DenseSparsityConfig
"""
super().__init__(num_heads, block_size, different_layout_per_head)
def make_layout(self, seq_len):
"""Set 1 to all blocks of the layout meanins the pattern is dense; not sparse.
Arguments:
            seq_len: required: an integer determining the underlying sequence length;
must be <= max sequence length
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity
layout of all head; for dense everything is 1
"""
layout = self.setup_layout(seq_len)
layout[:, :, :] = 1
return layout
class FixedSparsityConfig(SparsityConfig):
"""Configuration class to store `Fixed` sparsity configuration.
For more details about this sparsity config, please see `Generative Modeling with
Sparse Transformers`: https://arxiv.org/abs/1904.10509; this has been customized.
This class extends parent class of `SparsityConfig` and customizes it for `Fixed` sparsity.
"""
def __init__(
self,
num_heads,
block_size=16,
different_layout_per_head=False,
num_local_blocks=4,
num_global_blocks=1,
attention="bidirectional",
horizontal_global_attention=False,
num_different_global_patterns=1,
):
"""Initialize `Fixed` Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block_size: optional: an integer determining the block size. Current implementation of
sparse self-attention is based on blocked sparse matrices. In which this parameter
defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be
assigned a different sparsity layout; default is false and this will be satisfied
based on availability.
num_local_blocks: optional: an integer determining the number of blocks in local attention
window.
num_global_blocks: optional: an integer determining how many consecutive blocks in a local
window is used as the representative of the window for global attention.
            attention: optional: a string determining the attention type. Attention can be `unidirectional`,
                such as in autoregressive models, in which tokens attend only to tokens that appear before
                them in the context; in that case the upper triangular part of the attention matrix is empty.
                Or it can be `bidirectional`, such as in BERT, in which tokens can attend to any other tokens
                before or after them; the upper triangular part of the attention matrix then mirrors the
                lower triangular part.
horizontal_global_attention: optional: a boolean determining if blocks that are global
representative of a local window, also attend to all other blocks. This is valid only if
attention type is `bidirectional`. Looking at the attention matrix, that means global
attention not only includes the vertical blocks, but also horizontal blocks.
num_different_global_patterns: optional: an integer determining number of different global
attentions layouts. While global attention can be fixed by which block/s are representative
of any local window, since there are multi-heads, each head can use a different global representative.
                For example, with a 4-block local window and a global attention size of 1 block, we can have 4 different
                versions in which the first, second, third, or fourth block of each local window can be the global
                representative of that window. This parameter determines how many such patterns we want.
Of course, there is a limitation based on num_local_blocks and num_global_blocks.
"""
super().__init__(num_heads, block_size, different_layout_per_head)
self.num_local_blocks = num_local_blocks
if num_local_blocks % num_global_blocks != 0:
raise ValueError(
f"""Number of blocks in a local window, {num_local_blocks},
must be dividable by number of global blocks, {num_global_blocks}!"""
)
self.num_global_blocks = num_global_blocks
if attention != "unidirectional" and attention != "bidirectional":
raise NotImplementedError(
'only "uni/bi-directional" attentions are supported for now!'
)
self.attention = attention
if attention != "bidirectional" and horizontal_global_attention:
raise ValueError(
'only "bi-directional" attentions can support horizontal global attention!'
)
self.horizontal_global_attention = horizontal_global_attention
if num_different_global_patterns > 1 and not different_layout_per_head:
raise ValueError(
"""Number of different layouts cannot be more than one when you have set a single layout
for all heads! Set different_layout_per_head to True."""
)
if num_different_global_patterns > (num_local_blocks // num_global_blocks):
raise ValueError(
f"""Number of layout versions (num_different_global_patterns), {num_different_global_patterns},
cannot be larger than number of local window blocks divided by number of global blocks,
{num_local_blocks} / {num_global_blocks} = {num_local_blocks//num_global_blocks}!"""
)
self.num_different_global_patterns = num_different_global_patterns
def set_local_layout(self, h, layout):
"""Sets local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing
sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity
layout of all head in which local layout is set
"""
num_blocks = layout.shape[1]
for i in range(0, num_blocks, self.num_local_blocks):
end = min(i + self.num_local_blocks, num_blocks)
for row in range(i, end):
for col in range(
i, (row + 1 if self.attention == "unidirectional" else end)
):
layout[h, row, col] = 1
return layout
def set_global_layout(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Currently we set global blocks starting from the last block of a local window to the first one.
That means if a local window consists of 4 blocks and global attention size is one block, we use
        block #4 in each local window as global. If we have a different layout per head, then other heads
        will get #3, #2, and #1. And if we have more heads (with different_layout_per_head set) than the number
        of global attention patterns, multiple heads may share the same global attention blocks.
Note) if horizontal_global_attention is set, global blocks will be set both horizontally and
vertically.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing
sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity
layout of all head in which global layout is set
"""
num_blocks = layout.shape[1]
first_global_block_idx = (
self.num_local_blocks
- (1 + h % self.num_different_global_patterns) * self.num_global_blocks
)
        # set all global blocks, except those in a possibly shorter last local window
end = num_blocks - (num_blocks % self.num_local_blocks)
for i in range(first_global_block_idx, end, self.num_local_blocks):
# vertical global attention
first_row = 0 if self.attention == "bidirectional" else i
# (((i // self.num_local_blocks) + 1) * self.num_local_blocks)
# if (first_row < num_blocks):
layout[h, first_row:, i : i + self.num_global_blocks] = 1
# horizontal global attention; only in bidirectional attention
if self.horizontal_global_attention:
layout[h, i : i + self.num_global_blocks, :] = 1
# set last global blocks; handle possible short last local window
if end < num_blocks:
start = min(
end + first_global_block_idx, num_blocks - self.num_global_blocks
)
end = start + self.num_global_blocks
# vertical global attention
first_row = 0 if self.attention == "bidirectional" else start
# (((start // self.num_local_blocks) + 1) * self.num_local_blocks)
# if (first_row < num_blocks):
layout[h, first_row:, start:end] = 1
# horizontal global attention
if self.horizontal_global_attention:
layout[h, start:end, :] = 1
return layout
def make_layout(self, seq_len):
"""Generates `Fixed` sparsity layout used by each head in the sparse attention.
Arguments:
            seq_len: required: an integer determining the underlying sequence length of the attention layer input.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `Fixed`
sparsity layout of all head
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_local_layout(h, layout)
layout = self.set_global_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
class VariableSparsityConfig(SparsityConfig):
"""Configuration class to store `Variable` sparsity configuration.
This layout is an extension of FixedSparsityConfig in which:
- user can set random layout; default value is zero means no random block
- user can provide a list of local block sizes
- user can provide a list of global block indices.
For more details about `Fixed` sparsity config, please see `Generative Modeling with
Sparse Transformers`: https://arxiv.org/abs/1904.10509; this has been customized.
This class extends parent class of `SparsityConfig` and customizes it for `Fixed` sparsity.
"""
def __init__(
self,
num_heads,
block_size=16,
different_layout_per_head=False,
num_random_blocks=0,
local_window_blocks=[4],
global_block_indices=[0],
global_block_end_indices=None,
attention="bidirectional",
horizontal_global_attention=False,
):
"""Initialize `Variable` Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block_size: optional: an integer determining the block size. Current implementation of sparse
self-attention is based on blocked sparse matrices. In which this parameter defines
size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a
different sparsity layout; default is false and this will be satisfied based on
availability. Currently this sparsity config can only assign single layout to all heads;
needs to be extended for different layout per head.
num_random_blocks: optional: an integer determining the number of random blocks in each block row.
local_window_blocks: optional: a list of integers determining the number of blocks in each
local attention window. It assumes first number determines # of blocks in the first local
window, second the second window, ..., and the last number determines the number of blocks
in the remaining local windows.
global_block_indices: optional: a list of integers determining which blocks are considered
                as global attention. The given indices determine the blocks that all other token blocks
                attend to, and which in turn attend to all other token blocks. Default value is only index 0.
Notice that if global_block_end_indices parameter is set, this parameter is used as
starting index of each global window.
global_block_end_indices: optional: a list of integers determining end indices of global
                window blocks. By default this is not used. But if it is set, it must have the same size
                as the global_block_indices parameter; combining these two parameters, for each index i,
blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are
considered as global attention.
            attention: optional: a string determining the attention type. Attention can be `unidirectional`,
                such as in autoregressive models, in which tokens attend only to tokens that appear before
                them in the context; in that case the upper triangular part of the attention matrix is empty.
                Or it can be `bidirectional`, such as in BERT, in which tokens can attend to any other tokens
                before or after them; the upper triangular part of the attention matrix then mirrors the
                lower triangular part.
horizontal_global_attention: optional: a boolean determining if blocks that are global
representative of a local window, also attend to all other blocks. This is valid only if
attention type is `bidirectional`. Looking at the attention matrix, that means global
attention not only includes the vertical blocks, but also horizontal blocks.
"""
super().__init__(num_heads, block_size, different_layout_per_head)
self.num_random_blocks = num_random_blocks
self.local_window_blocks = local_window_blocks
self.global_block_indices = global_block_indices
if global_block_end_indices is not None:
if len(global_block_indices) != len(global_block_end_indices):
raise ValueError(
f"""Global block start indices length, {len(global_block_indices)}, must be same as
global block end indices length, {len(global_block_end_indices)}!"""
)
for _, (start_idx, end_idx) in enumerate(
zip(global_block_indices, global_block_end_indices)
):
if start_idx >= end_idx:
raise ValueError(
f"""Global block start index, {start_idx}, must be smaller than global block end
index, {end_idx}!"""
)
self.global_block_end_indices = global_block_end_indices
if attention != "unidirectional" and attention != "bidirectional":
raise NotImplementedError(
'only "uni/bi-directional" attentions are supported for now!'
)
self.attention = attention
if attention != "bidirectional" and horizontal_global_attention:
raise ValueError(
'only "bi-directional" attentions can support horizontal global attention!'
)
self.horizontal_global_attention = horizontal_global_attention
def set_random_layout(self, h, layout):
"""Sets random attention layout used by the given head in the sparse attention.
Note) By default, it assumes there will be a unique random block layout for all heads, unless
the `different_layout_per_head` parameter is set, in which case each head can have a different
random layout.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the
sparsity layout of all heads; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity
layout of all heads, in which the random layout is set
"""
num_blocks = layout.shape[1]
if num_blocks < self.num_random_blocks:
raise ValueError(
f"""Number of random blocks, {self.num_random_blocks}, must be smaller than overall number
of blocks in a row, {num_blocks}!"""
)
for row in range(0, num_blocks):
rnd_cols = random.sample(range(0, num_blocks), self.num_random_blocks)
layout[h, row, rnd_cols] = 1
return layout
def set_local_layout(self, h, layout):
"""Sets local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the
sparsity layout of all heads; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity
layout of all heads, in which the local layout is set
"""
num_blocks = layout.shape[1]
start_block_idx = 0
end_block_idx = 0
for block_size in self.local_window_blocks:
end_block_idx += block_size
end_block_idx = min(end_block_idx, num_blocks)
for row in range(start_block_idx, end_block_idx):
for col in range(
start_block_idx,
(row + 1 if self.attention == "unidirectional" else end_block_idx),
):
layout[h, row, col] = 1
start_block_idx += block_size
# if there is any remaining unattended part, use the last local window block size as the local
# window for the remaining applicable local windows
for i in range(start_block_idx, num_blocks, block_size):
end_block_idx = min(i + block_size, num_blocks)
for row in range(i, end_block_idx):
for col in range(
i,
(row + 1 if self.attention == "unidirectional" else end_block_idx),
):
layout[h, row, col] = 1
return layout
def set_global_layout(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the
sparsity layout of all heads; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity
layout of all heads, in which the global layout is set
"""
num_blocks = layout.shape[1]
if self.global_block_end_indices is None:
for idx in self.global_block_indices:
# if global block idx is in the range of the sequence blocks
if idx < num_blocks:
# global rows
if self.horizontal_global_attention:
layout[h, idx, :] = 1
# global columns
first_row = 0 if self.attention == "bidirectional" else idx
layout[h, first_row:, idx] = 1
else:
for _, (start_idx, end_idx) in enumerate(
zip(self.global_block_indices, self.global_block_end_indices)
):
# if global block idx is in the range of the sequence blocks
if start_idx < num_blocks:
end_idx = min(end_idx, num_blocks)
# global rows
if self.horizontal_global_attention:
layout[h, start_idx:end_idx, :] = 1
# global columns
first_row = 0 if self.attention == "bidirectional" else start_idx
layout[h, first_row:, start_idx:end_idx] = 1
return layout
def make_layout(self, seq_len):
"""Generates `Variable` sparsity layout used by each head in the sparse attention.
Arguments:
seq_len: required: an integer determining the sequence length of the layer's input.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the `Variable`
sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_random_layout(h, layout)
layout = self.set_local_layout(h, layout)
layout = self.set_global_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
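# Illustrative usage sketch (not part of the original file). It assumes the
# `Variable` config class defined above is named `VariableSparsityConfig`, as in
# the upstream DeepSpeed-derived module, and that `seq_len` must be a multiple of
# `block_size` (a constraint enforced by the parent `SparsityConfig`).
def _example_variable_layout():
    config = VariableSparsityConfig(
        num_heads=4,
        block_size=16,
        num_random_blocks=1,       # one random block per block row
        local_window_blocks=[4],   # every local window spans 4 blocks
        global_block_indices=[0],  # block 0 attends to, and is attended by, everyone
        attention="bidirectional",
    )
    # 256 tokens with 16x16 blocks -> a 16x16 block layout per head
    return config.make_layout(seq_len=256)  # (num_heads, 16, 16) tensor of 0/1 flags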
class BigBirdSparsityConfig(SparsityConfig):
"""Configuration class to store `BigBird` sparsity configuration.
For more details about this sparsity config, please see `Big Bird: Transformers for
Longer Sequences`: https://arxiv.org/pdf/2007.14062.pdf
This class extends parent class of `SparsityConfig` and customizes it for `BigBird` sparsity.
"""
def __init__(
self,
num_heads,
block_size=16,
different_layout_per_head=False,
num_random_blocks=1,
num_sliding_window_blocks=3,
num_global_blocks=1,
attention="bidirectional",
):
"""Initialize the BigBird Sparsity Pattern Config.
For a usage example, please see the DeepSpeed Sparse Transformer Tutorial (TODO).
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block_size: optional: an integer determining the block size. The current implementation of
sparse self-attention is based on blocked sparse matrices, in which this parameter
defines the size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned
a different sparsity layout; default is false, and the request is honored based on
availability.
num_random_blocks: optional: an integer determining the number of random blocks in each
block row.
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding
local attention window.
num_global_blocks: optional: an integer determining how many consecutive blocks, starting
from index 0, are considered as global attention. Global block tokens will be attended
by all other block tokens and will attend to all other block tokens as well.
attention: optional: a string determining attention type. Attention can be `unidirectional`,
such as autoregressive models, in which tokens attend only to tokens that appear before
them in the context; the upper triangular part of the attention matrix is then empty. Or it
can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens
before or after them; the upper triangular part of the attention matrix is then the mirror
of the lower triangular part.
"""
super().__init__(num_heads, block_size, different_layout_per_head)
self.num_random_blocks = num_random_blocks
self.num_sliding_window_blocks = num_sliding_window_blocks
self.num_global_blocks = num_global_blocks
if attention != "unidirectional" and attention != "bidirectional":
raise NotImplementedError(
'only "uni/bi-directional" attentions are supported for now!'
)
self.attention = attention
def set_random_layout(self, h, layout):
"""Sets random attention layout used by the given head in the sparse attention.
Note) By default, it assumes there will be a unique random block layout for all heads, unless
the `different_layout_per_head` parameter is set, in which case each head can have a different random layout.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the
sparsity layout of all heads; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity
layout of all heads, in which the random layout is set
"""
num_blocks = layout.shape[1]
if num_blocks < self.num_random_blocks:
raise ValueError(
f"""Number of random blocks, {self.num_random_blocks}, must be smaller than overall number
of blocks in a row, {num_blocks}!"""
)
for row in range(0, num_blocks):
sample_range = (
range(0, num_blocks)
if self.attention == "bidirectional"
else range(0, row + 1)
)
rnd_cols = random.sample(sample_range, self.num_random_blocks)
layout[h, row, rnd_cols] = 1
return layout
def set_sliding_window_layout(self, h, layout):
"""Sets sliding local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the
sparsity layout of all heads; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity
layout of all heads, in which the local sliding window layout is set
"""
num_blocks = layout.shape[1]
if num_blocks < self.num_sliding_window_blocks:
raise ValueError(
f"""Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than
overall number of blocks in a row, {num_blocks}!"""
)
w = self.num_sliding_window_blocks // 2
for row in range(0, num_blocks):
start = max(0, row - w)
end = min(row + w + 1, num_blocks)
layout[h, row, start:end] = 1
return layout
def set_global_layout_itc(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the
sparsity layout of all heads; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout
of all heads, in which the global layout is set
"""
num_blocks = layout.shape[1]
if num_blocks < self.num_global_blocks:
raise ValueError(
f"""Number of global blocks, {self.num_global_blocks}, must be smaller than overall number
of blocks in a row, {num_blocks}!"""
)
# global rows
layout[h, 0 : self.num_global_blocks, :] = 1
# global columns
layout[h, :, 0 : self.num_global_blocks] = 1
if self.attention == "unidirectional":
# zero out anything attending to the future
layout = torch.tril(layout)
return layout
def make_layout(self, seq_len):
"""Generates `BigBird` sparsity layout used by each head in the sparse attention.
Arguments:
seq_len: required: an integer determining the sequence length of the layer's input.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the `BigBird`
sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_random_layout(h, layout)
layout = self.set_sliding_window_layout(h, layout)
layout = self.set_global_layout_itc(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
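# Illustrative usage sketch (not part of the original file): a `BigBird` layout
# combining random, sliding-window and global blocks. `seq_len` is assumed to be
# a multiple of `block_size`, as required by the parent `SparsityConfig`.
def _example_bigbird_layout():
    config = BigBirdSparsityConfig(
        num_heads=4,
        block_size=16,
        num_random_blocks=1,          # one random block per block row
        num_sliding_window_blocks=3,  # each row attends to itself +/- one block
        num_global_blocks=1,          # block 0 is global
        attention="bidirectional",
    )
    return config.make_layout(seq_len=512)  # 512 / 16 = 32 blocks -> (num_heads, 32, 32)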
class BSLongformerSparsityConfig(SparsityConfig):
"""Configuration class to store edited `Longformer` sparsity configuration.
Note) this is a block-sparse version of the Longformer, which is slightly different from the
original Longformer, whose sparsity is element-wise.
For more details about this sparsity config, please see `Longformer:
The Long-Document Transformer`: https://arxiv.org/pdf/2004.05150.pdf
This class extends parent class of `SparsityConfig` and customizes it for `Longformer` sparsity.
"""
def __init__(
self,
num_heads,
block_size=16,
different_layout_per_head=False,
num_sliding_window_blocks=3,
global_block_indices=[0],
global_block_end_indices=None,
attention="bidirectional",
):
"""Initialize the edited `Longformer` Sparsity Pattern Config.
For a usage example, please see the DeepSpeed Sparse Transformer Tutorial (TODO).
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block_size: optional: an integer determining the block size. The current implementation of sparse
self-attention is based on blocked sparse matrices, in which this parameter defines the size
of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a
different sparsity layout; default is false, and the request is honored based on
availability.
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding
local attention window.
global_block_indices: optional: a list of integers determining which blocks are considered
as global attention. The given indices determine the blocks that all other token blocks
attend to, and these blocks in turn attend to all other token blocks. The default value is
index 0 only. Notice that if the global_block_end_indices parameter is set, this parameter
is used as the starting index of each global window.
global_block_end_indices: optional: a list of integers determining end indices of global
window blocks. By default this is not used, but if it is set, it must have the same size
as the global_block_indices parameter; combining these two parameters, for each index i,
blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are
considered as global attention.
attention: optional: a string determining attention type. Attention can be `unidirectional`,
such as autoregressive models, in which tokens attend only to tokens that appear before
them in the context; the upper triangular part of the attention matrix is then empty. Or it
can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens
before or after them; the upper triangular part of the attention matrix is then the mirror
of the lower triangular part.
"""
super().__init__(num_heads, block_size, different_layout_per_head)
self.num_sliding_window_blocks = num_sliding_window_blocks
self.global_block_indices = global_block_indices
self.attention = attention
if global_block_end_indices is not None:
if len(global_block_indices) != len(global_block_end_indices):
raise ValueError(
f"""Global block start indices length, {len(global_block_indices)}, must be same as
global block end indices length, {len(global_block_end_indices)}!"""
)
for _, (start_idx, end_idx) in enumerate(
zip(global_block_indices, global_block_end_indices)
):
if start_idx >= end_idx:
raise ValueError(
f"""Global block start index, {start_idx}, must be smaller than global block end
index, {end_idx}!"""
)
self.global_block_end_indices = global_block_end_indices
def set_sliding_window_layout(self, h, layout):
"""Sets sliding local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the
sparsity layout of all heads; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout
of all heads, in which the local sliding window layout is set
"""
num_blocks = layout.shape[1]
if num_blocks < self.num_sliding_window_blocks:
raise ValueError(
f"""Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller
than overall number of blocks in a row, {num_blocks}!"""
)
w = self.num_sliding_window_blocks // 2
for row in range(0, num_blocks):
start = max(0, row - w)
end = min(row + w + 1, num_blocks)
layout[h, row, start:end] = 1
return layout
def set_global_layout(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the
sparsity layout of all heads; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity
layout of all heads, in which the global layout is set
"""
num_blocks = layout.shape[1]
if self.global_block_end_indices is None:
for idx in self.global_block_indices:
# if global block idx is in the range of the sequence blocks
if idx < num_blocks:
# global rows
layout[h, idx, :] = 1
# global columns
layout[h, :, idx] = 1
else:
for _, (start_idx, end_idx) in enumerate(
zip(self.global_block_indices, self.global_block_end_indices)
):
# if global block idx is in the range of the sequence blocks
if start_idx < num_blocks:
end_idx = min(end_idx, num_blocks)
# global rows
layout[h, start_idx:end_idx, :] = 1
# global columns
layout[h, :, start_idx:end_idx] = 1
if self.attention == "unidirectional":
layout = torch.tril(layout)
return layout
def make_layout(self, seq_len):
"""Generates edited `Longformer` sparsity layout used by each head in the sparse attention.
Arguments:
seq_len: required: an integer determining the sequence length of the layer's input.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the `BSLongformer`
sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_sliding_window_layout(h, layout)
layout = self.set_global_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
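# Illustrative usage sketch (not part of the original file): a block-sparse
# Longformer layout where the global window covers the block range [0, 2).
# `seq_len` is assumed to be a multiple of `block_size`.
def _example_bslongformer_layout():
    config = BSLongformerSparsityConfig(
        num_heads=4,
        block_size=16,
        num_sliding_window_blocks=3,
        global_block_indices=[0],      # start of the global window
        global_block_end_indices=[2],  # exclusive end: blocks 0 and 1 are global
        attention="bidirectional",
    )
    return config.make_layout(seq_len=256)  # (num_heads, 16, 16)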
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/sparsity_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
from xformers.components.attention import Attention, AttentionConfig, register_attention
from xformers.components.attention.core import (
scaled_dot_product_attention,
scaled_query_key_softmax,
)
from xformers.components.attention.utils import (
bool_mask_to_additive,
iterative_pinv,
reshape_key_padding_mask,
)
logger = logging.getLogger("xformers")
@dataclass
class NystromSelfAttentionConfig(AttentionConfig):
"""
num_heads Number of heads.
num_landmarks Number of landmarks to use for the softmax approximation. 64 is often sufficient for a good
approximation, according to https://arxiv.org/pdf/2102.03902.pdf.
causal Apply a causal mask, so that attention cannot be applied to the future.
use_razavi_pinverse If true, use iterative method from (Razavi et al. 2014) to approximate the Moore-Penrose
inverse, otherwise use standard torch inverse.
pinverse_original_init True if using original initialization when calculating Moore-Penrose pseudo inverse using
method from (Razavi et al. 2014).
False if using exact coefficient computation (leads to faster convergence).
inv_iterations Number of iterations for calculating the Moore-Penrose pseudo inverse.
v_skip_connection A module that will take V as input and will be added as a skip connection to the
softmax approximation. A skip connection is added in the paper to help with training.
conv_kernel_size Kernel size for the convolution optionally added to help in training.
If v_skip_connection is not specified, this will be used to define the default
depthwise convolution used as a skip connection.
If both conv_kernel_size and v_skip_connection are None, no skip connection will
be added.
landmark_pooling Which module to use when computing landmarks. Default is AdaptiveAvgPool2d.
"""
num_heads: int
num_landmarks: Optional[int]
landmark_pooling: Optional[nn.Module]
causal: Optional[bool]
pinverse_original_init: Optional[bool]
inv_iterations: Optional[int]
v_skip_connection: Optional[nn.Module]
conv_kernel_size: Optional[int]
use_razavi_pinverse: Optional[bool]
class AvgPool(nn.Module):
def __init__(self, n: int):
super().__init__()
self.n = n
def forward(self, x: torch.Tensor):
# Average independently for every segment in the sequence dimension
seq_len = x.shape[1]
head_dim = x.shape[2]
segments = seq_len // self.n
assert segments > 0, "num_landmarks should be smaller than the sequence length"
# Dimensions are a match
if seq_len % self.n == 0:
return x.reshape(
-1,
self.n,
segments,
head_dim,
).mean(dim=-2)
# Handle the last segment boundary being off
n_round = self.n - seq_len % self.n
x_avg_round = (
x[:, : n_round * segments, :]
.reshape(-1, n_round, segments, head_dim)
.mean(dim=-2)
)
x_avg_off = (
x[:, n_round * segments :, :]
.reshape(-1, self.n - n_round, segments + 1, head_dim)
.mean(dim=-2)
)
return torch.cat((x_avg_round, x_avg_off), dim=-2)
@register_attention("nystrom", NystromSelfAttentionConfig)
class NystromAttention(Attention):
# TODO: update defaults for use_razavi_pinverse and inv_iterations
def __init__(
self,
dropout: float,
num_heads: int,
num_landmarks: int = 64,
landmark_pooling: Optional[nn.Module] = None,
causal: bool = False,
use_razavi_pinverse: bool = True,
pinverse_original_init: bool = False,
inv_iterations: int = 6, # recommended default in paper was 6.
v_skip_connection: Optional[nn.Module] = None,
conv_kernel_size: Optional[int] = None,
*args,
**kwargs,
):
"""
Nystrom attention mechanism, from Nystromformer_.
::
"A Nystrom-based Algorithm for Approximating Self-Attention."
Xiong, Y., Zeng, Z., Chakraborty, R., Tan, M., Fung, G., Li, Y., Singh, V. (2021)
Reference codebase: https://github.com/mlpen/Nystromformer
.. _Nystromformer: https://arxiv.org/pdf/2102.03902.pdf
"""
super().__init__()
# merged key padding mask and attention mask is not accepted
self.requires_separate_masks = True
self.num_landmarks = num_landmarks
# TODO: should be able to not have to pass in num_heads
self.num_heads = num_heads
self.use_razavi_pinverse = use_razavi_pinverse
self.pinverse_original_init = pinverse_original_init
self.inv_iterations = inv_iterations
self.attn_drop = nn.Dropout(dropout)
self.skip_connection = v_skip_connection
self.causal = causal
if self.skip_connection is None and conv_kernel_size is not None:
self.skip_connection = nn.Conv2d(
in_channels=self.num_heads,
out_channels=self.num_heads,
kernel_size=(conv_kernel_size, 1),
padding=(conv_kernel_size // 2, 0),
bias=False,
groups=self.num_heads,
)
if landmark_pooling is not None:
self.landmark_pooling = landmark_pooling
else:
self.landmark_pooling = AvgPool(n=self.num_landmarks)
# Optional lower triangular masks for causal attention
self.causal_mask_1: Optional[torch.Tensor] = None
self.causal_mask_2: Optional[torch.Tensor] = None
self.causal_mask_3: Optional[torch.Tensor] = None
# This attention does not support attention masks
self.supports_attention_mask = False
self.supports_key_padding_mask = True
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
key_padding_mask: Optional[torch.Tensor] = None,
*args,
**kwargs,
):
r"""
key_padding_mask Only a key padding mask is accepted here. The size must be (batch size, sequence length) or
(batch size * num_heads, 1, sequence length). If the dimensions are not correct, the mask will
be ignored. An additive mask is expected, meaning float values using "-inf" to mask values.
"""
batched_dim = k.size(0)
seq_len = k.size(-2)
tt = {"dtype": q.dtype, "device": q.device}
if key_padding_mask is not None:
if key_padding_mask.dtype == torch.bool:
logger.warning(
"Bool mask found, but an additive mask is expected. Converting but this is slow"
)
key_padding_mask = bool_mask_to_additive(key_padding_mask)
if key_padding_mask.ndim == 2:
key_padding_mask = reshape_key_padding_mask(
key_padding_mask, batched_dim
)
zeros = torch.zeros_like(key_padding_mask)
ones = torch.ones_like(key_padding_mask)
is_masked = torch.isinf(-key_padding_mask)
# _mask takes 1 if the token is not padded, otherwise 0.
_mask = torch.where(is_masked, zeros, ones)
_mask = _mask.transpose(2, 1)
assert _mask.shape == (batched_dim, q.shape[1], 1)
# Mask q and k before pooling
# https://github.com/mlpen/Nystromformer/blob/main/code/attention_nystrom.py#L31
q = q * _mask
k = k * _mask
assert key_padding_mask.size() == (batched_dim, 1, seq_len), (
f"key_padding_mask has invalid dimensions {key_padding_mask.size()}."
f" Must have dimensions {batched_dim, 1, seq_len} or (batch_size, {seq_len})."
)
if self.num_landmarks >= seq_len:
mask: Optional[torch.Tensor] = None
if self.causal:
mask = self._triu_mask(batched_dim, seq_len, seq_len, **tt)
if key_padding_mask is not None:
mask = key_padding_mask if mask is None else mask + key_padding_mask
x = scaled_dot_product_attention(q=q, k=k, v=v, att_mask=mask)
else:
q_landmarks = self.landmark_pooling(q)
k_landmarks = self.landmark_pooling(k)
if self.causal and (
self.causal_mask_1 is None
or (batched_dim, seq_len, self.num_landmarks)
!= self.causal_mask_1.size()
):
self.causal_mask_1 = self._triu_mask(
batched_dim, seq_len, self.num_landmarks, **tt
)
self.causal_mask_2 = self._triu_mask(
batched_dim, self.num_landmarks, self.num_landmarks, **tt
)
self.causal_mask_3 = self._triu_mask(
batched_dim, self.num_landmarks, seq_len, **tt
)
mask_3: Optional[torch.Tensor] = self.causal_mask_3
if key_padding_mask is not None:
mask_3 = (
key_padding_mask if mask_3 is None else mask_3 + key_padding_mask
)
kernel_1 = scaled_query_key_softmax(q=q, k=k_landmarks, att_mask=None)
kernel_2 = scaled_query_key_softmax(
q=q_landmarks, k=k_landmarks, att_mask=None
)
kernel_3 = scaled_dot_product_attention(
q=q_landmarks, k=k, v=v, att_mask=mask_3
)
kernel_2_inv = (
iterative_pinv(
kernel_2, self.inv_iterations, self.pinverse_original_init
)
if self.use_razavi_pinverse
else torch.linalg.pinv(kernel_2)
)
x = torch.matmul(
torch.matmul(
kernel_1,
kernel_2_inv,
),
kernel_3,
)
if self.skip_connection:
# Assumption here is that v is 3D.
v_conv = self.skip_connection(
v.reshape(-1, self.num_heads, v.size(-2), v.size(-1))
)
x += v_conv.reshape(-1, v_conv.size(-2), v_conv.size(-1))
x = self.attn_drop(x)
return x
def _triu_mask(self, dim_1: int, dim_2: int, dim_3: int, **kwargs) -> torch.Tensor:
device = kwargs["device"]
dtype = kwargs["dtype"]
return torch.triu(
torch.ones(dim_2, dim_3, dtype=dtype, device=device) * float("-inf"),
diagonal=1,
).expand(
dim_1, -1, -1
) # micro optim, save memory on the batch dimension
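# Illustrative usage sketch (not part of the original file). q/k/v are assumed to
# be flattened over the heads, i.e. shaped (batch * num_heads, seq_len, head_dim),
# which is how this attention is driven by the multi-head wrapper in xformers.
def _example_nystrom_attention():
    attn = NystromAttention(dropout=0.0, num_heads=4, num_landmarks=16)
    q = torch.randn(2 * 4, 128, 32)  # batch=2, heads=4, seq_len=128, head_dim=32
    k = torch.randn(2 * 4, 128, 32)
    v = torch.randn(2 * 4, 128, 32)
    # num_landmarks < seq_len, so the landmark-based approximation path is taken
    return attn(q, k, v)  # (8, 128, 32)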
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/nystrom.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch.cuda.amp import autocast
from xformers.components.attention import Attention, AttentionConfig, register_attention
from xformers.components.attention.feature_maps import (
FeatureMap,
FeatureMapType,
SMHyperbolic,
SMOrf,
SMReg,
)
logger = logging.getLogger("xformers")
@dataclass
class FavorAttentionConfig(AttentionConfig):
causal: Optional[bool]
dim_features: Optional[int] = None # The dimensions of the random features
dim_head: Optional[
int
] = None # The embedding dimension of the inputs. Only useful to get a dim_features estimate
iter_before_redraw: Optional[
int
] = None # The number of iterations before the random features are re-drawn from scratch
feature_map: Optional[FeatureMapType] = None
@register_attention("favor", FavorAttentionConfig)
class FavorAttention(Attention):
def __init__(
self,
causal: bool = False,
dropout: float = 0.0,
dim_features: Optional[int] = None,
dim_head: Optional[int] = None,
iter_before_redraw: Optional[int] = None,
feature_map_type: FeatureMapType = FeatureMapType.SMReg,
normalize_inputs: bool = False,
*_,
**__,
):
r"""
Kernelized attention, as proposed in Performers_
("Rethinking attention with performers." K. Choromanski et al. (2020).).
FAVOR stands for "Fast Attention Via positive Orthogonal Random features"
Args:
dropout (float): the probability of an output to be randomly dropped at training time
dim_features (int): the dimension of the random features space
iter_before_redraw (int): the number of steps (forward calls) before a redraw of the features
feature_map_type (FeatureMapType): the type of feature map being used,
for instance orthogonal random features.
.. _Performers: https://arxiv.org/pdf/2009.14794v1.pdf
"""
super().__init__()
self.causal = causal
self.iter_before_redraw = (
(2 * iter_before_redraw)
if iter_before_redraw is not None
else iter_before_redraw
) # This will be used for both key and query
self.normalize_inputs = normalize_inputs
self.feature_map_type = feature_map_type
self.attn_drop = nn.Dropout(dropout, inplace=True)
# Setup dimension-dependent variables
# Reasonable dimension default
if dim_features is None:
assert dim_head is not None, "dim_features or dim_head needs to be passed"
self.dim_features = math.ceil(dim_head * (1 + math.log2(dim_head)))
self.dim_features = 2 * (
self.dim_features // 2
) # needs to be even for some variants
logger.info(
f"FAVOR: Automatically setting the random mapping dimension to {self.dim_features} from {dim_head}"
)
else:
self.dim_features = dim_features
feature_map_constructor = {
FeatureMapType.SMHyp: SMHyperbolic,
FeatureMapType.SMReg: SMReg,
FeatureMapType.SMOrf: SMOrf,
}[self.feature_map_type]
feature_settings = {
"dim_features": self.dim_features,
"iter_before_redraw": self.iter_before_redraw,
"normalize_inputs": self.normalize_inputs,
}
self.feature_map: FeatureMap = feature_map_constructor(**feature_settings) # type: ignore
# Properties specific to this attention mechanism
self.supports_attention_mask = False
self.supports_key_padding_mask = False
@staticmethod
def _maybe_promote(x: torch.Tensor) -> torch.Tensor:
# Only promote fp16 buffers, bfloat16 would be fine for instance
return x.float() if x.dtype == torch.float16 else x
@staticmethod
def _causal_attention(
k_prime: torch.Tensor, q_prime: torch.Tensor, v: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
# Algorithm 1 in the paper
ref_v = torch.ones_like(v.unsqueeze(2)) # BATCH x SEQ x 1 x EMB
Gps = k_prime.unsqueeze(3) * v.unsqueeze(2)
Grenorm = k_prime.unsqueeze(3) * ref_v
# Consolidate against the feature dimension
att_raw = torch.einsum("bcfe,bcf->bce", Gps, q_prime)
att_norm = torch.einsum("bcfe,bcf->bce", Grenorm, q_prime)
# Cumulative sum over the sequence
att_raw = att_raw.cumsum(2)
att_norm = att_norm.cumsum(2)
return att_raw, att_norm
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
*_,
**__,
):
# Project key and queries onto the feature map space
k_prime = self.feature_map(k)
q_prime = self.feature_map(q)
with autocast(enabled=False):
# The softmax kernel approximation for Favor will easily overflow
# Force the computations here to stay in fp32 for numerical stability
# Note that the dimensions are vastly reduced when compared to scaled_dot_product
k_prime = self._maybe_promote(k_prime)
q_prime = self._maybe_promote(q_prime)
v = self._maybe_promote(v)
if not self.causal:
att_normalization = q_prime @ (
k_prime.transpose(-2, -1) @ torch.ones_like(v)
)
att_raw = q_prime @ (k_prime.transpose(-2, -1) @ v)
else:
# Actually compute attention
att_raw, att_normalization = self._causal_attention(k_prime, q_prime, v)
# Normalize
att = att_raw / att_normalization
if self.attn_drop is not None:
att = self.attn_drop(att)
return att
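# Illustrative usage sketch (not part of the original file): kernelized attention
# with the default regularized softmax feature map. `dim_head` is only used to
# derive a reasonable `dim_features` automatically.
def _example_favor_attention():
    attn = FavorAttention(causal=False, dropout=0.0, dim_head=32)
    q = torch.randn(8, 128, 32)  # (batch * num_heads, seq_len, head_dim)
    k = torch.randn(8, 128, 32)
    v = torch.randn(8, 128, 32)
    return attn(q, k, v)  # (8, 128, 32), with cost linear in seq_len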
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/favor.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABCMeta, abstractmethod
from dataclasses import asdict, dataclass
from typing import Optional, Type, TypeVar
import torch
import torch.nn as nn
from xformers.components.attention import AttentionMask
@dataclass
class AttentionConfig:
"""Parameters required for all Attentions.
Can accept and store extra parameters.
"""
name: str # the registered name for this attention mechanism
dropout: float # dropout probability
Self = TypeVar("Self", bound="Attention")
# Define the common interface, every attention block needs to derive from it
class Attention(nn.Module, metaclass=ABCMeta):
r"""The base Attention mechanism, which is typically a sub-part of the multi-head attention"""
_causal_mask: Optional[AttentionMask] = None
@abstractmethod
def __init__(self, dropout: Optional[float] = None, *args, **kwargs):
super().__init__()
# Requires the inputs to be projected
self.requires_input_projection = True
# Whether the head dimension needs to be present (if not it can be folded into the batch dimension)
self.requires_head_dimension = False
# key padding mask and attention mask must be passed in as separate arguments instead of a merged attention mask
self.requires_separate_masks = False
# Requires that K and Q have the same sequence length
self.requires_same_k_q_dimensions = False
# Whether the attention owns the single head/multihead mechanism
# so that the MHA wrapper should skip it
self.requires_skip_multi_head = False
# This attention requires a context length which is squared, often due to 2D pooling
self.requires_squared_context = False
# Whether this attention mechanism supports attention masks
self.supports_attention_mask = True
self.supports_key_padding_mask = False
@classmethod
def from_config(cls: Type[Self], config: AttentionConfig) -> Self:
# Generate the class inputs from the config
fields = asdict(config)
# Skip all Nones so that default values are used
fields = {k: v for k, v in fields.items() if v is not None}
return cls(**fields)
@abstractmethod
def forward(
self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, *args, **kwargs
) -> torch.Tensor:
raise NotImplementedError
@staticmethod
def _maybe_pad_sequence(x: torch.Tensor, mask: torch.Tensor):
"""
If the sequence is shorter than the mask, return a padded view
"""
if x.shape[-2] != mask.shape[-1]:
assert x.shape[-2] < mask.shape[-1], (
"Sequence is bigger than the provided mask, cannot infer what to do with it."
" Please update your attention mask"
)
pad_size = (0, 0, 0, mask.shape[-1] - x.shape[-2], 0, 0)
return torch.nn.functional.pad(x, pad_size, mode="constant", value=0.0)
return x
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from .base import FeatureMap, FeatureMapConfig
from .softmax import NormDistribution, SMHyperbolic, SMOrf, SMReg
class FeatureMapType(str, Enum):
SMOrf = "sm_orf"
SMHyp = "sm_hyp"
SMReg = "sm_reg" # regularized softmax kernel
__all__ = [
"SMOrf",
"SMReg",
"SMHyperbolic",
"NormDistribution",
"FeatureMapConfig",
"FeatureMap",
]
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/feature_maps/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from enum import Enum, auto
from typing import Optional
import torch
from torch.autograd.profiler import record_function
from .base import FeatureMap
"""
A set of feature maps which approximate the softmax kernel, as per the Performers_ paper.
_Performers: "Rethinking attention with performers." K. Choromanski et al. (2020).
https://arxiv.org/pdf/2009.14794v1.pdf
"""
class NormDistribution(Enum):
Xi = auto()
Uniform = auto()
class SoftMaxPositiveEstimators(FeatureMap):
def __init__(
self,
dim_features: int,
iter_before_redraw: Optional[int],
normalize_inputs: bool = False,
epsilon: float = 1e-6,
softmax_temp: float = -1,
):
super().__init__(dim_features, iter_before_redraw, normalize_inputs, epsilon)
self.softmax_temp = softmax_temp
# Handle the scaling from all kernels by √m.
# This normalizes for all the feature maps involved
self.h_scale = math.log(math.sqrt(self.dim_features))
def pre_scale(self, x: torch.Tensor) -> torch.Tensor:
with record_function("feature_map::pre_scale"):
# Re-draw counting logic
if (
(
self.iter_before_redraw is not None
and self._iter_counter > self.iter_before_redraw
)
or self.features is None
or self.features.device != x.device
):
# The feature map is actually using half the dimension, we'll concatenate + and - features
self._iter_counter = 1
self.features = self._get_feature_map(
x.shape[-1], self.dim_feature_map, x.device
)
features = self.features
assert features is not None
if features.dtype != x.dtype:
self.features = features.to(x.dtype)
self._iter_counter += 1
# Normalization / softmax
if self.softmax_temp < 0:
# A = exp(QK.t/√d), so each input will be scaled by √√d
self.softmax_temp = x.shape[-1] ** -0.25
x_scaled = x * self.softmax_temp
# Compute the scaling factors in logspace, applied from within the exponential
# - diminish possible exponential overflow
# - remove a multiply across the batch, replace by an addition
norm_x_2 = torch.einsum("...d,...d->...", x_scaled, x_scaled).unsqueeze(-1)
self.offset = -0.5 * norm_x_2 - self.h_scale + self.epsilon
if self.normalize_inputs:
# L0 normalize the exponential term, can be useful for numerical stability
# This ensures that features +- offset is below 1
self.offset -= norm_x_2.max(1, keepdim=True)[0]
# Return the scaled inputs, the rest depends on the kernel being used
return x_scaled
@staticmethod
@torch.no_grad()
def _get_random_ortho_matrix(
blocks: int,
dim: int,
device: torch.device,
norm_distribution: NormDistribution = NormDistribution.Uniform,
) -> torch.Tensor:
r"""
Generate a random matrix whose rows are exactly orthonormal
"How to generate random matrices from the classical compact groups", Mezzadri, 2007
https://arxiv.org/pdf/math-ph/0609050v2.pdf
.. note: the typical QR decomposition does not give uniformly distributed results: the QR
decomposition is not unique, and the QR routines are biased towards numerical stability. See the
above paper for more information.
.. note: this does not follow the original implementation from the Performers authors.
see docs/assets/kde plots to visualize the impact of using the R signs to correct Q
"""
H = torch.randn((blocks, dim, dim), device=device, requires_grad=False)
# Randomly scale the norms of the features, Xi distributed
if norm_distribution == NormDistribution.Xi:
# NOTE: This averages to sqrt(d)
norms = torch.sqrt(torch.einsum("...d,...d->...", H, H))
Q, R = torch.linalg.qr(H)
Q = torch.diag_embed(torch.sign(torch.diagonal(R, dim1=1, dim2=2))) @ Q
# Normalize if need be. Uniform NormDistribution does nothing, Q is already orthonormal
if norm_distribution == NormDistribution.Xi:
return torch.diag_embed(norms) @ Q
return Q
class SMOrf(SoftMaxPositiveEstimators):
"""
"Positive random orthogonal features" softmax estimator,
SM_ort^m+, as proposed in the Performers_ paper, Lemma 1.
_Performers: "Rethinking attention with performers." K. Choromanski et al. (2020).
https://arxiv.org/pdf/2009.14794v1.pdf
"""
@torch.no_grad()
def _get_feature_map(self, dim_input: int, dim_features: int, device: torch.device):
"""
Generate the projection matrix onto the random features
.. note: The heads dimension needs to be taken into account, hence the per-block random matrix
rather than a single uniformly random one.
"""
# Get per block random unitary matrices.
# We need enough of them to project the whole input dimension, regardless of the
# requested dimension of the features
features = self._get_random_ortho_matrix(
math.ceil(dim_input / dim_features),
dim_features,
norm_distribution=NormDistribution.Xi,
device=device,
)
return features.flatten(0, 1)[:dim_input]
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Softmax-dimension related scaling, shared for all kernels
x_scaled = super().pre_scale(x)
assert self.features is not None
# Project onto the random feature map.
x_scaled = x_scaled @ self.features
return torch.exp(x_scaled + self.offset)
class SMHyperbolic(SoftMaxPositiveEstimators):
"""
"Positive random features hyperbolic" estimator, SMHyp+,
as proposed in the Performers_ paper, Lemma 1.
_Performers: "Rethinking attention with performers." K. Choromanski et al. (2020).
https://arxiv.org/pdf/2009.14794v1.pdf
"""
def __init__(
self,
dim_features: int,
iter_before_redraw: Optional[int],
normalize_inputs: bool = False,
epsilon: float = 1e-6,
softmax_temp: float = -1,
):
super().__init__(
dim_features, iter_before_redraw, normalize_inputs, epsilon, softmax_temp
)
assert (
dim_features % 2 == 0
), "The feature dimension needs to be even with this kernel"
self.dim_feature_map = self.dim_features // 2
@torch.no_grad()
def _get_feature_map(self, dim_input: int, dim_features: int, device: torch.device):
"""
Generate the projection matrix onto the random features
.. note: The heads dimension needs to be taken into account, hence the per-block random matrix
rather than a single uniformly random one.
"""
# Get per block random unitary matrices.
# We need enough of them to project the whole input dimension, regardless of the
# requested dimension of the features
features = self._get_random_ortho_matrix(
math.ceil(dim_input / dim_features),
dim_features,
norm_distribution=NormDistribution.Xi,
device=device,
)
return features.flatten(0, 1)[:dim_input]
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Softmax-dimension related scaling, shared for all kernels
x_scaled = super().pre_scale(x)
# Project onto the random feature map, concatenate both + and - results
# This follows Lemma 1 in the original Performers Paper to best approximate a
# softmax kernel (cosh representation)
x_scaled = x_scaled @ self.features
return torch.cat(
[torch.exp(x_scaled + self.offset), torch.exp(-x_scaled + self.offset)],
dim=-1,
)
class SMReg(SoftMaxPositiveEstimators):
"""
"Regularized softmax kernel" estimator, SMREG+, as proposed in the Performers_ paper.
_Performers: "Rethinking attention with performers." K. Choromanski et al. (2020).
https://arxiv.org/pdf/2009.14794v1.pdf
"""
def __init__(
self,
dim_features: int,
iter_before_redraw: Optional[int],
normalize_inputs: bool = False,
epsilon: float = 1e-6,
softmax_temp: float = -1,
):
super().__init__(
dim_features, iter_before_redraw, normalize_inputs, epsilon, softmax_temp
)
assert (
dim_features % 2 == 0
), "The feature dimension needs to be even with this kernel"
self.dim_feature_map = self.dim_features // 2
@torch.no_grad()
def _get_feature_map(self, dim_input: int, dim_features: int, device: torch.device):
"""
Generate the projection matrix onto the random features
.. note: The heads dimension needs to be taken into account, hence the per-block random matrix
rather than a single uniformly random one.
"""
# Get per block random unitary matrices.
# We need enough of them to project the whole input dimension, regardless of the
# requested dimension of the features
features = self._get_random_ortho_matrix(
math.ceil(dim_input / dim_features),
dim_features,
norm_distribution=NormDistribution.Uniform,
device=device,
).flatten(0, 1)
norms = math.sqrt(dim_input) * torch.ones(features.shape[0], device=device)
return (torch.diag(norms) @ features)[:dim_input]
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Softmax-dimension related scaling, shared for all kernels
x_scaled = super().pre_scale(x)
# Project onto the random feature map, concatenate both + and - results
# This follows Lemma 1 in the original Performers Paper to best approximate a
# softmax kernel (cosh representation + sample regularization)
x_scaled = x_scaled @ self.features
return torch.cat(
[torch.exp(x_scaled + self.offset), torch.exp(-x_scaled + self.offset)],
dim=-1,
)
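# Illustrative usage sketch (not part of the original file): project inputs with
# the positive orthogonal random features estimator defined above.
def _example_smorf_feature_map():
    feature_map = SMOrf(dim_features=64, iter_before_redraw=None)
    x = torch.randn(2, 16, 32)  # (batch, seq_len, head_dim)
    phi = feature_map(x)        # (2, 16, 64), strictly positive features
    # Softmax attention can then be approximated as phi(q) @ (phi(k).T @ v),
    # up to the usual normalization term (see favor.py in this package).
    return phi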
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/feature_maps/softmax.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from abc import abstractmethod
from dataclasses import asdict, dataclass
from typing import Optional, Type, TypeVar
import torch
"""
Feature maps allow for a given query or key to be encoded in a different space.
"""
Self = TypeVar("Self", bound="FeatureMap")
@dataclass
class FeatureMapConfig:
name: str
dim_features: int
iter_before_redraw: Optional[int]
normalize_inputs: Optional[bool]
epsilon: Optional[float]
class FeatureMap(torch.nn.Module):
def __init__(
self,
dim_features: int,
iter_before_redraw: Optional[int] = None,
normalize_inputs: bool = False,
epsilon: float = 1e-6,
):
super().__init__()
self.dim_features = dim_features
self.dim_feature_map = dim_features
self.iter_before_redraw = iter_before_redraw
self.features: Optional[torch.Tensor] = None
self.epsilon = epsilon
self.normalize_inputs = normalize_inputs
self._iter_counter = 0
@abstractmethod
def _get_feature_map(self, dim_input: int, dim_features: int, device: torch.device):
raise NotImplementedError()
@classmethod
def from_config(cls: Type[Self], config: FeatureMapConfig) -> Self:
# Generate the class inputs from the config
fields = asdict(config)
# Skip all Nones so that default values are used
fields = {k: v for k, v in fields.items() if v is not None}
return cls(**fields)
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/attention/feature_maps/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from typing import Any, Callable, Dict, Set, Union
from xformers.utils import (
generate_matching_config,
get_registry_decorator,
import_all_modules,
)
from .base import Feedforward, FeedforwardConfig # noqa
# CREDITS: Classy Vision registry mechanism
FEEDFORWARD_REGISTRY: Dict[str, Any] = {}
FEEDFORWARD_CLASS_NAMES: Set[str] = set()
def build_feedforward(config: Union[Dict[str, Any], FeedforwardConfig]):
"""Builds a feedforward from a config.
This assumes a 'name' key in the config which is used to determine what
feedforward class to instantiate. For instance, a config `{"name": "my_feedforward",
"foo": "bar"}` will find a class that was registered as "my_feedforward"
(see :func:`register_feedforward`) and call .from_config on it."""
if not isinstance(config, FeedforwardConfig):
config_instance = generate_matching_config(
config, FEEDFORWARD_REGISTRY[config["name"]].config
)
else:
config_instance = config
return FEEDFORWARD_REGISTRY[config_instance.name].constructor.from_config(
config_instance
)
"""Registers a Feedforward subclass.
This decorator allows xFormers to instantiate a subclass of Feedforward
from a configuration file, even if the class itself is not part of the
xFormers framework. To use it, apply this decorator to a Feedforward
subclass, like this:
.. code-block:: python
@dataclass
class MyConfig:
...
@register_feedforward('my_ff', MyConfig)
class MyFeedforward(Feedforward):
...
To instantiate a feedforward from a configuration file, see :func:`build_feedforward`."""
register_feedforward: Callable[
[str, Any], Callable[[Any], Any]
] = get_registry_decorator(
FEEDFORWARD_REGISTRY, FEEDFORWARD_CLASS_NAMES, Feedforward, FeedforwardConfig
)
try:
from .fused_mlp import FusedMLP # noqa
_fused_mlp_available = True
except ImportError:
_fused_mlp_available = False
from .mlp import MLP # noqa
__all__ = [
"MLP",
"Feedforward",
"build_feedforward",
"register_feedforward",
]
if _fused_mlp_available:
__all__ += ["FusedMLP"]
# automatically import any Python files in the directory
import_all_modules(str(Path(__file__).parent), "xformers.components.feedforward")
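# Illustrative usage sketch (not part of the original file): instantiate the MLP
# registered in this package from a plain config dict, as described in the
# build_feedforward docstring. The field values are arbitrary; Activation.GeLU
# comes from xformers.components, as used elsewhere in this package.
def _example_build_feedforward():
    from xformers.components import Activation  # local import to avoid import cycles

    config = {
        "name": "MLP",
        "dim_model": 64,
        "dropout": 0.1,
        "activation": Activation.GeLU,
        "hidden_layer_multiplier": 4,
        "bias": True,
    }
    return build_feedforward(config)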
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/feedforward/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable, Optional, Union
import torch
from xformers.components import Activation
from xformers.components.feedforward import (
Feedforward,
FeedforwardConfig,
register_feedforward,
)
logger = logging.getLogger("xformers")
_is_fairscale_available = True
try:
import torch.distributed as dist
from fairscale.nn import MOELayer, Top2Gate # type: ignore
from xformers.components.feedforward import MLP
except ImportError:
logger.warning(
"Either FairScale or torch distributed is not available, MixtureOfExperts will not be exposed."
" Please install them if you would like to use MoE"
)
_is_fairscale_available = False
if _is_fairscale_available:
# Credits: initially implemented in FairScale for sanity checking
class RoundRobinGate(torch.nn.Module):
def __init__(self, model_dim, num_experts):
super().__init__()
self.model_dim = model_dim
self.num_experts = num_experts
def forward(self, input):
s = input.shape[0]
assert s % self.num_experts == 0, f"{s} % {self.num_experts} != 0"
capacity = 2 * s // self.num_experts
output = torch.zeros(
s, self.num_experts, capacity, dtype=input.dtype, device=input.device
)
for i in range(s):
output[i, i % self.num_experts, i // self.num_experts] = 1.0
return 0.0, output, output.bool()
class GateConfig(str, Enum):
RoundRobin = "round_robin"
Top2 = "top_2"
# Other gating techniques could be exposed here
@dataclass
class MoEConfig(FeedforwardConfig):
number_of_experts: int
gate: GateConfig
number_of_local_experts: Optional[int] = None
expert_constructor: Optional[Any] = None
hidden_layer_multiplier: Optional[int] = None
group: Optional[Any] = None
@register_feedforward("MixtureOfExperts", MoEConfig)
class MixtureOfExperts(Feedforward):
"""
An MLP variant which uses the "Mixture of Experts" paradigm, as described in Gshard_.
xFormers uses the FairScale_ implementation under the hood.
.. warning: Please note that most of the benefits of MoE are present in a distributed training environment
.. _Gshard: https://arxiv.org/pdf/2006.16668.pdf
.. _FairScale: https://github.com/facebookresearch/fairscale/
"""
def __init__(
self,
dim_model: int,
dropout: float,
activation: Activation,
number_of_experts: int,
gate: Union[GateConfig, torch.nn.Module],
number_of_local_experts: Optional[int] = None,
expert_constructor: Optional[Callable[[], torch.nn.Module]] = None,
hidden_layer_multiplier: Optional[int] = None,
group: Optional[Any] = None,
*_,
**__,
):
super().__init__()
# Handle a possibly uninitialized process group
assert (
dist.is_initialized()
), "Mixture of Experts requires torch distributed to be initialized"
if number_of_local_experts is not None:
assert number_of_experts >= number_of_local_experts
else:
if dist.get_world_size() == 1:
logger.warning("Local experts not specified but world size is 1")
logger.warning("Assuming that all experts are local")
number_of_local_experts = number_of_experts
else:
number_of_local_experts = 1
# Programmatically handle the gating technique
if not isinstance(gate, torch.nn.Module):
gate_constructor = {
GateConfig.RoundRobin: RoundRobinGate,
GateConfig.Top2: Top2Gate,
}[gate]
self.gate = gate_constructor(dim_model, number_of_experts)
else:
self.gate = gate
# Programmatically handle the experts
if expert_constructor is None:
multiplier = (
hidden_layer_multiplier
if hidden_layer_multiplier is not None
else 4
)
def expert_constructor() -> torch.nn.Module:
return MLP(dim_model, dropout, activation, multiplier)
assert expert_constructor is not None
local_experts = torch.nn.ModuleList(
[expert_constructor() for _ in range(number_of_local_experts)]
)
self.moe = MOELayer(gate=self.gate, experts=local_experts, group=group)
self.requires_cuda = True
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
# FairScale MoE assumes that the dimensions are [S, B, E]
# xFormers assumes [B, S, E]
return self.moe(inputs.movedim(0, 1)).movedim(0, 1)
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/feedforward/mixture_of_experts.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
import torch
import torch.nn as nn
import xformers
from xformers.components import Activation, build_activation
from xformers.components.feedforward import Feedforward, FeedforwardConfig
if xformers._is_functorch_available:
from xformers.components.nvfuser import ( # noqa
NVFusedBiasActivationDropout,
)
from . import register_feedforward
@dataclass
class MlpConfig(FeedforwardConfig):
hidden_layer_multiplier: int
bias: bool
@register_feedforward("MLP", MlpConfig)
class MLP(Feedforward):
def __init__(
self,
dim_model: int,
dropout: float,
activation: Activation,
hidden_layer_multiplier: int,
bias: bool = True,
*args,
**kwargs,
):
super().__init__()
dim_mlp = hidden_layer_multiplier * dim_model
# check if fused Bias Activation Dropout is applicable
if xformers._is_functorch_available:
# Catch unimported fused layer
from xformers.components.nvfuser.bias_act_dropout import ( # noqa
NVFusedBiasActivationDropout,
)
self.requires_cuda = True
self.mlp = nn.Sequential(
nn.Linear(
in_features=dim_model, out_features=dim_mlp, bias=False
), # bias is handled in the next layer
NVFusedBiasActivationDropout(
p=dropout,
bias_shape=dim_mlp if bias else None,
activation=activation,
),
nn.Linear(
in_features=dim_mlp, out_features=dim_model, bias=False
), # bias is handled in the next layer
NVFusedBiasActivationDropout(
p=dropout,
bias_shape=dim_model if bias else None,
activation=None,
),
)
else:
self.mlp = nn.Sequential(
nn.Linear(in_features=dim_model, out_features=dim_mlp, bias=bias),
build_activation(activation),
nn.Dropout(dropout),
nn.Linear(in_features=dim_mlp, out_features=dim_model, bias=bias),
nn.Dropout(dropout),
)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
return self.mlp(inputs)
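# Illustrative usage sketch (not part of the original file). Note that when
# xformers._is_functorch_available is True the fused path above requires CUDA;
# the plain nn.Sequential fallback also runs on CPU.
def _example_mlp():
    mlp = MLP(
        dim_model=64,
        dropout=0.1,
        activation=Activation.GeLU,
        hidden_layer_multiplier=4,
    )
    x = torch.randn(2, 16, 64)  # (batch, seq_len, dim_model)
    return mlp(x)               # same shape: (2, 16, 64)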
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/feedforward/mlp.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: Largely reusing the code from the reference VAN implementation
# see https://github.com/Visual-Attention-Network
import math
from dataclasses import dataclass
from typing import Optional
import torch.nn as nn
from xformers.components import Activation, build_activation
from xformers.components.feedforward import Feedforward, FeedforwardConfig
from . import register_feedforward
@dataclass
class ConvMlpConfig(FeedforwardConfig):
hidden_layer_multiplier: int
dim_model: int
dim_model_out: Optional[int]
act_layer: Activation
dropout: float
@register_feedforward("Conv2DFeedforward", ConvMlpConfig)
class Conv2DFeedforward(Feedforward):
"""
A convolutional feed-forward network, as proposed in VAN_ (Visual Attention Network, Guo et al.)
.. _VAN: https://arxiv.org/pdf/2202.09741.pdf
"""
def __init__(
self,
dim_model: int,
hidden_layer_multiplier: int = 1,
dim_model_out: Optional[int] = None,
activation: Activation = Activation.GeLU,
dropout=0.0,
*args,
**kwargs,
):
super().__init__()
out_features = dim_model_out or dim_model
hidden_features = hidden_layer_multiplier * dim_model
self.conv_mlp = nn.Sequential(
nn.Conv2d(dim_model, hidden_features, 1),
nn.Conv2d(
hidden_features,
hidden_features,
3,
1,
1,
bias=True,
groups=hidden_features,
),
build_activation(activation),
nn.Conv2d(hidden_features, out_features, 1),
nn.Dropout(dropout),
)
# This feedforward requires a context length which is squared, often due to 2D pooling
self.requires_squared_context = True
def init_weights(self, **kwargs):
# Follow the original init, but also make it possible to initialize from the outside
def init_module(m: nn.Module):
if isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
self.apply(init_module)
def forward(self, x):
# The conv layers expect NCHW, we have NLC by default
B, L, C = x.shape
HW = int(math.sqrt(x.shape[-2]))
assert HW**2 == L, "Conv2DFeedforward requires squared context lengths"
x = x.reshape((B, HW, HW, C)).swapdims(1, -1)
# The actual FW, including the 2d convolutions
x = self.conv_mlp(x)
# back to NLC
x = x.transpose(1, -1)
return x.flatten(1, 2)
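# Illustrative usage sketch (not part of the original file). The sequence length
# must be a perfect square (here 49 = 7 x 7) since tokens are folded back onto a
# 2D grid before the convolutions. `torch` is imported locally because this
# module only imports torch.nn at the top.
def _example_conv2d_feedforward():
    import torch

    ff = Conv2DFeedforward(dim_model=32, hidden_layer_multiplier=2)
    x = torch.randn(2, 49, 32)  # (batch, HW * HW, channels) with HW = 7
    return ff(x)                # (2, 49, 32)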
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/feedforward/conv_mlp.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
import torch
import torch.nn as nn
from xformers.components import Activation
from xformers.components.feedforward import (
Feedforward,
FeedforwardConfig,
register_feedforward,
)
logger = logging.getLogger("xformers")
if torch.cuda.is_available():
try:
from xformers.triton import FusedDropoutBias
@dataclass
class FusedMlpConfig(FeedforwardConfig):
hidden_layer_multiplier: int
@register_feedforward("FusedMLP", FusedMlpConfig)
class FusedMLP(Feedforward):
"""
            An MLP using fused linear layers.
"""
def __init__(
self,
dim_model: int,
dropout: float,
activation: Activation,
hidden_layer_multiplier: int,
bias: bool = True,
*args,
**kwargs,
):
super().__init__()
dim_mlp = hidden_layer_multiplier * dim_model
self.mlp = nn.Sequential(
nn.Linear(
in_features=dim_model, out_features=dim_mlp, bias=False
), # bias is handled in the next layer
# pyre-ignore[16]: TODO(T101400990): Pyre did not recognize
# the `FusedLinear` import.
FusedDropoutBias(
p=dropout,
bias_shape=dim_mlp if bias else None,
activation=activation,
),
nn.Linear(
in_features=dim_mlp, out_features=dim_model, bias=False
), # bias is handled in the next layer
# pyre-ignore[16]: TODO(T101400990): Pyre did not recognize
# the `FusedLinear` import.
FusedDropoutBias(
p=dropout,
bias_shape=dim_model if bias else None,
activation=None,
),
)
self.requires_cuda = True
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
return self.mlp(inputs)
except ImportError:
logger.warning("Triton is not available, FusedMLP will not be enabled.")
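# A minimal usage sketch, assuming a CUDA device and a working Triton install (FusedMLP
# is only defined when the import above succeeds); shapes and values are arbitrary.
def _fused_mlp_usage_sketch():
    if not torch.cuda.is_available():
        return None
    mlp = (
        FusedMLP(
            dim_model=64,
            dropout=0.1,
            activation=Activation.GeLU,
            hidden_layer_multiplier=4,
        )
        .cuda()
        .half()
    )
    x = torch.randn(2, 16, 64, device="cuda", dtype=torch.half)
    return mlp(x)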
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/feedforward/fused_mlp.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABCMeta, abstractmethod
from dataclasses import asdict, dataclass
from typing import Optional, Type, TypeVar
import torch.nn as nn
from xformers.components import Activation
Self = TypeVar("Self", bound="Feedforward")
@dataclass
class FeedforwardConfig:
name: str
dim_model: int
dropout: float
activation: Activation
# Define the common interface, every feedforward block needs to derive from it
class Feedforward(nn.Module, metaclass=ABCMeta):
@abstractmethod
def __init__(
self,
dim_model: Optional[int] = None,
dropout: Optional[float] = None,
activation: Optional[Activation] = None,
*args,
**kwargs,
):
super().__init__()
# This feedforward requires a CUDA accelerator
self.requires_cuda = False
# This feedforward requires a context length which is squared, often due to 2D pooling
self.requires_squared_context = False
@classmethod
def from_config(cls: Type[Self], config: FeedforwardConfig) -> Self:
# Generate the class inputs from the config
fields = asdict(config)
# Skip all Nones so that default values are used
fields = {k: v for k, v in fields.items() if v is not None}
return cls(**fields)
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/feedforward/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
from xformers.components.positional_embedding import (
PositionEmbedding,
PositionEmbeddingConfig,
register_positional_embedding,
)
@dataclass
class VocabEmbeddingConfig(PositionEmbeddingConfig):
vocab_size: int
dropout: float
@register_positional_embedding("vocab", VocabEmbeddingConfig)
class VocabEmbedding(PositionEmbedding):
def __init__(
self,
dim_model: int,
seq_len: int,
vocab_size: int,
dropout: float = 0.0,
*args,
**kwargs
):
super().__init__()
self.vocab_size = vocab_size
self.dim_model = dim_model
self.dropout = torch.nn.Dropout(p=dropout)
self.position_embeddings = nn.Embedding(seq_len, self.dim_model)
self.word_embeddings = nn.Embedding(self.vocab_size, self.dim_model)
self.position_ids: Optional[torch.Tensor] = None
self.init_weights()
def init_weights(self, gain: float = 1.0):
torch.nn.init.normal_(self.position_embeddings.weight, std=0.02 * gain)
torch.nn.init.normal_(self.word_embeddings.weight, std=0.02 * gain)
def forward(self, x: torch.Tensor):
position_ids = torch.arange(x.shape[1], dtype=torch.long, device=x.device)[
None, :
].repeat(x.shape[0], 1)
X_token = self.word_embeddings(x)
X_pos = self.position_embeddings(position_ids)
X = X_token + X_pos
X = self.dropout(X)
return X
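# A minimal usage sketch: VocabEmbedding expects integer token ids and returns the sum
# of token and learned position embeddings; vocabulary size and shapes are arbitrary.
def _vocab_embedding_usage_sketch():
    emb = VocabEmbedding(dim_model=32, seq_len=16, vocab_size=100)
    token_ids = torch.randint(0, 100, (2, 16))  # (batch, sequence)
    out = emb(token_ids)
    assert out.shape == (2, 16, 32)
    return out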
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/positional_embedding/vocab.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from typing import Any, Callable, Dict, Set, Union
from xformers.utils import (
generate_matching_config,
get_registry_decorator,
import_all_modules,
)
from .base import PositionEmbedding, PositionEmbeddingConfig # noqa
# CREDITS: Classy Vision registry mechanism
POSITION_EMBEDDING_REGISTRY: Dict[str, Any] = {}
POSITION_EMBEDDING_CLASS_NAMES: Set[str] = set()
def build_positional_embedding(config: Union[Dict[str, Any], PositionEmbeddingConfig]):
"""Builds a position encoding from a config.
    This assumes a 'name' key in the config which is used to determine which
    position encoding class to instantiate. For instance, a config `{"name": "my_position_encoding",
"foo": "bar"}` will find a class that was registered as "my_position_encoding"
(see :func:`register_positional_embedding`) and call .from_config on it."""
if not isinstance(config, PositionEmbeddingConfig):
config_instance = generate_matching_config(
config, POSITION_EMBEDDING_REGISTRY[config["name"]].config
)
else:
config_instance = config
return POSITION_EMBEDDING_REGISTRY[config_instance.name].constructor.from_config(
config_instance
)
"""Registers a PositionEncoding subclass.
This decorator allows xFormers to instantiate a subclass of PositionEncoding
from a configuration file, even if the class itself is not part of the
xFormers framework. To use it, apply this decorator to a `PositionEncoding`
subclass, like this:
.. code-block:: python
@dataclass
class MyConfig:
...
@register_positional_embedding('my_encoding', MyConfig)
class MyEncoding(PositionEncoding):
...
To instantiate a position encoding from a configuration file, see :func:`build_positional_embedding`."""
register_positional_embedding: Callable[
[str, Any], Callable[[Any], Any]
] = get_registry_decorator(
POSITION_EMBEDDING_REGISTRY,
POSITION_EMBEDDING_CLASS_NAMES,
PositionEmbedding,
PositionEmbeddingConfig,
)
from .rotary import RotaryEmbedding # noqa
from .sine import SinePositionalEmbedding # type: ignore # noqa
from .vocab import VocabEmbedding # noqa
__all__ = [
"RotaryEmbedding",
"SinePositionalEmbedding",
"VocabEmbedding",
"build_positional_embedding",
"register_positional_embedding",
]
# automatically import any Python files in the directory
import_all_modules(
str(Path(__file__).parent), "xformers.components.positional_embedding"
)
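# A minimal usage sketch of the registry described above, assuming the "sine" encoding
# registered in this package; the dimensions are arbitrary examples.
def _build_positional_embedding_sketch():
    return build_positional_embedding(
        {"name": "sine", "dim_model": 64, "seq_len": 128}
    )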
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/positional_embedding/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
import torch
from xformers.components.positional_embedding import (
PositionEmbedding,
PositionEmbeddingConfig,
register_positional_embedding,
)
@dataclass
class LearnablePositionalEmbeddingConfig(PositionEmbeddingConfig):
name: str
seq_len: int
dim_model: int
add_class_token: bool
@register_positional_embedding("learnable", LearnablePositionalEmbeddingConfig)
class LearnablePositionalEmbedding(PositionEmbedding):
def __init__(
self, seq_len: int, dim_model: int, add_class_token: bool = False, *_, **__
):
super().__init__()
# 0.02 is BERT initialization
self.pos_emb = torch.nn.Parameter(
torch.randn(1, seq_len + int(add_class_token), dim_model) * 0.02
)
self.class_token = (
torch.nn.Parameter(torch.zeros(dim_model)) if add_class_token else None
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.class_token is not None:
# Prepend class token
clf_token = (
torch.ones(x.shape[0], 1, self.pos_emb.shape[-1], device=x.device)
* self.class_token
)
x = torch.cat([clf_token, x], dim=1)
if x.ndim == 2:
x = x.unsqueeze(-1)
return x + self.pos_emb
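# A minimal usage sketch: with add_class_token=True a learned class token is prepended,
# so the output sequence is one step longer than the input; shapes are arbitrary.
def _learnable_positional_embedding_sketch():
    pos = LearnablePositionalEmbedding(seq_len=16, dim_model=32, add_class_token=True)
    x = torch.randn(2, 16, 32)
    y = pos(x)
    assert y.shape == (2, 17, 32)
    return y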
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/positional_embedding/param.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Silence Mypy errors in this file.
# type: ignore
import math
import torch
from xformers.components.positional_embedding import (
PositionEmbedding,
PositionEmbeddingConfig,
register_positional_embedding,
)
@register_positional_embedding("sine", PositionEmbeddingConfig)
class SinePositionalEmbedding(PositionEmbedding):
def __init__(self, dim_model: int, *args, **kwargs):
super().__init__()
self.dim_model = dim_model
def forward(self, x: torch.Tensor) -> torch.Tensor:
seq_len = x.shape[1]
pos = (
torch.arange(0, seq_len, device=x.device, dtype=torch.float32)
.unsqueeze(1)
.repeat(1, self.dim_model)
)
dim = (
torch.arange(0, self.dim_model, device=x.device, dtype=torch.float32)
.unsqueeze(0)
.repeat(seq_len, 1)
)
div = torch.exp(-math.log(10000) * (2 * (dim // 2) / self.dim_model))
pos *= div
pos[:, 0::2] = torch.sin(pos[:, 0::2])
pos[:, 1::2] = torch.cos(pos[:, 1::2])
output = x.unsqueeze(-1) if x.ndim == 2 else x
return output + pos.unsqueeze(0)
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/positional_embedding/sine.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: This implementation is inspired by GPT-NeoX https://github.com/EleutherAI/gpt-neox
# NOTE: Almost the same right now, moving parts to Triton is the next step
from typing import Tuple
import torch
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
@torch.jit.script
def apply_rotary_pos_emb(x, cos, sin):
# NOTE: This could probably be moved to Triton
# Handle a possible sequence length mismatch in between q and k
cos = cos[:, :, : x.shape[-2], :]
sin = sin[:, :, : x.shape[-2], :]
return (x * cos) + (rotate_half(x) * sin)
class RotaryEmbedding(torch.nn.Module):
"""
    The rotary position embeddings from RoFormer_ (Su et al.).
    A crucial insight from the method is that the query and keys are
    transformed by rotation matrices which depend on the relative positions.
    Other implementations are available in the Rotary Transformer repo_ and in
    GPT-NeoX_, which was an inspiration for this one.
    .. _RoFormer: https://arxiv.org/abs/2104.09864
    .. _repo: https://github.com/ZhuiyiTechnology/roformer
    .. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox
    .. warning:: Please note that this embedding is not registered on purpose, as it is transformative
    (it does not create the embedding dimension) and will likely be picked up (imported) on an ad-hoc basis
"""
def __init__(self, dim_model: int, *_, **__):
super().__init__()
# Generate and save the inverse frequency buffer (non trainable)
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim_model, 2).float() / dim_model))
self.register_buffer("inv_freq", inv_freq)
self._seq_len_cached = None
self._cos_cached = None
self._sin_cached = None
def _update_cos_sin_tables(self, x, seq_dimension=1):
seq_len = x.shape[seq_dimension]
# Reset the tables if the sequence length has changed,
# or if we're on a new device (possibly due to tracing for instance)
if (
seq_len != self._seq_len_cached
or self._cos_cached.device != x.device
or self._cos_cached.dtype != x.dtype
):
self._seq_len_cached = seq_len
t = torch.arange(
x.shape[seq_dimension], device=x.device, dtype=torch.float32
)
freqs = torch.einsum("i,j->ij", t, self.inv_freq.to(x.dtype))
emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
self._cos_cached = emb.cos()[None, None, :, :].to(x.dtype)
self._sin_cached = emb.sin()[None, None, :, :].to(x.dtype)
return self._cos_cached, self._sin_cached
def forward(
self, q: torch.Tensor, k: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
self._cos_cached, self._sin_cached = self._update_cos_sin_tables(
k, seq_dimension=-2
)
return (
apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached),
apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached),
)
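# A minimal usage sketch: rotary embeddings are applied to the query and key tensors,
# here assumed to be (batch, heads, sequence, head_dim) with an even head dimension so
# that rotate_half can split it in two; shapes are arbitrary examples.
def _rotary_embedding_usage_sketch():
    rotary = RotaryEmbedding(dim_model=64)
    q = torch.randn(2, 8, 128, 64)
    k = torch.randn(2, 8, 128, 64)
    q_rot, k_rot = rotary(q, k)
    assert q_rot.shape == q.shape and k_rot.shape == k.shape
    return q_rot, k_rot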
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/positional_embedding/rotary.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABCMeta, abstractmethod
from dataclasses import asdict, dataclass
from typing import Type, TypeVar
import torch.nn as nn
Self = TypeVar("Self", bound="PositionEmbedding")
@dataclass
class PositionEmbeddingConfig:
name: str
dim_model: int
seq_len: int
class PositionEmbedding(nn.Module, metaclass=ABCMeta):
@abstractmethod
def __init__(self, *args, **kwargs) -> None:
super().__init__()
@classmethod
def from_config(cls: Type[Self], config: PositionEmbeddingConfig) -> Self:
# Generate the class inputs from the config
fields = asdict(config)
# Skip all Nones so that default values are used
fields = {k: v for k, v in fields.items() if v is not None}
return cls(**fields)
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/positional_embedding/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
from typing import Optional
import torch
import torch.nn as nn
from functorch.compile import memory_efficient_fusion
from xformers.components import ResidualNormStyle
def _fn(
x: torch.Tensor,
bias: Optional[torch.nn.parameter.Parameter],
residual: torch.Tensor,
prob: float,
layer_norm_style: Optional[ResidualNormStyle],
norm: nn.Module,
) -> torch.Tensor:
a = torch.add(x, bias) if bias is not None else x
b = torch.nn.functional.dropout(a, prob) if prob > 0.0 else a
if layer_norm_style == ResidualNormStyle.Pre:
c = norm(b)
return torch.add(c, residual)
elif layer_norm_style == ResidualNormStyle.Post:
c = torch.add(b, residual)
return norm(c)
else:
raise ValueError
class NVFusedBiasDropoutResLayerNorm(torch.nn.Module):
"""
A layer which fuses the computation of LayerNorm, Residual, and Dropout(x + Bias)
operations with AOTAutograd and nvFuser based on specified layer norm style
"""
def __init__(
self,
p: float,
d_model: int,
bias_shape: Optional[int] = None,
layer_norm_style: ResidualNormStyle = ResidualNormStyle.Post,
) -> None:
super().__init__()
self.p = float(p)
self.requires_residual = True
self.layer_norm_style = layer_norm_style
self.bias = (
nn.Parameter(torch.zeros(bias_shape)) if bias_shape is not None else None
)
self.norm = nn.LayerNorm(d_model)
self._fn_train = functools.partial(
_fn,
prob=p,
layer_norm_style=self.layer_norm_style,
norm=self.norm,
)
self._fn_eval = functools.partial(
_fn,
prob=0.0,
layer_norm_style=self.layer_norm_style,
norm=self.norm,
)
assert (
self.p < 1.0
), f"We don't want to drop all the values, most probably p={self.p} is not properly set"
def init_weights(self, *args, **kwargs):
with torch.no_grad():
if self.bias is not None:
self.bias.fill_(0.0)
def forward(self, x: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
# Train/inference
fn = self._fn_train if self.training else self._fn_eval
# Catch a non-cuda setup, fallback to pytorch
if not x.is_cuda:
return fn(x, self.bias, residual)
# AOTAutograd, NVFuser backed path
aot_fn = memory_efficient_fusion(fn=fn)
return aot_fn(x, self.bias, residual)
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/nvfuser/bias_dropout_res_layernorm.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
from typing import Optional
import torch
import torch.nn as nn
from functorch.compile import memory_efficient_fusion
from xformers.components import Activation, build_activation
def _fn(
x: torch.Tensor,
bias: Optional[torch.nn.parameter.Parameter],
activation: nn.Module,
prob: float,
) -> torch.Tensor:
if bias is not None:
x = torch.add(x, bias)
y = activation(x)
return torch.nn.functional.dropout(y, prob) if prob > 0.0 else y
class NVFusedBiasActivationDropout(torch.nn.Module):
"""
A layer which fuses the computation of Dropout(Activation(x + Bias))
with AOTAutograd and nvFuser
"""
def __init__(
self,
p: float,
activation: Optional[Activation] = None,
bias_shape: Optional[int] = None,
) -> None:
super().__init__()
self.p = float(p)
self.requires_residual = False
self.activation = activation
self.pytorch_activation = build_activation(self.activation)
self.bias = (
nn.Parameter(torch.zeros(bias_shape)) if bias_shape is not None else None
)
self._fn_train = functools.partial(
_fn,
activation=self.pytorch_activation,
prob=self.p,
)
self._fn_eval = functools.partial(
_fn,
activation=self.pytorch_activation,
prob=0.0,
)
assert (
self.p < 1.0
), f"We don't want to drop all the values, most probably p={self.p} is not properly set"
def init_weights(self, *args, **kwargs):
with torch.no_grad():
if self.bias is not None:
self.bias.fill_(0.0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Train/inference
fn = self._fn_train if self.training else self._fn_eval
# Catch a non-cuda setup, fallback to pytorch
if not x.is_cuda:
return fn(x, self.bias)
# AOTAutograd, NVFuser backed path
aot_fn = memory_efficient_fusion(fn)
return aot_fn(x, self.bias)
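# A minimal usage sketch: on CPU tensors the layer takes the plain PyTorch fallback in
# forward() above, so no nvFuser is needed to run it; a CUDA input switches to the
# AOTAutograd/nvFuser path. Shapes, dropout probability and bias shape are arbitrary.
def _nvfused_bias_act_dropout_sketch():
    layer = NVFusedBiasActivationDropout(p=0.1, activation=Activation.GeLU, bias_shape=64)
    x = torch.randn(2, 16, 64)
    y = layer(x)
    assert y.shape == x.shape
    return y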
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/nvfuser/bias_act_dropout.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from xformers import _is_functorch_available
if _is_functorch_available: # noqa
try:
from .bias_act_dropout import NVFusedBiasActivationDropout # noqa
from .bias_dropout_res import NVFusedBiasDropoutRes # noqa
from .bias_dropout_res_layernorm import NVFusedBiasDropoutResLayerNorm # noqa
__all__ = [
"NVFusedBiasActivationDropout",
"NVFusedBiasDropoutResLayerNorm",
"NVFusedBiasDropoutRes",
]
except ImportError:
__all__ = []
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/nvfuser/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional
import torch.nn as nn
from xformers.components import Activation, ResidualNormStyle
from xformers.components.nvfuser import (
NVFusedBiasActivationDropout,
NVFusedBiasDropoutRes,
NVFusedBiasDropoutResLayerNorm,
)
def build_nvfused(
fused_pattern: nn.Module,
shape: tuple,
bias: bool,
activation: Optional[Activation],
p: float,
layer_norm_style: Optional[ResidualNormStyle],
):
bias_shape = shape[-1] if bias else None
d_model = shape[-1]
init_args: Dict[nn.Module, List[Any]] = {
NVFusedBiasActivationDropout: [p, activation, bias_shape], # type: ignore
NVFusedBiasDropoutRes: [p, bias_shape], # type: ignore
NVFusedBiasDropoutResLayerNorm: [ # type: ignore
p,
d_model,
bias_shape,
layer_norm_style,
],
}
return fused_pattern(*init_args[fused_pattern])
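# A minimal usage sketch of build_nvfused, which selects the constructor arguments that
# match the requested fused pattern; the shape and hyper-parameters are arbitrary examples.
def _build_nvfused_sketch():
    import torch

    layer = build_nvfused(
        fused_pattern=NVFusedBiasDropoutResLayerNorm,
        shape=(2, 16, 64),
        bias=True,
        activation=None,
        p=0.1,
        layer_norm_style=ResidualNormStyle.Post,
    )
    x = torch.randn(2, 16, 64)
    return layer(x, x) if layer.requires_residual else layer(x)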
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/nvfuser/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
from typing import Optional
import torch
import torch.nn as nn
from functorch.compile import memory_efficient_fusion
def _fn(
x: torch.Tensor,
bias: Optional[torch.nn.parameter.Parameter],
residual: torch.Tensor,
prob: float,
) -> torch.Tensor:
a = torch.add(x, bias) if bias is not None else x
b = torch.nn.functional.dropout(a, prob) if prob > 0.0 else a
return torch.add(b, residual)
class NVFusedBiasDropoutRes(torch.nn.Module):
"""
A layer which fuses the computation of Dropout(x + Bias) + Residual
with AOTAutograd and nvFuser
"""
def __init__(
self,
p: float,
bias_shape: Optional[int] = None,
) -> None:
super().__init__()
self.p = float(p)
self.requires_residual = True
self.bias = (
nn.Parameter(torch.zeros(bias_shape)) if bias_shape is not None else None
)
self._fn_train = functools.partial(_fn, prob=self.p)
self._fn_eval = functools.partial(_fn, prob=0.0)
assert (
self.p < 1.0
), f"We don't want to drop all the values, most probably p={self.p} is not properly set"
def init_weights(self, *args, **kwargs):
with torch.no_grad():
if self.bias is not None:
self.bias.fill_(0.0)
def forward(self, x: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
# Train/inference
fn = self._fn_train if self.training else self._fn_eval
# Catch a non-cuda setup, fallback to pytorch
if not x.is_cuda:
return fn(x, self.bias, residual)
# AOTAutograd, NVFuser backed path
aot_fn = memory_efficient_fusion(fn)
return aot_fn(x, self.bias, residual)
|
EXA-1-master
|
exa/libraries/xformers/xformers/components/nvfuser/bias_dropout_res.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import random
from functools import partial
import torch
from torch.utils import benchmark
from utils import benchmark_main_helper
import xformers.ops
import xformers.ops.fmha as fmha
torch.backends.cuda.matmul.allow_tf32 = False
def create_attn_bias(
bias_type,
batch_size: int,
num_heads: int,
q_len: int,
kv_len: int,
device,
dtype,
bias_requires_grad: bool = False,
):
NoneType = type(None)
if bias_type is NoneType:
return None
if bias_type is torch.Tensor:
attn_bias = (
torch.randn((batch_size * num_heads, 1, kv_len), device=device, dtype=dtype)
* 3
)
return attn_bias.expand(batch_size * num_heads, q_len, kv_len)
if bias_type is xformers.ops.LowerTriangularMask:
return bias_type()
assert False, f"Unsupported bias type: {bias_type}"
def ref_attention_bmk(q, k, v, attn_bias=None, p=0.0):
if isinstance(attn_bias, xformers.ops.AttentionMask):
attn_bias = (
attn_bias.materialize((q.shape[0], 1, q.shape[1], k.shape[1]))
.to(q)
.squeeze()
)
q = q * (1.0 / q.shape[-1] ** 0.5)
if attn_bias is None:
attn = q @ k.transpose(-2, -1)
else:
# equivalent to (q @ k.transpose(-2, -1) + m).softmax(-1) @ v
# but faster, and is what is used in PyTorch now
attn = torch.baddbmm(attn_bias, q, k.transpose(-2, -1))
attn = attn.softmax(-1)
if p > 0:
attn = torch.nn.functional.dropout(attn, p=p)
return attn @ v
def ref_attention(q, k, v, attn_bias, p=0.0):
assert q.ndim == 4
def T(t):
return t.permute((0, 2, 1, 3)).reshape(
[t.shape[0] * t.shape[2], t.shape[1], t.shape[3]]
)
out = ref_attention_bmk(T(q), T(k), T(v), attn_bias, p)
out = out.reshape([q.shape[0], q.shape[2], q.shape[1], v.shape[3]])
return out.permute((0, 2, 1, 3))
min_run_time = 0.5
device = torch.device("cuda")
NUM_THREADS = [1] if device.type == "cuda" else [1, 40]
SHAPES = [
# ViT
(384, 197, 1, 88),
(384, 197, 1, 80),
(384, 197, 1, 64),
(1024, 197, 1, 88),
(1024, 197, 1, 80),
(1024, 197, 1, 64),
# ViT-Huge
(32 * 16, 197, 1, 80),
(32, 197, 16, 80),
(32, 197, 16, 64),
(32, 197, 16, 128),
# ViT-Giant
(16 * 16, 197, 1, 88),
(16, 197, 16, 88),
(16, 197, 16, 64),
(16, 197, 16, 128),
# FB models
(1024, 82, 8, 64),
(150, 256, 16, 64),
(64, 256, 12, 64),
# Stable diffusion (https://github.com/huggingface/diffusers/pull/532)
(1, 4096, 16, 40), # 512x512
(1, 16384, 16, 40), # 1024x1024
(1, 4096, 16, 80),
(1, 16384, 16, 80),
# + bs4
(4, 4096, 16, 40),
(4, 16384, 16, 40),
(4, 4096, 16, 80),
(4, 16384, 16, 80),
# ParlAI model
(256, 4096, 16, 64),
# Zetta B M H K
(8, 2048, 20, 128),
# LLaMa 70b - mp=8/16
*sorted(list(itertools.product([1, 2], [2048, 4096, 8192], [4, 8], [128]))),
*sorted(list(itertools.product([16], [128, 512, 1024], [16], [16, 32, 64, 128]))),
]
OPS = [
(xformers.ops.fmha.cutlass.FwOp, xformers.ops.fmha.cutlass.BwOp),
(xformers.ops.fmha.flash.FwOp, xformers.ops.fmha.flash.BwOp),
# TODO: Triton is not stable: it can trigger Illegal Memory Accesses
# and its performance varies a lot between runs.
# (xformers.ops.fmha.triton.FwOp, xformers.ops.fmha.triton.BwOp),
]
def product_dict(**kwargs):
keys = kwargs.keys()
vals = kwargs.values()
for instance in itertools.product(*vals):
yield dict(zip(keys, instance))
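# A small illustrative check (arbitrary values): product_dict yields one dict per
# element of the Cartesian product of the keyword arguments.
def _product_dict_example():
    assert list(product_dict(a=[1, 2], b=["x"])) == [
        {"a": 1, "b": "x"},
        {"a": 2, "b": "x"},
    ]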
CASES = list(
product_dict(
shape=SHAPES,
num_threads=NUM_THREADS,
dropout_p=[0.0],
attn_bias_cfg=[(type(None), False)],
dtype=[torch.half],
)
)
# Add more cases with some variations
for c in CASES.copy():
c = c.copy()
c.update(
random.Random(str(c["shape"])).choice(
[
{"dropout_p": 0.3},
{"attn_bias_cfg": (torch.Tensor, False)},
{"attn_bias_cfg": (torch.Tensor, True)},
{"attn_bias_cfg": (xformers.ops.LowerTriangularMask, False)},
{"dtype": torch.bfloat16},
{"dtype": torch.float},
]
)
)
CASES.append(c)
def create_tensors(shape, dtype, requires_grad=False):
B, M, H, K = shape
qkv = torch.rand(
[B, M, 3, H, K], device=device, dtype=dtype, requires_grad=requires_grad
)
q, k, v = xformers.ops.unbind(qkv, 2)
return qkv, q, k, v
def mem_eff_attention_fw(shape, num_threads: int, attn_bias_cfg, dropout_p, dtype):
B, M, H, K = shape
_, q, k, v = create_tensors(shape, dtype)
attn_bias_type, attn_bias_requires_grad = attn_bias_cfg
if attn_bias_requires_grad:
return
bias = create_attn_bias(
attn_bias_type,
batch_size=B,
num_heads=H,
q_len=M,
kv_len=M,
device=device,
dtype=dtype,
bias_requires_grad=attn_bias_requires_grad,
)
inp = fmha.Inputs(query=q, key=k, value=v, attn_bias=bias, p=dropout_p)
dtype_str = {
torch.bfloat16: "b16",
torch.half: "f16",
torch.float: "f32",
}[dtype]
sub_label = (
f"{dtype_str} {B}-{M}-{H}-{K}, p={dropout_p}, "
f"BiasT={attn_bias_type.__name__}"
)
has_run = False
for fw_op, bw_op in OPS:
if not fw_op.supports(inp):
continue
yield benchmark.Timer(
stmt="fn(q, k, v, attn_bias, p)",
globals={
"q": q,
"k": k,
"v": v,
"attn_bias": inp.attn_bias,
"p": dropout_p,
"fn": partial(
xformers.ops.memory_efficient_attention, op=(fw_op, bw_op)
),
},
label=f"attention (attn_bias={attn_bias_type})",
description=fw_op.NAME,
sub_label=sub_label,
num_threads=num_threads,
)
has_run = True
if not has_run:
return
yield benchmark.Timer(
stmt="fn(q, k, v, attn_bias, p)",
globals={
"q": q,
"k": k,
"v": v,
"attn_bias": inp.attn_bias,
"p": dropout_p,
"fn": ref_attention,
},
label=f"attention (attn_bias={attn_bias_type})",
description="eager",
sub_label=sub_label,
num_threads=num_threads,
)
def mem_eff_attention_bw(shape, num_threads: int, attn_bias_cfg, dropout_p, dtype):
B, M, H, K = shape
qkv, q, k, v = create_tensors(shape, dtype, requires_grad=True)
attn_bias_type, attn_bias_requires_grad = attn_bias_cfg
bias = create_attn_bias(
attn_bias_type,
batch_size=B,
num_heads=H,
q_len=M,
kv_len=M,
device=device,
dtype=dtype,
bias_requires_grad=attn_bias_requires_grad,
)
inp = fmha.Inputs(query=q, key=k, value=v, attn_bias=bias, p=dropout_p)
dtype_str = {
torch.bfloat16: "b16",
torch.half: "f16",
torch.float: "f32",
}[dtype]
sub_label = (
f"{dtype_str} {B}-{M}-{H}-{K}, p={dropout_p}, "
f"BiasT={attn_bias_type.__name__}, BiasGrad={attn_bias_requires_grad}"
)
has_run = False
for fw_op, bw_op in OPS:
if not fw_op.supports(inp) or not bw_op.supports(inp):
continue
has_run = True
out = xformers.ops.memory_efficient_attention(
inp.query, inp.key, inp.value, inp.attn_bias, inp.p, op=(fw_op, bw_op)
)
grad_benchmark = torch.ones_like(q)
yield benchmark.Timer(
stmt="out.backward(grad, retain_graph=True)",
globals={
"out": out,
"grad": grad_benchmark,
},
label=f"attention backward (attn_bias={attn_bias_type})",
description=bw_op.NAME,
sub_label=sub_label,
num_threads=num_threads,
)
del out
if not has_run:
return
yield benchmark.Timer(
stmt="out.backward(grad, retain_graph=True)",
globals={
"out": ref_attention(q, k, v, inp.attn_bias, dropout_p),
"grad": grad_benchmark,
},
label=f"attention backward (attn_bias={attn_bias_type})",
description="vanilla",
sub_label=sub_label,
num_threads=num_threads,
)
benchmark_main_helper(mem_eff_attention_fw, CASES, min_run_time=min_run_time)
benchmark_main_helper(mem_eff_attention_bw, CASES, min_run_time=min_run_time)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_mem_eff_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
from typing import Any, Dict, List, Optional
import torch
import torch.nn as nn
import triton
from xformers.benchmarks.utils import TestCase, pretty_plot, pretty_print
from xformers.components import Activation, ResidualNormStyle, build_activation
from xformers.components.nvfuser import (
NVFusedBiasActivationDropout,
NVFusedBiasDropoutRes,
NVFusedBiasDropoutResLayerNorm,
)
from xformers.components.nvfuser.bias_act_dropout import _fn as bias_act_dropout
from xformers.components.nvfuser.bias_dropout_res import _fn as bias_dropout_res
from xformers.components.nvfuser.bias_dropout_res_layernorm import (
_fn as bias_dropout_res_layernorm,
)
from xformers.components.nvfuser.utils import build_nvfused
from xformers.triton import FusedDropoutBias
SHAPES = [
(8, 256, 512),
(8, 512, 1024),
(4, 1024, 1024),
(2, 2048, 2048),
(1, 2048, 12288),
(2, 4096, 4096),
]
P = 0.1
def build_torch_fn(
pattern: nn.Module,
shape: tuple,
bias: Optional[torch.Tensor],
activation: Optional[Activation],
p: float,
layer_norm_style: Optional[ResidualNormStyle],
dtype: torch.dtype,
):
torch_act = build_activation(activation)
if pattern == NVFusedBiasActivationDropout:
return partial(bias_act_dropout, bias=bias, activation=torch_act, prob=p)
elif pattern == NVFusedBiasDropoutRes:
return partial(bias_dropout_res, bias=bias, prob=p)
elif pattern == NVFusedBiasDropoutResLayerNorm:
norm = nn.LayerNorm(shape[-1]).to(device=torch.device("cuda"), dtype=dtype)
return partial(
bias_dropout_res_layernorm,
bias=bias,
prob=p,
layer_norm_style=layer_norm_style,
norm=norm,
)
else:
raise ValueError
def bench_nvfused(
fused_pattern: nn.Module,
bias: bool,
backward: bool,
activation: Optional[Activation],
layer_norm_style: Optional[ResidualNormStyle],
):
device = torch.device("cuda")
pattern_str = {
NVFusedBiasActivationDropout: "Bias_Act_Dropout",
NVFusedBiasDropoutRes: "Bias_Dropout_Res",
NVFusedBiasDropoutResLayerNorm: "Bias_Dropout_Res_LayerNorm",
}[
fused_pattern # type: ignore
]
for dtype in [
torch.float16,
torch.float32,
]:
results: Dict[str, Any] = {}
results_mem: Dict[str, Any] = {}
for B, M, K in SHAPES:
a = torch.rand(
(B, M, K), device=device, dtype=dtype, requires_grad=backward
)
b = torch.rand(K, device=device, dtype=dtype, requires_grad=backward)
torch_fn = build_torch_fn(
fused_pattern,
(B, M, K),
b if bias else None,
activation,
P,
layer_norm_style,
dtype,
)
nvfuser_fn = build_nvfused(
fused_pattern, (B, M, K), bias, activation, P, layer_norm_style
)
nvfuser_fn.cuda()
nvfuser_fn.to(device=device, dtype=dtype)
residual = nvfuser_fn.requires_residual
triton_fn = (
FusedDropoutBias(
P, bias_shape=K if bias else None, activation=activation
)
if fused_pattern == NVFusedBiasActivationDropout
else None
)
def step(fn, residual, x):
y = fn(x=x, residual=x) if residual else fn(x)
if backward:
y.grad = None
torch.norm(y).backward()
return y
testcases = [
TestCase(
partial(step, fn=torch_fn, residual=residual),
"pytorch- bias: {} - fw{}{}{}".format(
bias,
"+bw" if backward else "",
f" - Act: {activation}" if activation is not None else "",
f" - Style: {layer_norm_style}"
if layer_norm_style is not None
else "",
),
),
TestCase(
partial(step, fn=nvfuser_fn, residual=residual),
"nvFuser- bias: {} - fw{}{}{}".format(
bias,
"+bw" if backward else "",
f" - Act: {activation}" if activation is not None else "",
f" - Style: {layer_norm_style}"
if layer_norm_style is not None
else "",
),
),
]
if triton_fn is not None:
triton_test = TestCase(
partial(step, fn=triton_fn, residual=residual),
"triton- bias: {} - fw{}{}{}".format(
bias,
"+bw" if backward else "",
f" - Act: {activation}" if activation is not None else "",
f" - Style: {layer_norm_style}"
if layer_norm_style is not None
else "",
),
)
testcases.append(triton_test)
for testcase in testcases:
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
torch.cuda.synchronize()
time = triton.testing.do_bench(
lambda: testcase.function(x=a), grad_to_none=[a, b]
)[0]
torch.cuda.synchronize()
max_memory = torch.cuda.max_memory_allocated() // 2**20
key = f"B={B}, M={M}, K={K}"
if key not in results:
results[key] = {}
results[key][testcase.name] = f"{time:.3f}"
# Record peak mem usage
if key not in results_mem:
results_mem[key] = {}
results_mem[key][testcase.name] = f"{max_memory:.1f}"
pretty_print(
results,
title="\n --- RUNTIME Type: {} {} --- ".format(pattern_str, dtype),
units="ms",
)
pretty_print(
results_mem,
title="\n --- PEAK MEMORY Type: {} {} --- ".format(pattern_str, dtype),
units="MB",
)
pretty_plot(
results,
title="RUNTIME-{}-FW{}-{}{}-{}{}".format(
pattern_str,
"+BW" if backward else "",
bias,
f"-{activation}" if activation is not None else "",
dtype,
f"-{layer_norm_style}" if layer_norm_style is not None else "",
),
units="ms",
dash_key="pytorch",
legend_loc="upper left",
)
pretty_plot(
results_mem,
title="MAXMEM-{}-FW{}-{}{}-{}{}".format(
pattern_str,
"+BW" if backward else "",
bias,
f"-{activation}" if activation is not None else "",
dtype,
f"-{layer_norm_style}" if layer_norm_style is not None else "",
),
units="MB",
dash_key="pytorch",
legend_loc="upper left",
)
PATTERNS = [
NVFusedBiasActivationDropout,
NVFusedBiasDropoutRes,
NVFusedBiasDropoutResLayerNorm,
]
for pattern in PATTERNS:
activations: List[Optional[Activation]] = (
[Activation.ReLU, Activation.GeLU, Activation.SquaredReLU]
if pattern == NVFusedBiasActivationDropout
else [None]
)
for activation in activations:
for bw in [True, False]:
for bias in [True, False]:
styles: List[Optional[ResidualNormStyle]] = (
[ResidualNormStyle.Pre, ResidualNormStyle.Post]
if pattern == NVFusedBiasDropoutResLayerNorm
else [None]
)
for style in styles:
bench_nvfused(pattern, bias, bw, activation, style) # type: ignore
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_nvfuser.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import random
import torch
from torch.utils import benchmark
from utils import benchmark_main_helper
import xformers.ops as xops
min_run_time = 0.5
device = torch.device("cuda")
def product_dict(**kwargs):
keys = kwargs.keys()
vals = kwargs.values()
for instance in itertools.product(*vals):
yield dict(zip(keys, instance))
CASES_IADD = list(
product_dict(
shape=[
(int(48 * 0.6), 48, 1, 257 * 1536),
(int(48 * 0.6), 48, 257, 1536),
],
scaling=[False, True],
dtype=[torch.half],
)
) + list(
product_dict(
shape=[
# Format: [B_src, B_inp, M, D]
(int(192 * 0.6), 192, 50, 1536),
(int(48 * 257 * 0.6), 257 * 48, 1, 1536),
(int(192 * 50 * 0.6), 192 * 50, 1, 1536),
(int(16 * 257 * 0.6), 48 * 257, 1, 1536),
],
scaling=[False],
dtype=[torch.half],
)
)
CASES_ISELECT = list(
product_dict(
batches=[((48, 257), (50, 192))],
D=[1536],
keep_ratio=[0.6],
dtype=[torch.half],
)
)
DTYPE2STR = {
torch.bfloat16: "b16",
torch.half: "f16",
torch.float32: "f32",
}
def _setup_test(functions, fw: bool = False, bw: bool = False, **kwargs):
for k, benchmark_cls in functions.items():
benchmark_object = benchmark_cls(**kwargs, bw=bw)
label = benchmark_object.label
label += "fw" if fw else ""
label += "bw" if bw else ""
def run_one():
if fw:
benchmark_object.fw()
if bw:
benchmark_object.bw()
yield benchmark.Timer(
stmt="fn()",
globals={
"fn": run_one,
},
label=label,
description=k,
sub_label=benchmark_object.sub_label,
)
class ScaledIndexAddBenchmark:
def __init__(self, dtype, scaling: bool, shape, bw: bool) -> None:
B_src, B_out, M, D = shape
torch.manual_seed(B_out + B_src)
dtype_str = DTYPE2STR.get(dtype, dtype)
self.sub_label = f"{dtype_str} B_src={B_src}, B_out={B_out}, M={M}, D={D} s={'Y' if scaling else 'N'}"
self.label = "scaled_index_add"
self.alpha = 0.73
self.inp = torch.randn(
[B_out, M, D], device="cuda", dtype=dtype, requires_grad=bw
)
self.src = torch.randn(
[B_src, M, D], device="cuda", dtype=dtype, requires_grad=bw
)
self.scaling = (
torch.randn([D], device="cuda", dtype=dtype, requires_grad=bw)
if scaling
else None
)
self.index = torch.tensor(
[i for i in range(self.src.shape[0])], dtype=torch.int64, device="cuda"
)
self.grad = torch.randn([B_out, M, D], device="cuda", dtype=dtype)
self.out = torch.Tensor()
def fw(self) -> None:
self.out = xops.scaled_index_add(
input=self.inp.clone(),
index=self.index,
source=self.src,
scaling=self.scaling,
alpha=self.alpha,
)
def bw(self):
self.inp.grad = None
self.src.grad = None
if self.scaling is not None:
self.scaling.grad = None
self.out.backward(self.grad, retain_graph=True)
class ScaledIndexAddBenchmarkBaseline(ScaledIndexAddBenchmark):
def fw(self) -> None:
src_scaled = self.src
if self.scaling is not None:
            src_scaled = src_scaled * self.scaling.unsqueeze(0).unsqueeze(0)
self.out = self.inp.index_add(
dim=0,
source=src_scaled,
index=self.index,
alpha=self.alpha,
)
def scaled_index_add_fw(**kwargs):
yield from _setup_test(
**kwargs,
fw=True,
functions={
"xformers": ScaledIndexAddBenchmark,
"pytorch": ScaledIndexAddBenchmarkBaseline,
},
)
def scaled_index_add_fwbw(**kwargs):
yield from _setup_test(
**kwargs,
fw=True,
bw=True,
functions={
"xformers": ScaledIndexAddBenchmark,
"pytorch": ScaledIndexAddBenchmarkBaseline,
},
)
class IndexSelectBenchmark:
def __init__(self, dtype, batches, D, keep_ratio, bw: bool) -> None:
dtype_str = DTYPE2STR.get(dtype, dtype)
self.sub_label = f"{dtype_str} D={D} batches={batches} keep={keep_ratio}"
self.label = "index_select"
srcs = [torch.randn([B, seqlen * D]) for (B, seqlen) in batches]
src = torch.cat([s.view([-1, D]) for s in srcs], dim=0).cuda().to(dtype)
src.requires_grad_(True)
indices = []
sources = []
elements_i = 0
for source_i in srcs:
index = [i for i in range(source_i.shape[0])]
random.Random(source_i.shape[0]).shuffle(index)
indices.append(
torch.tensor(
index[: int(keep_ratio * source_i.shape[0])],
dtype=torch.int64,
device="cuda",
)
)
sources.append(
src[
elements_i : elements_i + source_i.shape[0] * source_i.shape[1] // D
].reshape(source_i.shape)
)
elements_i += source_i.shape[0] * source_i.shape[1] // D
self.indices, self.sources, self.src = indices, sources, src
self.out = torch.Tensor()
def fw(self) -> None:
self.out = xops.index_select_cat(self.sources, self.indices)
def bw(self):
self.src.grad = None
self.out.backward(self.out, retain_graph=True)
class IndexSelectBenchmarkBaseline(IndexSelectBenchmark):
def fw(self) -> None:
self.out = torch.cat(
[s[i].flatten() for s, i in zip(self.sources, self.indices)], dim=0
)
def index_select_fw(**kwargs):
yield from _setup_test(
**kwargs,
fw=True,
functions={
"xformers": IndexSelectBenchmark,
"pytorch": IndexSelectBenchmarkBaseline,
},
)
def index_select_fwbw(**kwargs):
yield from _setup_test(
**kwargs,
fw=True,
bw=True,
functions={
"xformers": IndexSelectBenchmark,
"pytorch": IndexSelectBenchmarkBaseline,
},
)
benchmark_main_helper(scaled_index_add_fw, CASES_IADD, min_run_time=min_run_time)
benchmark_main_helper(scaled_index_add_fwbw, CASES_IADD, min_run_time=min_run_time)
benchmark_main_helper(index_select_fw, CASES_ISELECT, min_run_time=min_run_time)
benchmark_main_helper(index_select_fwbw, CASES_ISELECT, min_run_time=min_run_time)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_indexing.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from typing import Any, Dict
import torch
import triton
from xformers.benchmarks.utils import TestCase, pretty_plot, pretty_print
from xformers.components import Activation
from xformers.components.feedforward import MLP, FusedMLP
SHAPES = [
(8, 256, 512),
(8, 512, 1024),
(4, 1024, 1024),
(2, 2048, 2048),
(1, 2048, 4096),
(1, 1024, 12288),
]
HIDDEN_LAYER_MULTIPLIER = [4]
def bench_MLP(backward: bool, bias: bool, dropout: float, activation: Activation):
device = torch.device("cuda")
bw = "+bw" if backward else ""
for dtype in [torch.float16, torch.float32]:
results: Dict[str, Any] = {}
for B, M, K in SHAPES:
for hlm in HIDDEN_LAYER_MULTIPLIER:
fused_mlp = FusedMLP(
dim_model=K,
dropout=dropout,
activation=activation,
hidden_layer_multiplier=hlm,
bias=bias,
).to(device=device, dtype=dtype)
standard_mlp = MLP(
dim_model=K,
dropout=dropout,
activation=activation,
hidden_layer_multiplier=hlm,
bias=bias,
).to(device=device, dtype=dtype)
a = torch.randn(
(B, M, K), requires_grad=backward, device=device, dtype=dtype
)
def mlp_standard():
y = standard_mlp(a)
if backward:
torch.norm(y).backward()
return y
def mlp_fused():
y = fused_mlp(a)
if backward:
torch.norm(y).backward()
return y
for testcase in [
TestCase(
mlp_standard,
"standard - {} - {} bias - {} drop - fw{}".format(
activation,
"no" if not bias else "",
dropout,
"+bw" if backward else "",
),
),
TestCase(
mlp_fused,
"fused - {} - {} bias - {} drop - fw{}".format(
activation,
"no" if not bias else "",
dropout,
"+bw" if backward else "",
),
),
]:
time = triton.testing.do_bench(testcase.function)[0]
key = f"{B} x {M} x {K} - {hlm}"
if key not in results:
results[key] = {}
results[key][testcase.name] = f"{time:.2f}"
pretty_print(
results,
title=f"\n --- Type: {dtype} --- ",
units="runtime in ms, lower is better. BMK - mul: ",
)
pretty_plot(
results,
title=f"MLP-{activation}-FW{bw}-{dtype}",
units="runtime in ms, lower is better",
dash_key="torch",
)
if __name__ == "__main__":
# Get the user requests
parser = argparse.ArgumentParser("Benchmark MLP")
parser.add_argument("-act", "--activations", nargs="+", default=[Activation.GeLU])
parser.add_argument("-bias", "--bias", nargs="+", default=[False, True])
parser.add_argument("-dropout", "--dropout", nargs="+", default=[0.0, 0.1])
args = parser.parse_args()
for bw in [False, True]:
for bias in args.bias:
for dropout in args.dropout:
for activation in args.activations:
bench_MLP(
backward=bw,
bias=bias,
dropout=float(dropout),
activation=activation,
)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_mlp.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List
import torch
import triton
from xformers.benchmarks.utils import TestCase, pretty_plot, pretty_print
from xformers.triton.sum_strided import sum_2d_dim_0
SHAPES = [
(128, 128),
(384, 128),
(784, 512),
(1024, 768),
(2048, 1024),
(4096, 4096),
]
def to_gbs(a, ms):
# Read the full array, write the non-reduced dimension
return ((a.numel() + a.shape[1]) * a.element_size() * 1e-9) / (ms * 1e-3)
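# Worked example (arbitrary numbers): a 1024 x 768 fp32 tensor reads 1024 * 768 floats
# and writes 768 floats, i.e. (1024 * 768 + 768) * 4 bytes ~= 3.15 MB; at 0.1 ms this
# amounts to roughly 31.5 GB/s.
def _to_gbs_example():
    a = torch.empty(1024, 768, dtype=torch.float32)
    return to_gbs(a, ms=0.1)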
def bench_functions(
test_cases: List[TestCase], shapes, metric_transform, unit, title=""
):
device = torch.device("cuda")
for dtype in [torch.float16, torch.float32]:
results: Dict[str, Any] = {}
for M, N in shapes:
a = torch.rand(M, N, device=device, dtype=dtype, requires_grad=True)
for testcase in test_cases:
time = triton.testing.do_bench(lambda: testcase.function(a))[0]
metric = metric_transform(a, time)
key = f"M={M}, N={N}"
if key not in results:
results[key] = {}
results[key][testcase.name] = f"{metric:.1f}"
_type = " fp16" if dtype == torch.float16 else " fp32"
pretty_print(
results,
title=" ------------- Type: {} ------------- ".format(_type),
units=unit,
)
pretty_plot(results, title + _type, unit, dash_key="pytorch")
bench_functions(
[
TestCase(lambda x: torch.sum(x, dim=0), "pytorch"),
TestCase(sum_2d_dim_0, "triton"),
],
SHAPES,
to_gbs,
"GB/s",
"Strided_sum",
)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_triton_stride_sum.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import gc
import math
from collections import namedtuple
from dataclasses import dataclass
import matplotlib.pyplot as plt
import torch
import triton
from triton.ops.blocksparse import matmul as blocksparse_matmul
from xformers.benchmarks.utils import pretty_barplot
from xformers.components.attention.attention_patterns import (
axial_2d_pattern,
causal_1d_pattern,
global_token_pattern,
local_1d_pattern,
local_2d_pattern,
)
from xformers.components.attention.core import SparseCS, _matmul_with_mask
device = "cuda"
TestCase = namedtuple("TestCase", ["prepare_callable", "mask", "config", "name"])
##############################################
# Plotting utilities
##############################################
def plot_mask(mask, config, filename):
sparsity = get_sparsity(mask)
batch_size = config.batch_size
num_heads = config.num_heads
seq_len = config.seq_length
proxy = torch.ones(batch_size, num_heads, seq_len, seq_len, dtype=torch.bool)
proxy = triton.testing.mask_tensor(proxy, mask, config.block_size, False)
proxy = proxy[0][0]
f = plt.figure()
plt.imshow(proxy.logical_not(), cmap="gray")
plt.suptitle("Sparsity = " + str(sparsity) + "%")
plt.savefig(filename)
plt.close(f)
##############################################
# Mask and testing utilities
##############################################
def get_mask(MaskGenType, config, config_setter=[]):
mask_config = Configuration()
mask_config.init(config)
# Get the mask
mask_generator = MaskGenType(mask_config)
for (key, value) in config_setter:
mask_generator.set_config_attr(key, value)
if not mask_generator.is_valid_config():
return None
return mask_generator()
def densify_mask(mask, config):
num_heads = config.num_heads
seq_length = config.seq_length
block_size = config.block_size
dense_mask = torch.zeros(num_heads, seq_length, seq_length)
for (h, i, j) in zip(*mask.nonzero(as_tuple=True)):
dense_mask[
h,
i * block_size : (i + 1) * block_size,
j * block_size : (j + 1) * block_size,
] = mask[h, i, j]
return dense_mask
def mask_tensor(a, mask, config):
return triton.testing.mask_tensor(a, mask, config.block_size, 0.0)
def sparsify_tensor(a, mask, config):
return triton.testing.sparsify_tensor(a, mask, config.block_size)
def get_sparsity(mask):
return round((1.0 - mask.sum().item() / mask.numel()) * 100)
##############################################
# Mask Generation
##############################################
@dataclass
class Configuration(object):
batch_size: int = 32
num_heads: int = 12
seq_length: int = 2048
hidden_size: int = 768 # hidden_size = n_heads * projection_hidden_dimension
block_size: int = 64
@property
def blocked_seq_length(self):
return int(self.seq_length / self.block_size)
def init(self, kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
desc = [
f"bs={self.batch_size}",
f"h={self.num_heads}",
f"k={self.hidden_size}",
f"seq={self.seq_length}",
f"bl={self.block_size}",
]
return ",".join(desc)
class AttentionMask(object):
def __init__(self, config=None):
super().__init__()
if config is None:
config = Configuration()
self.config = config
def is_blocked(self):
return self.config.block_size != 1
def is_valid_config(self, keep_blocked=True):
return True
def expand(self, mask):
        if mask.ndim == 2:
            return mask.unsqueeze(0).expand(self.config.num_heads, -1, -1)
        return mask  # already expanded per head
def gen_mask(self, keep_blocked=True):
raise NotImplementedError("Abstract data class")
def set_config_attr(self, key, value):
setattr(self.config, key, value)
def __str__(self):
raise NotImplementedError("Abstract data type")
def __call__(self):
mask = self.gen_mask()
return mask, self.config, str(self)
class RandomAttentionMask(AttentionMask):
"""
This is a Random mask. Useful for performance and memory analysis.
"""
def __init__(self, config=None):
super(RandomAttentionMask, self).__init__(config)
self.set_config_attr("mask_prob", 0.5)
def gen_mask(self, keep_blocked=True):
seq_length = self.config.seq_length
if keep_blocked:
seq_length = self.config.blocked_seq_length
mask = torch.rand(seq_length, seq_length) > self.config.mask_prob
return self.expand(mask)
def __str__(self):
return "random"
class LowerTriangularAttentionMask(AttentionMask):
"""
    This is a lower triangular mask. This is common in decoder-only models.
    This should reduce the computation and memory to roughly half, since close to
    half of the mask is zero.
    The mask stays the same for each head and each input.
    Nit pick (TODO) - while blocking, we should ensure that the blocks along the
    diagonal are themselves lower triangular. For performance measurement this is
    fine to ignore, as we treat the whole block as useful values.
"""
def __init__(self, config=None):
super(LowerTriangularAttentionMask, self).__init__(config)
def gen_mask(self, keep_blocked=True):
seq_length = self.config.seq_length
if keep_blocked:
seq_length = self.config.blocked_seq_length
return self.expand(causal_1d_pattern(seq_length))
def __str__(self):
return "lower_triangular"
class BigBirdAttentionMask(AttentionMask):
"""
    BigBird masks are composed of three types of masks - random, global and window.
    For more details, refer to https://arxiv.org/pdf/2007.14062.pdf
    Note that the mask is per head here, so the mask is a 3D tensor of shape
    (num_heads, seq_length, seq_length).
"""
def __init__(self, config=None):
super(BigBirdAttentionMask, self).__init__(config)
self.mask_per_head = True
self.set_config_attr("num_global_tokens", 2 * self.config.block_size)
self.set_config_attr("num_random_tokens", 3 * self.config.block_size)
self.set_config_attr("num_window_tokens", 3 * self.config.block_size)
def gen_global_mask(self, seq_length):
        # Global tokens attend to every token in the sequence, and every token attends to them
num_global_blocks = self.config.num_global_tokens // self.config.block_size
mask_indices = torch.randint(0, seq_length - 1, size=(num_global_blocks,))
mask_indices = torch.unique(mask_indices)
query_mask = torch.zeros(seq_length).to(dtype=torch.bool)
query_mask.scatter_(0, mask_indices, True)
return global_token_pattern(query_mask)
def gen_random_mask(self, seq_length):
# Each query token attends over r random number of tokens
num_random_blocks = self.config.num_random_tokens // self.config.block_size
mask_indices = torch.randint(
0, seq_length - 1, size=(seq_length, num_random_blocks)
)
random_mask = torch.zeros(seq_length, seq_length).to(dtype=torch.bool)
random_mask.scatter_(1, mask_indices, True)
return random_mask
def gen_window_mask(self, seq_length):
num_window_blocks = self.config.num_window_tokens // self.config.block_size
if num_window_blocks % 2 == 0:
num_window_blocks += 1
return local_1d_pattern(seq_length, num_window_blocks)
def gen_mask(self, keep_blocked=True):
seq_length = self.config.seq_length
if keep_blocked:
seq_length = self.config.blocked_seq_length
assert keep_blocked, "Not implemented, call to_dense later to get full tensor"
if self.mask_per_head:
head_masks = []
for _ in range(self.config.num_heads):
global_mask = self.gen_global_mask(seq_length)
random_mask = self.gen_random_mask(seq_length)
window_mask = self.gen_window_mask(seq_length)
mask = global_mask + random_mask + window_mask
head_masks.append(mask)
mask = torch.stack(head_masks)
else:
global_mask = self.gen_global_mask(seq_length)
random_mask = self.gen_random_mask(seq_length)
window_mask = self.gen_window_mask(seq_length)
mask = global_mask + random_mask + window_mask
mask = self.expand(mask)
return mask
def __str__(self):
return "bigbird"
class AxialAttentionMask(AttentionMask):
"""
    Axial attention mask: each query attends to the tokens that share its row or its
    column when the (blocked) sequence is viewed as a 2D grid, so the sequence length
    must be a perfect square. The mask is expanded to a 3D tensor of shape
    (num_heads, seq_length, seq_length).
"""
def __init__(self, config=None):
super(AxialAttentionMask, self).__init__(config)
if config is None:
self.set_config_attr("seq_length", 1024)
def is_valid_config(self, keep_blocked=True):
seq_length = self.config.seq_length
if keep_blocked:
seq_length = self.config.blocked_seq_length
H = int(math.sqrt(seq_length))
if H * H == seq_length:
return True
return False
def gen_mask(self, keep_blocked=True):
seq_length = self.config.seq_length
if keep_blocked:
seq_length = self.config.blocked_seq_length
H = int(math.sqrt(seq_length))
assert H * H == seq_length, f"H={H}, seq_length={seq_length}"
return self.expand(axial_2d_pattern(H, H))
def __str__(self):
return "axial"
class LocalAttentionMask(AttentionMask):
"""
    Local 2D attention mask: each query attends to the tokens inside a local window of
    num_local_blocks when the (blocked) sequence is viewed as a 2D grid, so the sequence
    length must be a perfect square. The mask is expanded to a 3D tensor of shape
    (num_heads, seq_length, seq_length).
"""
def __init__(self, config=None):
super(LocalAttentionMask, self).__init__(config)
self.set_config_attr("num_local_blocks", 3)
if config is None:
self.set_config_attr("seq_length", 1024)
def is_valid_config(self, keep_blocked=True):
seq_length = self.config.seq_length
if keep_blocked:
seq_length = self.config.blocked_seq_length
H = int(math.sqrt(seq_length))
if H * H == seq_length:
return True
return False
def gen_mask(self, keep_blocked=True):
seq_length = self.config.seq_length
if keep_blocked:
seq_length = self.config.blocked_seq_length
H = int(math.sqrt(seq_length))
assert H * H == seq_length, f"H={H}, seq_length={seq_length}"
return self.expand(local_2d_pattern(H, H, self.config.num_local_blocks))
def __str__(self):
return "local"
##############################################
# Class to organize the experiments
##############################################
class Experiment(object):
def __init__(self, mode, dtype, do_accuracy_check, profile_sputnik):
self.mode = mode
self.dtype = dtype
self.do_accuracy_check = do_accuracy_check
self.profile_sputnik = profile_sputnik
def reset_results(self):
self.results = {}
self.results["flops"] = {}
self.results["time"] = {}
self.results["memory"] = {}
self.results["speedup"] = {}
self.results["memory_savings"] = {}
    def do_mem(self, fn):
        # bookkeeping
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
# actually run the function
fn()
fn()
torch.cuda.synchronize()
return torch.cuda.max_memory_allocated() // 2**20
def gen_config(self):
raise NotImplementedError("Not setup")
def plot(self, sparsity, pattern_name):
raise NotImplementedError("Not setup")
def run(self):
raise NotImplementedError("Not setup")
def add_kv(self, d, d_key, d_value, testcase):
d_value = max(0, d_value)
if d_key not in d:
d[d_key] = {}
d[d_key][testcase.name] = d_value
def bench_all(
self, a, b, tests, mask_config, sparsity, baseline_name, op_flops, dict_key
):
if self.do_accuracy_check:
self.check_all(tests, a, b)
for testcase in tests:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
try:
fn = testcase.prepare_callable(a, b, testcase.mask, testcase.config)
ms = triton.testing.do_bench(fn)[0]
flops = op_flops / ms * 1e3 # TFlop per second
mem = self.do_mem(fn)
except Exception:
# raise
ms = -1
flops = -1
mem = -1
# Write into results
# dict_key = f"sp={sparsity}%,{mask_config}"
self.add_kv(self.results["time"], dict_key, ms, testcase)
self.add_kv(self.results["flops"], dict_key, flops, testcase)
self.add_kv(self.results["memory"], dict_key, mem, testcase)
speedup = self.results["time"][dict_key][baseline_name] / ms
memory_savings = self.results["memory"][dict_key][baseline_name] / mem
self.add_kv(self.results["speedup"], dict_key, speedup, testcase)
self.add_kv(self.results["flops"], dict_key, flops, testcase)
self.add_kv(
self.results["memory_savings"], dict_key, memory_savings, testcase
)
desc = f"sparsity={sparsity}, ops={op_flops}, time={ms}, tflops={flops}, mem={mem}"
print(f"{testcase.name} --> {mask_config}, {desc}")
def get_inputs(self, config, device="cuda"):
# if mode = sddmm, a, b = query, key
# if mode = spmm, a, b = attn, value
if self.mode == "sddmm":
return [
torch.randn(
config.batch_size,
config.num_heads,
config.seq_length,
config.hidden_size // config.num_heads,
device=device,
dtype=self.dtype,
)
for _ in range(2)
]
else:
assert self.mode == "spmm"
attn = torch.randn(
config.batch_size,
config.num_heads,
config.seq_length,
config.seq_length,
device=device,
dtype=self.dtype,
)
value = torch.randn(
config.batch_size,
config.num_heads,
config.seq_length,
config.hidden_size // config.num_heads,
device=device,
dtype=self.dtype,
)
return [attn, value]
def torch_matmul_callable(self, a, b, mask, config):
input_a = mask_tensor(a, mask, config) if self.mode == "spmm" else a
input_b = b.transpose(-1, -2) if self.mode == "sddmm" else b
def torch_fn():
return torch.matmul(input_a, input_b)
return torch_fn
def get_triton_fn(self, mask, config, mode="sddmm"):
if mode == "sddmm":
return blocksparse_matmul(
layout=mask,
block=config.block_size,
mode="sdd",
device="cuda",
trans_a=False,
trans_b=True,
)
else:
assert mode == "spmm"
return blocksparse_matmul(
layout=mask,
block=config.block_size,
mode="dsd",
device="cuda",
trans_a=False,
trans_b=False,
)
def triton_callable(self, a, b, mask, config):
triton_kernel = self.get_triton_fn(mask, config, self.mode)
input_a = sparsify_tensor(a, mask, config) if self.mode == "spmm" else a
input_b = b
def triton_fn():
return triton_kernel(input_a, input_b)
return triton_fn
def prepare_sputnik_inputs(self, query, key, config, mask):
# - sparse / sputnik
mask_cs = torch.ones(
[config.batch_size, config.num_heads, config.seq_length, config.seq_length],
dtype=torch.bool,
device="cuda",
)
mask_cs = triton.testing.mask_tensor(
mask_cs, mask, config.block_size, value=False
)
# Sputnik kernels only handle fp32
query_cs = query.flatten(start_dim=0, end_dim=1).to(torch.float32)
key_cs = key.flatten(start_dim=0, end_dim=1).to(torch.float32)
query_cs = query_cs.contiguous()
key_cs = key_cs.transpose(-2, -1)
sparse_mask_cs = SparseCS(
mask_cs.flatten(start_dim=0, end_dim=1).contiguous(),
device=torch.device("cuda"),
)
return query_cs, key_cs, sparse_mask_cs
def sputnik_callable(self, a, b, mask, config):
assert self.mode == "sddmm"
a_cs, b_cs, sparse_mask_cs = self.prepare_sputnik_inputs(a, b, config, mask)
def sputnik_fn():
return _matmul_with_mask(a_cs, b_cs, sparse_mask_cs)
return sputnik_fn
def get_op_flops(self, mask, config):
# Measure total compute ops
op_flops = (
2 # FMA
* config.batch_size # batched matmul
* (config.hidden_size // config.num_heads) # Reduce dimension
* float(mask.sum())
* config.block_size
* config.block_size # Effective seq length * seq_length
* 1e-12 # TFlops
)
return op_flops
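    # Illustrative example of the formula above (assuming ~50% block density):
    # with batch_size=32, num_heads=16, seq_length=2048, block_size=64 and
    # hidden_size=1024, the blocked mask is a 32x32 grid per head, so
    # mask.sum() ~= 16 * 512 = 8192 non-zero blocks. The per-head reduce
    # dimension is 1024 // 16 = 64, giving
    #   2 * 32 * 64 * 8192 * 64 * 64 * 1e-12 ~= 0.137 TFlop per call.
    # Note that num_heads is not a separate factor here: it is already folded
    # into mask.sum(), which counts non-zero blocks across all heads.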
def check_all(self, tests, a, b):
ref_test = tests[0]
ref_out = ref_test.prepare_callable(a, b, ref_test.mask, ref_test.config)()
res_test = tests[1]
res_out = res_test.prepare_callable(a, b, res_test.mask, res_test.config)()
self.check_accuracy(ref_out, res_out, ref_test.mask, ref_test.config)
def check_accuracy(self, ref_full, res_bsr, mask, config):
if self.mode == "sddmm":
# Get the dense representation of the bsr tensor
# Use triton sparse * dense multiplication to get the dense tensor back
sparse_dot_dsd = blocksparse_matmul(
layout=mask,
block=config.block_size,
mode="dsd",
device="cuda",
trans_a=False,
trans_b=False,
)
identity = torch.eye(
config.seq_length, config.seq_length, device=device, dtype=self.dtype
)
identity = identity.expand(config.batch_size, config.num_heads, -1, -1)
res = sparse_dot_dsd(res_bsr, identity)
# Get the res where values are masked. Expand the blocked mask
# ref = triton.testing.mask_tensor(ref_full, mask, config.block_size)
full_mask = densify_mask(mask, config)
ref = ref_full * full_mask.to(dtype=self.dtype, device=device)
try:
assert torch.allclose(ref, res, atol=1e-3, rtol=1e-3)
except RuntimeError:
pass
except AssertionError:
raise
else:
assert self.mode == "spmm"
# Both are dense outputs
try:
assert torch.allclose(ref_full, res_bsr, atol=1e-3, rtol=1e-3)
except RuntimeError:
pass
except AssertionError:
import pdb
pdb.set_trace()
raise
class DifferentPatternExperiment(Experiment):
"""
    In this experiment, we check whether the sparsity pattern (e.g. BigBird,
    lower triangular) changes the performance of the different kernels. The idea
    is to check whether changing the sparsity pattern, while keeping the total
    sparsity ratio the same, leads to any performance differences.
    We perform two experiments:
    1) LowerTriangularMask vs RandomMask - both have ~50% sparsity.
    2) BigBird mask vs RandomMask - both have the same sparsity.
"""
def __init__(self, mode, dtype, do_accuracy_check, profile_sputnik=False):
super(DifferentPatternExperiment, self).__init__(
mode, dtype, do_accuracy_check, profile_sputnik
)
def gen_config(self):
batch_sizes = [32]
heads = [16]
seq_lengths = [1024, 2048]
block_sizes = [64]
hidden_sizes = [1024, 4096, 8192]
for batch in batch_sizes:
for hidden_size in hidden_sizes:
for head in heads:
for seq in seq_lengths:
for block in block_sizes:
entry = {
"batch_size": batch,
"num_heads": head,
"seq_length": seq,
"block_size": block,
"hidden_size": hidden_size,
}
yield entry
def plot(self, sparsity, config, pattern_name):
desc = [
f"bs={config.batch_size}",
f"nheads={config.num_heads}",
f"block={config.block_size}",
f"dtype={self.dtype}",
]
title_suffix = ",".join(desc)
pretty_barplot(
self.results["speedup"],
title=f"{self.mode} - Pattern experiment ({sparsity}%) - speedup\n"
+ title_suffix,
filename=f"same_sparsity_{self.mode}_{self.dtype}_{pattern_name}_time.svg",
dash_key="pytorch",
units="Speedup normalized to torch_matmul",
)
pretty_barplot(
self.results["flops"],
title=f"{self.mode} - Pattern experiment ({sparsity}%) - throughput\n"
+ title_suffix,
filename=f"same_sparsity_{self.mode}_{self.dtype}_{pattern_name}_flops.svg",
dash_key="pytorch",
units="TFlops/s",
)
pretty_barplot(
self.results["memory_savings"],
title=f"{self.mode} - Pattern experiment ({sparsity}%) - memory savings\n"
+ title_suffix,
filename=f"same_sparsity_{self.mode}_{self.dtype}_{pattern_name}_memory.svg",
dash_key="pytorch",
units="Memory savings normalized to torch_matmul",
)
def run(self):
for MaskGenType in [LowerTriangularAttentionMask, BigBirdAttentionMask]:
self.reset_results()
for config in self.gen_config():
# Get pattern mask
pattern_mask, pattern_config, pattern_name = get_mask(
MaskGenType, config
)
sparsity = get_sparsity(pattern_mask)
mask_prob = sparsity / 100
# Get random mask
random_mask, random_config, _ = get_mask(
RandomAttentionMask,
config,
[("mask_prob", mask_prob)],
)
print(f"{pattern_name} sparsity", get_sparsity(pattern_mask))
print("Random sparsity", get_sparsity(random_mask))
# Create input tensors
a, b = self.get_inputs(random_config)
tests = []
baseline_name = "torch-matmul"
tests.append(
TestCase(
self.torch_matmul_callable,
random_mask,
random_config,
f"{baseline_name}",
)
)
tests.append(
TestCase(
self.triton_callable,
random_mask,
random_config,
"triton-random",
)
)
tests.append(
TestCase(
self.triton_callable,
pattern_mask,
pattern_config,
f"triton-{pattern_name}",
)
)
if self.profile_sputnik and self.mode == "sddmm":
tests.append(
TestCase(
self.sputnik_callable,
random_mask,
random_config,
"sputnik-random",
)
)
tests.append(
TestCase(
self.sputnik_callable,
pattern_mask,
pattern_config,
f"sputnik-{pattern_name}",
)
)
dict_key = f"hidden={random_config.hidden_size},seq_len={random_config.seq_length}"
self.bench_all(
a,
b,
tests,
random_config,
sparsity,
baseline_name,
self.get_op_flops(random_mask, random_config),
dict_key,
)
ideal_testcase = TestCase(None, None, None, "Ideal")
ideal_speedup = round(100 / (100 - sparsity), 1)
self.add_kv(
self.results["speedup"], dict_key, ideal_speedup, ideal_testcase
)
self.add_kv(
self.results["memory_savings"],
dict_key,
ideal_speedup,
ideal_testcase,
)
self.plot(sparsity, random_config, pattern_name)
class VarySparsityExperiment(Experiment):
"""
    In this experiment, we check how the sparsity ratio affects performance.
"""
def __init__(self, mode, dtype, do_accuracy_check, profile_sputnik=False):
super(VarySparsityExperiment, self).__init__(
mode, dtype, do_accuracy_check, profile_sputnik
)
def gen_config(self):
batch_sizes = [32]
heads = [16]
seq_lengths = [2048]
hidden_sizes = [1024, 8192]
block_sizes = [64]
for batch in batch_sizes:
for seq in seq_lengths:
for head in heads:
for block in block_sizes:
for hidden_size in hidden_sizes:
entry = {
"batch_size": batch,
"num_heads": head,
"seq_length": seq,
"block_size": block,
"hidden_size": hidden_size,
}
yield entry
def plot(self, sparsity, config, pattern_name):
desc = [
f"bs={config.batch_size}",
f"nheads={config.num_heads}",
f"block={config.block_size}",
f"dtype={self.dtype}",
f"seq_len={config.seq_length}",
]
title_suffix = ",".join(desc)
pretty_barplot(
self.results["speedup"],
title=f"{self.mode} - SparsityRatio experiment speedup\n" + title_suffix,
filename=f"vary_sparsity_{self.mode}_{self.dtype}_{pattern_name}_time.svg",
dash_key="pytorch",
units="Speedup normalized to torch_matmul",
)
pretty_barplot(
self.results["flops"],
title=f"{self.mode} - SparsityRatio experiment throughput\n" + title_suffix,
filename=f"vary_sparsity_{self.mode}_{self.dtype}_{pattern_name}_flops.svg",
dash_key="pytorch",
units="TFlops/s",
)
pretty_barplot(
self.results["memory_savings"],
title=f"{self.mode} - SparsityRatio experiment memory savings\n"
+ title_suffix,
filename=f"vary_sparsity_{self.mode}_{self.dtype}_{pattern_name}_memory.svg",
dash_key="pytorch",
units="Memory savings normalized to torch_matmul",
)
def run(self):
self.reset_results()
random_config = None
for config in self.gen_config():
for x in range(10, 100, 10):
mask_prob = x / 100.0
# Get random mask
random_mask, random_config, _ = get_mask(
RandomAttentionMask,
config,
[
("mask_prob", mask_prob),
],
)
sparsity = get_sparsity(random_mask)
print("Random sparsity", get_sparsity(random_mask))
# Create input tensors
a, b = self.get_inputs(random_config)
tests = []
baseline_name = "torch-matmul"
tests.append(
TestCase(
self.torch_matmul_callable,
random_mask,
random_config,
f"{baseline_name}",
)
)
tests.append(
TestCase(
self.triton_callable,
random_mask,
random_config,
"triton-random",
)
)
if self.profile_sputnik and self.mode == "sddmm":
tests.append(
TestCase(
self.sputnik_callable,
random_mask,
random_config,
"sputnik-random",
)
)
dict_key = f"sp={mask_prob},hidden={random_config.hidden_size}"
self.bench_all(
a,
b,
tests,
random_config,
sparsity,
baseline_name,
self.get_op_flops(random_mask, random_config),
dict_key,
)
ideal_testcase = TestCase(None, None, None, "Ideal")
ideal_speedup = round(100 / (100 - mask_prob * 100), 1)
self.add_kv(
self.results["speedup"], dict_key, ideal_speedup, ideal_testcase
)
self.add_kv(
self.results["memory_savings"],
dict_key,
ideal_speedup,
ideal_testcase,
)
self.plot(None, random_config, "random")
class BlockSizeExperiment(Experiment):
"""
    In this experiment, we analyze how increasing the block size affects
    performance. We use the lower triangular pattern. As we increase the
    block size, the blocks straddling the diagonal have to do more unnecessary
    computation (the effective sparsity decreases).
"""
def __init__(self, mode, dtype, do_accuracy_check, profile_sputnik=False):
super(BlockSizeExperiment, self).__init__(
mode, dtype, do_accuracy_check, profile_sputnik
)
def gen_config(self):
batch_sizes = [32]
heads = [16]
seq_lengths = [2048]
block_sizes = [32, 64, 128, 256]
hidden_sizes = [1024, 8192]
for batch in batch_sizes:
for seq in seq_lengths:
for hidden_size in hidden_sizes:
for block in block_sizes:
for head in heads:
entry = {
"batch_size": batch,
"num_heads": head,
"seq_length": seq,
"block_size": block,
"hidden_size": hidden_size,
}
yield entry
def plot(self, sparsity, config, pattern_name):
pretty_barplot(
self.results["speedup"],
title=f"{self.mode} - BlockSize experiment speedup\n"
f"bs={config.batch_size}, nheads={config.num_heads}, seq_len={config.seq_length}, dtype={self.dtype}",
filename=f"vary_block_size_{self.mode}_{self.dtype}_{pattern_name}_time.svg",
dash_key="pytorch",
units="Speedup normalized to torch matmul",
)
pretty_barplot(
self.results["flops"],
title=f"{self.mode} - BlockSize experiment throughput\n"
f"bs={config.batch_size}, nheads={config.num_heads}, seq_len={config.seq_length}, dtype={self.dtype}",
filename=f"vary_block_size_{self.mode}_{self.dtype}_{pattern_name}_flops.svg",
dash_key="pytorch",
units="TFlops/s",
)
pretty_barplot(
self.results["memory_savings"],
title=f"{self.mode} - BlockSize experiment memory savings\n"
f"bs={config.batch_size}, nheads={config.num_heads}, seq_len={config.seq_length}, dtype={self.dtype}",
filename=f"vary_block_size_{self.mode}_{self.dtype}_{pattern_name}_memory.svg",
dash_key="pytorch",
units="Memory savings normalized to torch matmul",
)
def get_op_flops(self, mask, config):
        # Op flops here refer to the original, non-blocked lower-triangular mask,
        # where no unnecessary elements are unmasked. This is the full batched-matmul
        # flop count scaled by (n + 1) / (2n), the fraction of entries kept by an
        # n x n lower-triangular mask.
        kept_fraction = (config.seq_length + 1) / (2.0 * config.seq_length)
        op_flops = (
            2  # FMA
            * config.batch_size  # batched matmul
            * config.num_heads
            * (config.hidden_size // config.num_heads)  # Reduce dimension
            * config.seq_length
            * config.seq_length
            * kept_fraction
            * 1e-12  # TFlops
        )
return op_flops
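    # Sanity check: for seq_length=2048 the scaling factor is
    # (2048 + 1) / (2 * 2048) = 2049 / 4096 ~= 0.5002, so op_flops is roughly
    # half of the dense batched-matmul flop count, independent of block size.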
def run(self):
self.reset_results()
lt_config = None
for config in self.gen_config():
lt_mask, lt_config, lt_name = get_mask(
LowerTriangularAttentionMask,
config,
)
sparsity = get_sparsity(lt_mask)
print("Effective sparsity", sparsity)
if lt_config.seq_length == 2048:
plot_mask(lt_mask, lt_config, f"lt_mask_{lt_config.block_size}.svg")
# Create input tensors
a, b = self.get_inputs(lt_config)
tests = []
baseline_name = "torch-matmul"
tests.append(
TestCase(
self.torch_matmul_callable, lt_mask, lt_config, f"{baseline_name}"
)
)
tests.append(
TestCase(self.triton_callable, lt_mask, lt_config, "triton-random")
)
if self.profile_sputnik and self.mode == "sddmm":
tests.append(
TestCase(
self.sputnik_callable, lt_mask, lt_config, "sputnik-random"
)
)
dict_key = f"hidden={lt_config.hidden_size}, block={lt_config.block_size}"
self.bench_all(
a,
b,
tests,
lt_config,
sparsity,
baseline_name,
self.get_op_flops(lt_mask, lt_config),
dict_key,
)
ideal_testcase = TestCase(None, None, None, "Ideal")
seq_len = lt_config.seq_length
total_elems = seq_len * seq_len
nnz = seq_len * (seq_len + 1) / 2
ideal_speedup = (1.0 * total_elems) / nnz
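            # e.g. seq_len=2048: ideal_speedup = 2048^2 / (2048 * 2049 / 2)
            #                                  = 2 * 2048 / 2049 ~= 2.0x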
self.add_kv(
self.results["speedup"], dict_key, ideal_speedup, ideal_testcase
)
self.add_kv(
self.results["memory_savings"],
dict_key,
ideal_speedup,
ideal_testcase,
)
self.plot(None, lt_config, lt_name)
if __name__ == "__main__":
for MaskGen in [
RandomAttentionMask,
LowerTriangularAttentionMask,
BigBirdAttentionMask,
AxialAttentionMask,
LocalAttentionMask,
]:
mask_gen = MaskGen()
mask, config, name = mask_gen()
plot_mask(mask, config, f"{name}.svg")
for mode in ["sddmm", "spmm"]:
DifferentPatternExperiment(mode, torch.float16, True).run()
VarySparsityExperiment(mode, torch.float16, True).run()
BlockSizeExperiment(mode, torch.float16, True).run()
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_blocksparse_transformers.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from functools import partial, reduce
import timm
import torch
import torch.nn as nn
from timm.models.layers import Mlp as TimmMlp
from timm.models.vision_transformer import Attention as TimmAttention
from timm.models.vision_transformer import Block as TimmBlock
from torch.utils import benchmark
from utils import benchmark_main_helper
import xformers.ops as xops
def replace_module(module: nn.Module, replace_class, factory):
if isinstance(module, replace_class):
return factory(module)
module_output = module
for name, child in module.named_children():
module_output.add_module(name, replace_module(child, replace_class, factory))
del module
return module_output
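# For example (as done by mod_memeff_attn below),
#   replace_module(model, TimmAttention, partial(TimmMemEffAttention, op=None))
# recursively swaps every timm Attention submodule for the memory-efficient
# variant while leaving the rest of the model untouched.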
class TimmMemEffAttention(nn.Module):
def __init__(self, attn: TimmAttention, op=None):
super().__init__()
        self.op = op
self.num_heads = attn.num_heads
self.scale = attn.scale
self.qkv = attn.qkv
self.attn_drop = attn.attn_drop
self.proj = attn.proj
self.proj_drop = attn.proj_drop
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
q, k, v = xops.unbind(qkv, dim=2)
x = xops.memory_efficient_attention(q, k, v, op=self.op).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class TimmSwiGLU(nn.Module):
def __init__(self, mlp: TimmMlp, op=None) -> None:
super().__init__()
self.fc1 = mlp.fc1
self.swiglu = xops.SwiGLU(
in_features=mlp.fc1.in_features,
hidden_features=mlp.fc1.out_features,
bias=True,
)
self.op = op
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.swiglu(x)
def mod_memeff_attn(model: nn.Module, op=None) -> nn.Module:
return replace_module(model, TimmAttention, partial(TimmMemEffAttention, op=op))
def mod_mlp_to_swiglu(model: nn.Module, op=None) -> nn.Module:
def _mlp_to_swiglu(block: TimmBlock):
block.mlp = TimmSwiGLU(block.mlp, op=op)
return block
return replace_module(model, TimmBlock, _mlp_to_swiglu)
mod_mlp_to_eagr_swiglu = partial(mod_mlp_to_swiglu, op=xops.SwiGLUEagerOp)
mod_mlp_to_fast_swiglu = partial(mod_mlp_to_swiglu, op=None)
def compose(*fns):
def compose2(f, g):
return lambda *a, **kw: f(g(*a, **kw))
return reduce(compose2, fns)
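# compose applies its arguments right to left, i.e. compose(f, g)(x) == f(g(x)),
# so compose(mod_mlp_to_fast_swiglu, mod_memeff_attn) first swaps the attention,
# then converts the MLPs to SwiGLU.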
MODELS = [
# model_name, model_factory, input_shape
("ViT-B/16", timm.models.vit_base_patch16_224, [512, 3, 224, 224]),
("ViT-B/8", timm.models.vit_base_patch8_224, [64, 3, 224, 224]),
("ViT-L/16", timm.models.vit_large_patch16_224, [128, 3, 224, 224]),
("ViT-g/14", timm.models.vit_giant_patch14_224, [32, 3, 224, 224]),
]
MODIFIERS = [
["mlp", lambda x: x],
["mlp+memeff", compose(mod_mlp_to_fast_swiglu, mod_memeff_attn)],
["swiglu", mod_mlp_to_eagr_swiglu],
["swiglu+fast_swiglu", mod_mlp_to_fast_swiglu],
["swiglu+fast_swiglu+memeff", compose(mod_mlp_to_fast_swiglu, mod_memeff_attn)],
]
def product_dict(**kwargs):
keys = kwargs.keys()
vals = kwargs.values()
for instance in itertools.product(*vals):
yield dict(zip(keys, instance))
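# e.g. list(product_dict(a=[1, 2], b=["x"])) == [{"a": 1, "b": "x"}, {"a": 2, "b": "x"}]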
CASES = list(
product_dict(
model_info=MODELS,
dtype=[torch.half],
)
)
def benchmark_transformer(model_info, dtype):
device = "cuda"
model_name, model_factory, input_shape = model_info
inp = torch.randn(input_shape, dtype=dtype, device=device)
for mod_name, mod_apply in MODIFIERS:
model: nn.Module = model_factory()
model = mod_apply(model).to(device).to(dtype)
# Make sure we don't have errors
out = model(inp)
grad = out.clone()
out.backward(grad)
yield benchmark.Timer(
stmt="model(inp).backward(grad)",
globals={
"model": model,
"inp": inp,
"grad": grad,
},
label="fw+bw",
description=mod_name,
sub_label=model_name,
)
benchmark_main_helper(benchmark_transformer, CASES)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch
import triton
from xformers.benchmarks.utils import TestCase, pretty_plot, pretty_print
from xformers.components.reversible import ReversibleSequence
SHAPES = [(16384, 32), (2048, 256), (128, 4096)]
DEPTH = [4, 32, 256]
def bench_revnet(backward: bool):
device = torch.device("cuda")
bw = "+bw" if backward else ""
for dtype in [torch.float16, torch.float32]:
results: Dict[str, Any] = {}
for B, K in SHAPES:
for depth in DEPTH:
f = torch.nn.Linear(K, K).to(device=device, dtype=dtype)
g = torch.nn.Linear(K, K).to(device=device, dtype=dtype)
revseq = ReversibleSequence(
torch.nn.ModuleList([torch.nn.ModuleList([f, g])] * depth)
)
revseq = revseq.to(device=device, dtype=dtype)
a = torch.rand(
1, B, K, device=device, dtype=dtype, requires_grad=backward
)
b = torch.rand(
1, B, K * 2, device=device, dtype=dtype, requires_grad=backward
)
def normal_step():
y = a
for _ in range(depth):
y = y + f(y)
y = y + g(y)
if backward:
torch.norm(y).backward()
return y
def reversible_step():
y = revseq(b)
if backward:
torch.norm(y).backward()
return y
for testcase in [
TestCase(normal_step, f"residual - fw{bw}"),
TestCase(reversible_step, f"reversible - fw{bw}"),
]:
time = triton.testing.do_bench(testcase.function)[0]
key = f"Batch={B}, Features={K}, Depth={depth}"
if key not in results:
results[key] = {}
results[key][testcase.name] = f"{time:.2f}"
pretty_print(
results,
title=f"\n --- Type: {dtype} --- ",
units="runtime in ms, lower is better",
)
pretty_plot(
results,
title=f"RevNet-FW{bw}-{dtype}",
units="runtime in ms, lower is better",
dash_key="torch",
)
for bw in [False, True]:
bench_revnet(bw)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_revnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from contextlib import nullcontext
from functools import partial
from typing import Any
import torch
from torch.utils import benchmark
from utils import benchmark_main_helper
import xformers.ops.swiglu_op as xsw
min_run_time = 0.5
device = torch.device("cuda")
SHAPES = [
# Format: [inp.shape[0], inp.shape[1], hidden.shape[1]]
# ViT-Giant
(9456, 1536, 2736),
(4440, 1536, 2736),
(4728, 1536, 2736),
# Some smaller shapes as well
(4728, 1536, 1024),
# GPT-3 (small)
(32768, 2048, 5632),
# Chinchilla
(32768, 8192, 22016),
]
# OP = xsw._SwiGLUDecomposedOp
# OP = xsw.SwiGLUFusedOp
OP = xsw.SwiGLUPackedFusedOp
def product_dict(**kwargs):
keys = kwargs.keys()
vals = kwargs.values()
for instance in itertools.product(*vals):
yield dict(zip(keys, instance))
CASES = list(
product_dict(
shape=SHAPES,
dtype=[torch.bfloat16, torch.half, "autocast_half"],
bias=[True, False],
)
)
DTYPE2STR = {
torch.bfloat16: "b16 ",
torch.half: "f16 ",
"autocast_half": "f16.ac",
}
def benchmark_swiglu(shape, dtype, bias: bool):
if dtype == "autocast_half":
inp_dtype, model_dtype, autocast = torch.float, torch.float, True
else:
inp_dtype, model_dtype, autocast = dtype, dtype, False
x = torch.randn(shape[:2], device=device, dtype=inp_dtype)
module = (
xsw.SwiGLU(in_features=shape[1], hidden_features=shape[2], bias=bias)
.to(device)
.to(model_dtype)
)
dtype_str = DTYPE2STR.get(dtype, dtype)
bstr = "bias" if bias else "nobi"
sub_label = f"{dtype_str} B={shape[0]}, I={shape[1]}, H={shape[2]} {bstr}"
params = module._ordered_params()
PREFIX = 'with torch.autocast("cuda", dtype=torch.half):\n ' if autocast else ""
yield benchmark.Timer(
stmt=f"{PREFIX}fn(x, *args)",
globals={
"x": x,
"args": params,
"fn": partial(xsw.swiglu, op=OP),
},
label="swiglu_fw",
description=OP.NAME,
sub_label=sub_label,
)
yield benchmark.Timer(
stmt=f"{PREFIX}fn(x, *args)",
globals={
"x": x,
"args": params,
"fn": partial(xsw.swiglu, op=xsw.SwiGLUEagerOp),
},
label="swiglu_fw",
description="eager",
sub_label=sub_label,
)
def benchmark_swiglu_bw(shape, dtype, bias: bool):
if dtype == "autocast_half":
inp_dtype, model_dtype = torch.float, torch.float
cm: Any = partial(torch.cuda.amp.autocast, enabled=True, dtype=torch.float16)
else:
inp_dtype, model_dtype = dtype, dtype
cm = nullcontext
x = torch.randn(shape[:2], device=device, dtype=inp_dtype)
x.requires_grad_()
module = (
xsw.SwiGLU(in_features=shape[1], hidden_features=shape[2], bias=bias)
.to(device)
.to(model_dtype)
)
dtype_str = DTYPE2STR.get(dtype, dtype)
bstr = "bias" if bias else "nobi"
sub_label = f"{dtype_str} B={shape[0]}, I={shape[1]}, H={shape[2]} {bstr}"
params = module._ordered_params()
with cm():
out = xsw.swiglu(x, *params, op=OP)
grad = torch.zeros_like(out)
yield benchmark.Timer(
stmt="out.backward(grad, retain_graph=True)",
globals={
"out": out,
"grad": grad,
},
label="swiglu_bw",
description=OP.NAME,
sub_label=sub_label,
)
del out
with cm():
out = xsw.swiglu(x, *params, op=xsw.SwiGLUEagerOp)
yield benchmark.Timer(
stmt="out.backward(grad, retain_graph=True)",
globals={
"out": out,
"grad": grad,
},
label="swiglu_bw",
description="eager",
sub_label=sub_label,
)
benchmark_main_helper(benchmark_swiglu, CASES, min_run_time=min_run_time)
benchmark_main_helper(benchmark_swiglu_bw, CASES, min_run_time=min_run_time)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_swiglu.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch
import triton
from xformers.benchmarks.utils import TestCase, pretty_plot, pretty_print
from xformers.triton import FusedLayerNorm
SHAPES = [
(8, 256, 512),
(8, 512, 1024),
(4, 1024, 1024),
(2, 2048, 2048),
(2, 4096, 4096),
(1, 2048, 12288),
]
def to_gbs_fw(a, ms):
# Read and write the full array
return (2 * a.numel() * a.element_size() * 1e-9) / (ms * 1e-3)
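# Example: a (2, 4096, 4096) fp16 tensor has 33_554_432 elements of 2 bytes each,
# so one read plus one write moves ~0.134 GB; a 0.5 ms kernel therefore reports
# ~268 GB/s.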
def bench_layernorm(backward: bool):
device = torch.device("cuda")
for dtype in [
torch.float16,
torch.bfloat16,
torch.float32,
]:
results: Dict[str, Any] = {}
for B, M, K in SHAPES:
a = torch.rand(B, M, K, device=device, dtype=dtype, requires_grad=backward)
            # PyTorch layer norm
torch_layernorm = torch.nn.LayerNorm([K]).to(dtype=dtype, device=device)
# pyre-ignore[16]: TODO(T101400990): Pyre did not recognize the
            # `FusedLayerNorm` import.
# Fused layernorm equivalent
fused_layernorm = FusedLayerNorm([K]).to(dtype=dtype, device=device)
def torch_step(x):
y = torch_layernorm(x)
if backward:
torch.norm(y).backward()
return y
def triton_step(x):
y = fused_layernorm(x)
if backward:
torch.norm(y).backward()
return y
for testcase in [
TestCase(
torch_step,
"pytorch - fw{}".format("+bw" if backward else ""),
),
TestCase(
triton_step,
"triton - fw{}".format("+bw" if backward else ""),
),
]:
time = triton.testing.do_bench(lambda: testcase.function(a))[0]
key = f"B={B}, M={M}, K={K}"
if key not in results:
results[key] = {}
# Record BW
bandwidth = to_gbs_fw(a, time)
results[key][testcase.name] = f"{bandwidth:.1f}"
pretty_print(results, title="\n --- Type: {} --- ".format(dtype), units="GB/s")
pretty_plot(
results,
title="LayerNorm-FW{}-{}".format("+BW" if backward else "", dtype),
units="GB/s",
dash_key="pytorch",
)
for bw in [False, True]:
bench_layernorm(bw)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_triton_layernorm.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Dict
import torch
import triton
from xformers.benchmarks.utils import TestCase, pretty_plot, pretty_print
from xformers.components.attention.attention_mask import AttentionMask
from xformers.components.attention.core import scaled_dot_product_attention
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
SHAPES = [
(8, 128, 2096),
(8, 1024, 256),
(12, 512, 1024),
(128, 128, 512),
(8, 2048, 4096),
(16, 1024, 5120),
(512, 128, 2560),
]
BLOCK_SIZES = [128]
N_HEADS = [8, 32]
def bench_blocksparse_compare(backward: bool):
device = torch.device("cuda")
bw = "+bw" if backward else ""
use_amp = True
_use_cuda = True
for dtype in [torch.float16, torch.float32]:
datatype = "fp16" if dtype == torch.float16 else "fp32"
results: Dict[str, Any] = {}
results_mem: Dict[str, Any] = {}
for BS in BLOCK_SIZES:
for heads in N_HEADS:
for B, M, K in SHAPES:
q = torch.randn(
(B, heads, M, K // heads),
requires_grad=backward,
device=device,
dtype=dtype,
)
k = q
v = q
# Mask with causal flag
m_att_mask = AttentionMask.make_causal(
M, M, device=device, dtype=dtype
)
# Custom causal tensor mask
m_custom = torch.triu(
torch.ones(M, M, device=device, dtype=dtype) * float("-inf"),
diagonal=1,
)
def blocksparse_attention():
with torch.cuda.amp.autocast(enabled=use_amp):
y = scaled_dot_product_attention(
q=q, k=k, v=v, att_mask=m_att_mask, block_size=BS
)
if backward:
torch.norm(y).backward()
return y
def sdp_attention():
with torch.cuda.amp.autocast(enabled=use_amp):
y = scaled_dot_product_attention(
q=q, k=k, v=v, att_mask=m_custom, block_size=BS
)
if backward:
torch.norm(y).backward()
return y
for testcase in [
TestCase(blocksparse_attention, f"blocksparse - fw{bw}"),
TestCase(sdp_attention, f"standard sdp - fw{bw}"),
]:
if _use_cuda:
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
torch.cuda.synchronize()
time = triton.testing.do_bench(testcase.function)[0]
if _use_cuda:
torch.cuda.synchronize()
max_memory = torch.cuda.max_memory_allocated() / 2**20
else:
max_memory = -1
key = f"B={B},M={M},K={K},NH={heads}"
if key not in results_mem:
results_mem[key] = {}
results_mem[key][testcase.name] = f"{max_memory:.1f}"
if key not in results:
results[key] = {}
results[key][testcase.name] = f"{time:.2f}"
pretty_print(
results,
title=f"\n --- Type: {datatype} Block Size: {BS} --- ",
units="runtime in ms",
)
pretty_print(
results_mem,
title=f"\n --- Type: {datatype} Block Size: {BS} --- ",
units="peak memory usage in MB",
)
pretty_plot(
results,
title=f"Causal Blocksparse Runtime FW{bw.upper()} {datatype} Blocksize:{BS}",
units="runtime in ms",
dash_key="torch",
legend_loc="upper left",
)
pretty_plot(
results_mem,
title=f"Causal Blocksparse Memory FW{bw.upper()} {datatype} Blocksize:{BS}",
units="peak memory usage in MB",
dash_key="torch",
legend_loc="upper left",
)
for bw in [False, True]:
bench_blocksparse_compare(bw)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_causal_blocksparse.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional
import torch
import triton
from xformers.benchmarks.utils import TestCase, pretty_plot, pretty_print
from xformers.components import Activation, build_activation
from xformers.triton.fused_linear_layer import FusedLinear
SHAPES = [
(8, 512, 256), # Batch x Seq x Embedding
(8, 512, 512),
(4, 512, 1024),
(2, 512, 2048),
(2, 512, 4096),
(2, 512, 8192),
]
# Switch PyTorch to TF32 accumulations, Triton does that also
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
def get_metrics_transform(
activation: Optional[Activation],
a: torch.Tensor,
w: torch.Tensor,
b: Optional[torch.Tensor],
backward: bool,
):
# all operations will involve a * weight.
flop = a.shape[0] * a.shape[1] * w.shape[1] * (2 * a.shape[2] - 1)
# optional activation on top
if activation is not None:
flop += a.numel()
# optionally * 2 (before the bias) if backward
if backward:
flop *= 2
# backward will also output a gradient with respect to the bias
# which consolidates on all the activation gradient
flop += a.shape[0] * a.shape[1] * w.shape[1]
    # backward will also output another gradient with respect to the weight,
# which is another matmul, in between the grad_out and the inputs this time
flop += a.shape[0] * a.shape[1] * w.shape[1] * (2 * a.shape[2] - 1)
# optional bias on top
if b is not None:
flop += b.numel()
def metric_conversion(ms):
# Returns TFlops/second
return flop * 1e-12 / (ms * 1e-3)
return metric_conversion
def bench_linear(activations: List[Optional[Activation]]):
device = torch.device("cuda")
for dtype in [
torch.float32,
torch.float16,
]:
for backward in [True, False]:
for activation in activations:
results: Dict[str, Any] = {}
for bias in [False, True]:
for B, M, K in SHAPES:
a = torch.rand(
B, M, K, device=device, dtype=dtype, requires_grad=backward
)
# Pytorch linear layer + activation
torch_linear = torch.nn.Linear(K, 4 * K, bias=bias).to(
dtype=dtype, device=device
)
torch_activation = build_activation(activation)
# Fused layer equivalent
fused_linear = FusedLinear(
K, 4 * K, bias=bias, activation=activation
).to(dtype=dtype, device=device)
def torch_step(x):
y = torch_activation(torch_linear(x))
if backward:
torch.norm(y).backward()
return y
def triton_step(x):
y = fused_linear(x)
if backward:
torch.norm(y).backward()
return y
metrics_transform = get_metrics_transform(
activation,
a,
torch_linear.weight,
torch_linear.bias,
backward,
)
for testcase in [
TestCase(
torch_step,
"pytorch - {} - {} bias - fw{}".format(
activation,
"no" if not bias else "",
"+bw" if backward else "",
),
),
TestCase(
triton_step,
"triton - {} - {} bias - fw{}".format(
activation,
"no" if not bias else "",
"+bw" if backward else "",
),
),
]:
time = triton.testing.do_bench(
lambda: testcase.function(a)
)[0]
key = f"B={B}, M={M}, K={K}"
if key not in results:
results[key] = {}
metric = metrics_transform(time)
results[key][testcase.name] = f"{metric:.1f}"
pretty_print(
results,
title="\n --- Type: {} ---".format(dtype),
units="TFlops/s",
)
_type = "_fp16" if dtype == torch.float16 else "_fp32"
title = "FusedLinear" + _type + "_FW"
if backward:
title += "_BW"
title += "_" + activation.value if activation else "_none"
pretty_plot(results, title, "TFlops/s", dash_key="pytorch")
activations = [ac for ac in Activation] + [None] # type: ignore
bench_linear(activations) # type: ignore
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_triton_fused_linear.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Benchmark the blocksparse operations:
# matrix multiply and softmax
# Matmul can be of three types:
# - Dense x Dense (COO) -> Sparse
# - Sparse x Dense -> Dense
# - Dense x Sparse -> Dense
from typing import Any, Dict
import torch
import triton
from triton.ops.blocksparse import matmul as blocksparse_matmul
from xformers.benchmarks.utils import TestCase, pretty_plot, pretty_print
from xformers.components.attention.core import SparseCS, _matmul_with_mask
def bench_matmul(dtype: torch.dtype, shapes):
results: Dict[str, Any] = {}
Z, H = 1, 1
for M, N, K in shapes:
modes = [(mode, block) for mode in ["sdd", "dsd"] for block in [16, 32, 64]]
for mode, block in modes:
# create inputs
a = torch.randn((Z, H, M, K), dtype=dtype, device="cuda")
b = torch.randn((Z, H, K, N), dtype=dtype, device="cuda")
shape = {
"sdd": (M, N),
"dsd": (a.shape[2], a.shape[3]),
"dds": (b.shape[2], b.shape[3]),
}[mode]
# Pre-sparsify everything
_layout = torch.eye(shape[0] // block, shape[1] // block, dtype=torch.long)
# - blocksparse
layout = _layout.unsqueeze(0).expand(H, -1, -1)
a_triton = (
triton.testing.sparsify_tensor(a, layout, block) if mode == "dsd" else a
)
b_triton = (
triton.testing.sparsify_tensor(b, layout, block) if mode == "dds" else b
)
bsmm = blocksparse_matmul(
layout=layout,
block=block,
mode=mode,
device=torch.device("cuda"),
trans_a=False,
trans_b=False,
)
# - dense
ta = triton.testing.mask_tensor(a, layout, block) if mode == "dsd" else a
tb = triton.testing.mask_tensor(b, layout, block) if mode == "dds" else b
# - sparse / sputnik
mask = torch.ones_like(a, dtype=torch.float, device="cuda")
mask = triton.testing.mask_tensor(mask, layout, block, value=0.0)
a_cs = a.flatten(start_dim=0, end_dim=1).to(
torch.float32
) # Sputnik kernels only handle fp32
b_cs = b.flatten(start_dim=0, end_dim=1).to(torch.float32)
a_cs = a_cs.contiguous()
b_cs = b_cs.transpose(-2, -1).contiguous()
if mode == "sdd":
b_cs = b_cs.transpose(-2, -1)
# pyre-fixme[16]: TODO(T101400990): Pyre did not recognize the
# `SparseCS` import.
sparse_cs_mask = SparseCS(
mask.flatten(start_dim=0, end_dim=1).contiguous(),
device=torch.device("cuda"),
)
# The raw compute steps
op_flops = {
"sdd": 2 * Z * K * float(layout.sum()) * block * block,
"dsd": 2 * Z * N * float(layout.sum()) * block * block,
"dds": 2 * Z * M * float(layout.sum()) * block * block,
}[
mode
] * 1e-12 # TFlops
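            # With the eye() layout above, layout.sum() == shape[0] // block, so e.g.
            # M=N=K=1024, block=32 gives 2 * 1 * 1024 * 32 * 32 * 32 * 1e-12
            # ~= 6.7e-5 TFlop for the "sdd" case.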
def torch_step():
return torch.matmul(ta, tb)
def triton_step():
return bsmm(a_triton, b_triton)
def sparse_step():
if mode == "sdd":
return _matmul_with_mask(a_cs, b_cs, sparse_cs_mask)
else:
return sparse_cs_mask.spmm(b_cs)
# Run and measure, report perf in terms of TFlops
for testcase in [
TestCase(
torch_step,
f"pytorch - {mode} - {block}: ",
),
TestCase(
sparse_step,
f"sparse - {mode} - {block}: ",
),
TestCase(
triton_step,
f"triton - {mode} - {block}: ",
),
]:
ms = triton.testing.do_bench(lambda: testcase.function())[0]
key = f"M={M}, N={N}, K={K}"
if key not in results:
results[key] = {}
num_flops = op_flops / ms * 1e3 # Get to TFlop per second
results[key][testcase.name] = f"{num_flops:.1f}"
print(f"{key} - {testcase.name} - {num_flops:.2f}TFlops")
pretty_print(
results,
title="\n ------------- Type: {} -------------".format(dtype),
units="TFlops/s",
)
pretty_plot(
results,
title=f"Sparse/Blocksparse throughput - {dtype}",
filename=f"blocksparse_{dtype}.png",
dash_key="pytorch",
units="TFlops/s",
)
shapes = [(k, k, k) for k in [128, 512, 1024, 2048, 4096]]
bench_matmul(torch.float16, shapes)
bench_matmul(torch.float32, shapes)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_triton_blocksparse.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.benchmarks.utils import TestCase, bench_functions
from xformers.triton.softmax import log_softmax as triton_log_softmax
from xformers.triton.softmax import softmax as triton_softmax
SHAPES = [
(8, 384, 128),
(8, 784, 512),
(4, 1024, 768),
(4, 2048, 1024),
(2, 2048, 2048),
(2, 2048, 4096),
(2, 4096, 4096),
(1, 2048, 12288),
]
def pytorch_fw_bw(x):
y = torch.norm(torch.softmax(x, dim=-1))
y.backward()
def triton_causal_fw(x):
_ = triton_softmax(x, causal=True)
def triton_fw_bw(x):
y = torch.norm(triton_softmax(x))
y.backward()
def triton_causal_fw_bw(x):
y = torch.norm(triton_softmax(x, causal=True))
y.backward()
def pytorch_log_fw_bw(x):
y = torch.norm(torch.log_softmax(x, dim=-1))
y.backward()
def triton_log_fw_bw(x):
y = torch.norm(triton_log_softmax(x))
y.backward()
# Test FW
def to_gbs_fw(a, ms):
# Read and write the full array
return (2 * a.numel() * a.element_size() * 1e-9) / (ms * 1e-3)
def to_gbs_fwbw(a, ms):
# same as above, but we do it twice (FW and then gradient)
return 2 * to_gbs_fw(a, ms)
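# Example: for the (1, 2048, 12288) fp16 shape above, one read plus one write of
# the input is ~0.10 GB, so a 1 ms forward pass reports ~100 GB/s; the fw+bw
# metric simply doubles that figure for the same measured time.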
bench_functions(
[
TestCase(lambda x: torch.softmax(x, dim=-1), "pytorch - fw"),
TestCase(triton_softmax, "triton - fw"),
TestCase(triton_causal_fw, "triton - causal - fw"),
TestCase(lambda x: torch.log_softmax(x, dim=-1), "pytorch - log - fw"),
TestCase(triton_log_softmax, "triton - log - fw"),
],
SHAPES,
to_gbs_fw,
"GB/s",
"Softmax_Bandwidth_FW_",
)
# Test FW+BW
bench_functions(
[
TestCase(pytorch_fw_bw, "pytorch - fw+bw"),
TestCase(triton_fw_bw, "triton - fw+bw"),
TestCase(triton_causal_fw_bw, "triton - causal - fw+bw"),
TestCase(pytorch_log_fw_bw, "pytorch - log - fw+bw"),
TestCase(triton_log_fw_bw, "triton - log - fw+bw"),
],
SHAPES,
to_gbs_fwbw,
"GB/s",
"Softmax_Bandwidth_FW_BW_",
)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_triton_softmax.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
import copy
import csv
import glob
import logging
import math
import os
import tempfile
from collections import defaultdict, namedtuple
from dataclasses import replace
from typing import Any, Dict, Generator, List, Set, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import tqdm
from torch.utils import benchmark
sns.set()
TestCase = namedtuple("TestCase", ["function", "name"])
_triton_is_available = torch.cuda.is_available()
if _triton_is_available:
try:
import triton
except ImportError as e:
logging.warning(f"Triton is not available: {e}.\nbench_functions")
_triton_is_available = False
def pretty_print(results, title, units):
"""Printout the contents of a dict as a human-readable and Markdown compatible array"""
print(title)
header = " Units: {:<45}".format(units)
print("| " + header + "|" + "".join("{0:<20}|".format(k) for k in results.keys()))
offset = len(header)
print(
"|-{}|".format("-" * offset)
+ "".join("{}|".format("-" * 20) for _ in results.keys())
)
workloads: Dict[str, Any] = {k: [] for v in results.values() for k in v.keys()}
for v in results.values():
for k in v.keys():
workloads[k].append(v[k])
for k, w in workloads.items():
print(
"| {0:<{offset}}|".format(k, offset=offset)
+ "".join("{:<20}|".format(v) for v in w)
)
print("")
def pretty_plot(
results, title, units: str, filename=None, dash_key="", legend_loc="lower right"
):
"""Graph out the contents of a dict.
Dash key means that if the result label has this key, then it will be displayed with a dash"""
if not filename:
filename = title + ".png"
# Sanitize the filename
filename = (
filename.replace(" ", "_").replace("/", "_").replace("-", "_").replace(":", "")
)
# Gather all the results in "collumns"
workloads: Dict[str, Any] = {k: [] for v in results.values() for k in v.keys()}
for v in results.values():
for k in v.keys():
workloads[k].append(float(v[k]))
# Make sure that the plot is big enough
f = plt.figure()
f.set_figwidth(6)
f.set_figheight(6)
# Display the collections
for k, v in workloads.items():
if dash_key and dash_key in k:
plt.plot(list(results.keys()), v, "--")
else:
plt.plot(list(results.keys()), v)
plt.title(title)
plt.legend(list(workloads.keys()), loc=legend_loc)
plt.ylabel(units)
plt.xticks(rotation=45)
plt.savefig(filename, bbox_inches="tight")
plt.close(f)
if _triton_is_available:
def bench_functions(
test_cases: List[TestCase], shapes, metric_transform, unit, title=""
):
device = torch.device("cuda")
for dtype in [torch.bfloat16, torch.float16, torch.float32]:
results: Dict[str, Any] = {}
for B, M, K in shapes:
a = torch.rand(B, M, K, device=device, dtype=dtype, requires_grad=True)
for testcase in test_cases:
time = triton.testing.do_bench(lambda: testcase.function(a))[0]
metric = metric_transform(a, time)
key = f"B={B}, M={M}, K={K}"
if key not in results:
results[key] = {}
results[key][testcase.name] = f"{metric:.1f}"
pretty_print(
results,
title=" ------------- Type: {} ------------- ".format(dtype),
units=unit,
)
pretty_plot(results, title + str(dtype), unit, dash_key="pytorch")
def pretty_barplot(results, title, units: str, filename=None, dash_key=""):
"""Graph out the contents of a dict.
Dash key means that if the result label has this key, then it will be displayed with a dash"""
if not filename:
filename = title + ".png"
# Sanitize the filename
filename = (
filename.replace(" ", "_").replace("/", "_").replace("-", "_").replace(":", "")
)
xlabels = list(results.keys())
# Gather all the results in "collumns"
workloads: Dict[str, Any] = {k: [] for v in results.values() for k in v.keys()}
for v in results.values():
for k in v.keys():
workloads[k].append(float(v[k]))
options = list(workloads.keys())
group_len = len(options)
for key in workloads.keys():
num_groups = len(workloads[key])
break
group_width = group_len + 1
# Make sure that the plot is big enough
f = plt.figure()
f.set_figwidth(6)
f.set_figheight(6)
for idx in range(group_len):
option = options[idx]
values = workloads[option]
xloc = np.arange(1 + idx, group_width * num_groups, group_width)
plt.bar(xloc, values, width=1, edgecolor="black")
plt.title(title)
plt.legend(list(workloads.keys()), loc="upper right")
plt.ylabel(units)
ax = plt.gca()
xticks_loc = np.arange(
1 + (group_len - 1) / 2.0, group_width * num_groups, group_width
)
ax.set_xticks(xticks_loc, xlabels)
plt.xticks(rotation=45)
plt.setp(ax.xaxis.get_majorticklabels(), ha="right")
ax.set_axisbelow(True)
ax.yaxis.grid(color="gray", linestyle="dashed")
ax.xaxis.grid(color="gray", linestyle="dashed")
plt.savefig(filename, bbox_inches="tight")
plt.close(f)
def rmf(filename: str) -> None:
"""Remove a file like rm -f."""
try:
os.remove(filename)
except FileNotFoundError:
pass
@contextlib.contextmanager
def temp_files_ctx(num: int) -> Generator:
"""A context to get tempfiles and ensure they are cleaned up."""
files = [tempfile.mkstemp()[1] for _ in range(num)]
yield tuple(files)
# temp files could have been removed, so we use rmf.
for name in files:
rmf(name)
META_ALGORITHM = "algorithm"
BASELINE_DESCRIPTIONS = ["eager", "vanilla", "pytorch"]
# Serialize/unserialize to CSV
# We could use pkl, but resort to CSV for readability
def _benchmark_results_from_csv(filename: str) -> List[Tuple[Dict[str, Any], Any]]:
parts = os.path.basename(filename).split(".")
env = ""
description = ""
if len(parts) == 3:
env = parts[1]
description = parts[0]
data = []
with open(filename, "r") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if description != "" and row["description"] not in BASELINE_DESCRIPTIONS:
row["description"] = description
task_spec = benchmark.utils.common.TaskSpec(
stmt="",
setup="",
global_setup="",
label=row["label"],
sub_label=row["sub_label"],
description=row["description"],
env=env,
num_threads=int(row["num_threads"]),
)
measurement = benchmark.utils.common.Measurement(
number_per_run=1,
raw_times=[float(row["runtime_us"]) / (1000.0 * 1000)],
task_spec=task_spec,
)
measurement.mem_use = float(row["mem_use_mb"]) # type: ignore
data.append(
(
{
META_ALGORITHM: row["algorithm"]
if row["algorithm"] != ""
else None,
},
measurement,
)
)
return data
def _benchmark_results_to_csv(
filename: str, results: List[Tuple[Dict[str, Any], Any]]
) -> None:
data = [
{
"sub_label": r.task_spec.sub_label,
"label": r.task_spec.label,
"num_threads": r.task_spec.num_threads,
"algorithm": metadata.get(META_ALGORITHM, ""),
"description": r.task_spec.description
if r.task_spec.description in BASELINE_DESCRIPTIONS
else "",
"runtime_us": int(1000 * 1000 * r.mean),
"mem_use_mb": r.mem_use,
}
for metadata, r in results
]
with open(filename, "w+", newline="") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=list(data[0].keys()))
writer.writeheader()
for d in data:
writer.writerow(d)
def _finalize_results(results: List[Tuple[Dict[str, Any], Any]]) -> List[Any]:
"""
    Returns the list of measurements to feed into `benchmark.Compare`; if the
    runs use different algorithms, the algorithm name is also added to the
    column titles.
"""
all_algorithms: Set[str] = set()
all_description: Set[str] = set()
for (metadata, r) in results:
algo = metadata.get(META_ALGORITHM, None)
if algo is not None:
all_algorithms.add(algo)
all_description.add(r.task_spec.description)
display_algo = len(all_algorithms) > 1
display_descr = len(all_description) > 1
display_results = []
for (metadata, r) in results:
algo = metadata.get(META_ALGORITHM, None)
if algo is None:
display_results.append(r)
else:
r = copy.copy(r)
description = ""
if display_descr:
description = r.task_spec.description
if display_algo:
if display_descr:
description += "["
description += algo
if display_descr:
description += "]"
r.task_spec = replace(r.task_spec, description=description)
display_results.append(r)
return display_results
def _render_bar_plot(results: List[Any], store_results_folder: str) -> None:
if not results:
return
runtime: Dict[str, Dict[str, float]] = defaultdict(dict)
memory_usage: Dict[str, Dict[str, float]] = defaultdict(dict)
all_descriptions: List[str] = []
for r in results:
# Hacky: use a list to preserve order
if r.task_spec.description not in all_descriptions:
if r.task_spec.description in BASELINE_DESCRIPTIONS:
all_descriptions.insert(0, r.task_spec.description)
else:
all_descriptions.append(r.task_spec.description)
runtime[r.task_spec.sub_label][r.task_spec.description] = r.mean
memory_usage[r.task_spec.sub_label][r.task_spec.description] = r.mem_use
all_data_mem: List[Any] = []
all_data_run: List[Any] = []
for key, runtime_values in runtime.items():
memory_values = memory_usage[key]
all_data_mem.append(
[key]
+ [
memory_values.get(d, 0)
/ memory_values.get(all_descriptions[0], math.inf)
for d in all_descriptions
]
)
all_data_run.append(
[key]
+ [
runtime_values.get(all_descriptions[0], 0)
/ runtime_values.get(d, math.inf)
for d in all_descriptions
]
)
if all_descriptions[0] == "":
all_descriptions[0] = "baseline"
else:
all_descriptions[0] = f"{all_descriptions[0]} (baseline)"
for data, filename, title in [
(all_data_mem, "mem.png", "Memory usage (vs baseline, lower is better)"),
(
all_data_run,
"runtime.png",
"Runtime speedup (vs baseline, higher is better)",
),
]:
df = pd.DataFrame(data, columns=["Configuration"] + all_descriptions)
df.plot(
x="Configuration",
kind="bar",
stacked=False,
title=title,
)
plt.tight_layout()
filename_full = os.path.join(store_results_folder, filename)
plt.savefig(filename_full)
print(f"Saved plot: {filename_full}")
def benchmark_main_helper(benchmark_fn, cases: List[Dict[str, Any]], **kwargs) -> None:
"""
Helper function to run benchmarks.
Supports loading previous results for comparison, and saving current results to file.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--fn", default=None, type=str, help="Only benchmark this function"
)
parser.add_argument(
"--label", default=None, type=str, help="Store results to a file"
)
parser.add_argument(
"--fail_if_regression",
action="store_true",
help="Enabled in CI to check against performance regressions",
)
parser.add_argument(
"--compare",
default=None,
type=str,
help="Compare to previously stored benchmarks (coma separated)",
)
parser.add_argument("--omit-baselines", action="store_true")
parser.add_argument(
"--quiet",
action="store_true",
help="Skip intermediate results and progress bar",
)
args = parser.parse_args()
if args.fn is not None and args.fn != benchmark_fn.__name__:
print(f'Skipping benchmark "{benchmark_fn.__name__}"')
return
benchmark_run_and_compare(
benchmark_fn=benchmark_fn,
cases=cases,
optimized_label="optimized" if args.label is None else args.label,
fail_if_regression=args.fail_if_regression,
compare=args.compare.split(",") if args.compare is not None else [],
quiet=args.quiet,
omit_baselines=args.omit_baselines,
**kwargs,
)
def benchmark_run_and_compare(
benchmark_fn,
cases: List[Dict[str, Any]],
compare: List[str],
omit_baselines: bool = False,
fail_if_regression: bool = False,
quiet: bool = False,
optimized_label: str = "optimized",
*,
min_run_time: int = 2,
atol_s: float = 30e-6,
rtol: float = 0.05,
) -> None:
SKIP_VANILLA_TASKS_IF_ALREADY_DONE = True
results_compare_to = []
results = []
store_results_folder = os.path.expanduser(
os.path.join(
os.environ.get(
"XFORMERS_BENCHMARKS_CACHE",
os.path.join("~", ".cache", "xformers", "benchmarks"),
),
benchmark_fn.__name__,
)
)
try:
env = (
torch.cuda.get_device_name(torch.cuda.current_device())
.replace(" ", "_")
.replace("-", "_")
.replace(".", "_")
)
except (RuntimeError, AssertionError): # No GPU
env = "cpu"
assert (
"." not in optimized_label
), f"label=`{optimized_label}` should not contain dots"
assert "." not in env, f"env=`{env}` should not contain dots"
os.makedirs(store_results_folder, exist_ok=True)
# Load runs that we want to compare to
skip_vanilla_tasks = set()
for cmp_name in compare:
name_with_env = cmp_name if "." in cmp_name else f"{cmp_name}.*"
for filename in glob.glob(
os.path.join(store_results_folder, f"{name_with_env}.csv")
):
loaded = _benchmark_results_from_csv(filename)
for m, r in loaded:
if r.task_spec.env == env and SKIP_VANILLA_TASKS_IF_ALREADY_DONE:
skip_vanilla_tasks.add(
(r.task_spec.sub_label, r.task_spec.num_threads)
)
results_compare_to += loaded
if not quiet:
pbar = tqdm.tqdm(cases, leave=False)
cases = pbar
for case in cases:
if quiet:
print(str(case))
else:
pbar.write(f"====== {str(case)} ======")
try:
benchmarks_generator = benchmark_fn(**case)
except NotImplementedError:
# pbar.write(f"Skipped (NotImplementedError)")
continue
except RuntimeError as e:
if "CUDA out of memory" not in str(e):
raise
if not quiet:
pbar.write("Skipped (OOM)")
continue
name = None
try:
for benchmark_object in benchmarks_generator:
is_optimized = (
benchmark_object._task_spec.description not in BASELINE_DESCRIPTIONS
)
metadata = {}
if is_optimized:
metadata[META_ALGORITHM] = benchmark_object._task_spec.description
benchmark_object._task_spec = replace(
benchmark_object._task_spec, description=optimized_label
)
elif (
omit_baselines
or (
benchmark_object._task_spec.sub_label,
benchmark_object._task_spec.num_threads,
)
in skip_vanilla_tasks
):
continue
memory = math.inf
try:
torch.cuda.synchronize()
torch.cuda.reset_peak_memory_stats()
mem_begin = torch.cuda.max_memory_allocated() / 2**20
benchmark_object._task_spec = replace(
benchmark_object._task_spec, env=env
)
measurement = benchmark_object.blocked_autorange(
min_run_time=min_run_time
)
torch.cuda.synchronize()
results.append((metadata, measurement))
name = measurement.task_spec.description
memory = torch.cuda.max_memory_allocated() / 2**20 - mem_begin
measurement.mem_use = memory
except RuntimeError as e:
if "CUDA out of memory" not in str(e):
raise
if not quiet:
pbar.write("Skipped (OOM)")
finally:
del benchmark_object
if not quiet:
pbar.write(f"{name}: memory used: {memory} MB")
except RuntimeError as e:
if "CUDA out of memory" not in str(e):
raise
if not quiet:
pbar.write("Skipped (OOM)")
# Display results for benchmarks we just calculated
if name is not None and not quiet:
def matches_current(r):
return (
r[1].task_spec.sub_label == results[-1][1].task_spec.sub_label
and r[1].task_spec.label == results[-1][1].task_spec.label
)
pbar.write(
str(
benchmark.Compare(
_finalize_results(
list(filter(matches_current, results))
+ list(filter(matches_current, results_compare_to))
)
)
)
)
results_for_print = _finalize_results(results + results_compare_to)
benchmark.Compare(results_for_print).print()
_render_bar_plot(results_for_print, store_results_folder)
# Save runs to a file
if results and optimized_label is not None:
write_to_path = os.path.join(
store_results_folder, f"{optimized_label}.{env}.csv"
)
_benchmark_results_to_csv(write_to_path, results)
print(f"Saved results to {write_to_path}")
if fail_if_regression:
_fail_if_regressions(
results, reference=results_compare_to, atol_s=atol_s, rtol=rtol
)
def _fail_if_regressions(
results: List[Any], reference: List[Any], atol_s: float, rtol: float
) -> None:
def get_measurement_id(r):
return (
r[0].get(META_ALGORITHM, ""),
r[1].task_spec.label,
r[1].task_spec.sub_label,
r[1].task_spec.env,
)
id_to_result = {}
for r in results:
id_to_result[get_measurement_id(r)] = r[1]
num_better = 0
num_worse = 0
num_nochange = 0
num_unk = 0
reference_set = set()
for ref in reference:
if ref[1].task_spec.description in BASELINE_DESCRIPTIONS:
continue
benchmark_id = get_measurement_id(ref)
if benchmark_id in reference_set:
raise ValueError(f"Duplicate benchmark in reference for {benchmark_id}")
reference_set.add(benchmark_id)
if benchmark_id not in id_to_result:
num_unk += 1
continue
res = id_to_result[benchmark_id]
        # If the change is significant
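        # e.g. with the defaults rtol=0.05 and atol_s=30e-6, a 1 ms reference only
        # flags runs that differ from it by more than 0.05 * 1 ms + 30 us = 80 us.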
if abs(ref[1].mean - res.mean) - rtol * ref[1].mean > atol_s:
is_now_better = res.mean < ref[1].mean
if is_now_better:
num_better += 1
else:
num_worse += 1
cmp = "IMPROVED" if is_now_better else "REGRESS "
print(cmp, benchmark_id, f"ref={ref[1].mean}", f"now={res.mean}")
else:
num_nochange += 1
print("Regression test summary:")
print(f" Better : {num_better}")
print(f" No change: {num_nochange}")
print(f" Worse : {num_worse}")
if num_unk > 0:
print(f" (no ref) : {num_unk}")
if num_worse > 1:
raise RuntimeError("At least one benchmark regressed!")
if num_nochange == 0:
raise RuntimeError("No reference found")
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import time
from contextlib import suppress
from typing import Any, Dict, List, Optional
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import torch
import torch.nn.functional as F
from sklearn.model_selection import ParameterGrid
# CREDITS: Sean Naren
from torch.autograd.profiler import record_function
from tqdm import tqdm
from xformers.components import Activation
from xformers.components.attention import ATTENTION_REGISTRY
from xformers.factory.block_factory import xFormerEncoderBlock, xFormerEncoderConfig
_use_cuda = torch.cuda.is_available()
_GLOBAL_ATTENTION_RATIO = 0.1 # arbitrary
def _get_attention_query_mask(sequence_length: int, ratio: float):
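    # Rejection-sample a boolean query mask until the fraction of selected positions does not exceed ratio.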
mask = torch.rand((sequence_length, 1)) < ratio
while torch.count_nonzero(mask) / float(mask.numel()) > ratio:
mask = torch.rand((sequence_length, 1)) < ratio
return mask
def _get_trace_handler(name: str):
def trace_handler(prof):
prof.export_chrome_trace(f"profile_{name}.json")
prof.export_stacks(f"stacks_{name}.txt", "self_cuda_time_total")
return trace_handler
def _train_for_several_steps(
block: xFormerEncoderBlock,
num_steps: int,
batch_size: int,
sequence_length: int,
embed_dim: int,
autocast: bool,
device: torch.device,
lr: float = 0.01,
norm_type: Optional[float] = None,
profile: bool = False,
att_name: str = "",
) -> Dict[str, float]:
    # Use SGD with momentum rather than Adam: Adam is scale-invariant,
    # which makes it a poor fit for these tests
optim = torch.optim.SGD(block.parameters(), lr=lr, momentum=0.9)
if _use_cuda:
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
torch.cuda.synchronize()
start_time = time.time()
# Optional profiler, requires a context and some setup
profiler = (
torch.profiler.profile( # type: ignore
activities=[
torch.profiler.ProfilerActivity.CPU, # type: ignore
torch.profiler.ProfilerActivity.CUDA, # type: ignore
],
schedule=torch.profiler.schedule(wait=1, warmup=1, active=1), # type: ignore
on_trace_ready=_get_trace_handler(
f"{att_name}_batch_{batch_size}_seq_{sequence_length}_embed_dim_{embed_dim}"
),
profile_memory=True,
with_stack=True,
)
if profile
else suppress()
)
    # Minimal vanilla training loop
    # - the input data is nonsensical (random) and generated once, outside the training loop
inputs = torch.rand(batch_size, sequence_length).to(device)
with profiler as p: # type: ignore
for _ in range(num_steps):
optim.zero_grad()
with torch.cuda.amp.autocast(enabled=autocast):
with record_function("attention_forward"):
output = block(inputs)
with record_function("loss"):
loss = F.mse_loss(
inputs.unsqueeze(-1).repeat(1, 1, output.shape[-1]),
output,
reduction="sum",
)
with record_function("backward"):
loss.backward()
if norm_type is not None:
clip_norm = 0.3
torch.nn.utils.clip_grad_norm_(block.parameters(), clip_norm, norm_type)
optim.step()
if p:
p.step()
if _use_cuda:
torch.cuda.synchronize()
max_memory = torch.cuda.max_memory_allocated() / 2**20
else:
max_memory = -1
run_time = time.time() - start_time
return {"run_time": run_time, "max_memory": round(max_memory, 1)}
def benchmark_model(num_warmup: int, num_steps: int, **kwargs) -> Dict[str, float]:
# Run warm-up first
warm_up_args = {**kwargs}
warm_up_args["profile"] = False
_train_for_several_steps(num_steps=num_warmup, **warm_up_args)
return _train_for_several_steps(num_steps=num_steps, **kwargs)
def test_xformer_encoder_block(
attention_name: str,
feedforward_name: str,
heads: int,
attn_dropout: float,
residual_dropout: float,
causal: bool,
activation: Activation,
autocast: bool,
batch_size: int,
sequence_length: int,
embed_dim: int,
dropout: float,
num_steps: int,
num_warmup: int,
device: torch.device,
profile: bool,
) -> Dict[str, float]:
block = instantiate_xformer(
activation=activation,
attention_name=attention_name,
attn_dropout=attn_dropout,
causal=causal,
feedforward_name=feedforward_name,
heads=heads,
residual_dropout=residual_dropout,
sequence_length=sequence_length,
embed_dim=embed_dim,
dropout=dropout,
).to(device)
print(
"Testing:",
block,
batch_size,
sequence_length,
embed_dim,
autocast,
device,
attention_name,
)
return benchmark_model(
num_steps=num_steps,
num_warmup=num_warmup,
block=block,
batch_size=batch_size,
sequence_length=sequence_length,
embed_dim=embed_dim,
autocast=autocast,
device=device,
profile=profile,
att_name=attention_name,
)
def instantiate_xformer(
activation: Activation,
attention_name: str,
attn_dropout: float,
causal: bool,
feedforward_name: str,
heads: int,
residual_dropout: float,
sequence_length: int,
embed_dim: int,
dropout: float,
) -> xFormerEncoderBlock:
block_size = 16
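    # The config below is deliberately a superset so that any attention from the registry can be built from it;
    # each implementation consumes the fields it needs (e.g. layout/block_size for block-sparse attention).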
attention_config = {
"name": attention_name,
"dropout": attn_dropout,
"causal": causal,
"seq_len": sequence_length,
"attention_query_mask": _get_attention_query_mask(
sequence_length, _GLOBAL_ATTENTION_RATIO
),
"num_heads": heads,
"dim_head": embed_dim / heads,
"layout": torch.eye(
sequence_length // block_size,
sequence_length // block_size,
dtype=torch.long,
)
.unsqueeze(0)
.expand(heads, -1, -1),
"block_size": block_size,
}
multi_head_config = {
"num_heads": heads,
"dim_model": embed_dim,
"residual_dropout": residual_dropout,
"attention": attention_config,
}
feedforward_config = {
"name": feedforward_name,
"dim_model": embed_dim,
"dropout": dropout,
"activation": activation,
"hidden_layer_multiplier": 4,
}
position_embedding_config = {
"name": "sine",
"dim_model": embed_dim,
"seq_len": sequence_length,
}
block_config = xFormerEncoderConfig(
dim_model=embed_dim,
multi_head_config=multi_head_config,
feedforward_config=feedforward_config,
position_encoding_config=position_embedding_config,
)
block = xFormerEncoderBlock.from_config(block_config)
return block
def plot(args, results: List[Dict[str, Any]]):
df = pd.DataFrame(results)
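    # Only plot the slice corresponding to the last value of each swept parameter that is not on the x-axis.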
HEADS = args.heads[-1]
AMP = args.pytorch_amp[-1]
EMB = args.embedding_dim[-1]
CAUSAL = args.causal[-1]
BATCH_SIZE = args.batch_size[-1]
ACTIVATION = args.activations[-1]
df_filtered = df[
(df["activation"] == ACTIVATION)
& (df["heads"] == HEADS)
& (df["autocast"] == AMP)
& (df["embed_dim"] == EMB)
& (df["causal"] == CAUSAL)
& (df["batch_size"] == BATCH_SIZE)
]
df_filtered.sort_values(
by=["sequence_length", "max_memory"], ascending=[False, True], inplace=True
)
sns.barplot(
x="sequence_length",
y="max_memory",
hue="attention_name",
data=df_filtered,
palette="Set2",
)
plt.xlabel("Sequence length")
plt.ylabel("Max memory being used")
plt.title("Memory use")
plt.savefig("memory_vs_attention.png")
plt.clf()
df_filtered.sort_values(
by=["sequence_length", "run_time"], ascending=[False, True], inplace=True
)
sns.barplot(
x="sequence_length",
y="run_time",
hue="attention_name",
data=df_filtered,
palette="Set2",
)
plt.xlabel("Sequence length")
plt.ylabel("Average epoch time")
plt.title("Runtime")
plt.savefig("runtime_vs_attention.png")
if __name__ == "__main__":
# Get the user requests
parser = argparse.ArgumentParser(
"Benchmark different attention mechanisms on various sequence lengths"
)
parser.add_argument(
"-a", "--attentions", nargs="+", default=list(ATTENTION_REGISTRY.keys())
)
parser.add_argument("-mlp", "--mlp", nargs="+", default=["MLP"])
parser.add_argument(
"-act", "--activations", nargs="+", default=[a.value for a in Activation]
)
parser.add_argument(
"-emb", "--embedding_dim", nargs="+", default=[64, 128, 256], type=int
)
parser.add_argument(
"-sl", "--sequence_length", nargs="+", default=[576, 1024], type=int
)
parser.add_argument("-bs", "--batch_size", nargs="+", default=[8, 16, 32], type=int)
parser.add_argument("-heads", "--heads", nargs="+", default=[8, 16], type=int)
parser.add_argument("-fp16", "--pytorch_amp", nargs="+", default=[True], type=bool)
parser.add_argument("-causal", "--causal", nargs="+", default=[False], type=bool)
parser.add_argument("-plot", "--plot", action="store_true", default=False)
parser.add_argument(
"-profile",
"--profile",
help="Pofile the runtime and memory",
action="store_true",
default=False,
)
args = parser.parse_args()
# Setup the test configs
constants = {
"device": torch.device("cuda") if _use_cuda else torch.device("cpu"),
"num_warmup": 5,
"num_steps": 10,
"dropout": 0.1,
"attn_dropout": 0.1,
"residual_dropout": 0.1,
"profile": args.profile,
}
param_grid = {
"autocast": args.pytorch_amp,
"causal": args.causal,
"heads": args.heads,
"activation": args.activations,
"attention_name": args.attentions,
"feedforward_name": args.mlp,
"sequence_length": args.sequence_length,
"embed_dim": args.embedding_dim,
"batch_size": args.batch_size,
}
print(
"Testing the following parameters: \n",
json.dumps(param_grid, sort_keys=True, indent=4),
)
grid = ParameterGrid(param_grid)
grid_outputs = []
for params in tqdm(grid, total=len(grid)):
outputs = test_xformer_encoder_block(**constants, **params) # type: ignore
results = {**outputs, **params}
grid_outputs.append(results)
print(json.dumps(grid_outputs, sort_keys=True, indent=4))
# Optional plots
if args.plot:
plot(args, grid_outputs)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_encoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
import torch
import triton
from xformers.benchmarks.utils import TestCase, pretty_plot, pretty_print
from xformers.components import Activation, build_activation
from xformers.triton import FusedDropoutBias
SHAPES = [
(8, 256, 512),
(8, 512, 1024),
(4, 1024, 1024),
(2, 2048, 2048),
(1, 2048, 12288),
(2, 4096, 4096),
]
P = 0.1
def to_gbs_fw(a, ms, bias):
# Read and write the full array
total = 2 * a.numel() * a.element_size()
if bias:
# Read the bias, ideally only once
total += a.shape[-1] * a.element_size()
return total * 1e-9 / (ms * 1e-3)
def bench_dropout(bias: bool, backward: bool, activation: Optional[Activation]):
device = torch.device("cuda")
for dtype in [
torch.float16,
torch.float32,
]:
results: Dict[str, Any] = {}
for B, M, K in SHAPES:
a = torch.rand(
(B, M, K), device=device, dtype=dtype, requires_grad=backward
)
b = torch.rand(K, device=device, dtype=dtype, requires_grad=backward)
torch_act = build_activation(activation)
triton_dropout = FusedDropoutBias(
P, bias_shape=K if bias else None, activation=activation
)
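            # Eager PyTorch reference path (bias add, dropout, optional activation), mirroring the fused Triton kernel benchmarked below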
def torch_step(x):
x_ = x + b if bias else x
y = torch.nn.functional.dropout(x_, P)
if activation:
y = torch_act(y)
if backward:
y.grad = None
torch.norm(y).backward()
return y
def triton_step(x):
y = triton_dropout(x)
if backward:
y.grad = None
torch.norm(y).backward()
return y
for testcase in [
TestCase(
torch_step,
"pytorch - bias: {} - fw{} - act: {}".format(
bias, "+bw" if backward else "", activation
),
),
TestCase(
triton_step,
"triton - bias: {} - fw{} - act: {}".format(
bias, "+bw" if backward else "", activation
),
),
]:
time = triton.testing.do_bench(
lambda: testcase.function(a), grad_to_none=[a, b]
)[0]
key = f"B={B}, M={M}, K={K}"
if key not in results:
results[key] = {}
# Record BW
bandwidth = to_gbs_fw(a, time, bias)
results[key][testcase.name] = f"{bandwidth:.1f}"
pretty_print(results, title="\n --- Type: {} --- ".format(dtype), units="GB/s")
pretty_plot(
results,
title="Dropout-Bias-{}-FW{}-{}-Act: {}".format(
bias, "+BW" if backward else "", dtype, activation
),
units="GB/s",
dash_key="pytorch",
)
for activation in [Activation.GeLU, None, Activation.SquaredReLU]:
for bw in [True, False]:
for bias in [True, False]:
bench_dropout(bias, bw, activation)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_triton_dropout.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import random
import time
from typing import Any, Dict, List, Tuple
import torch
import triton
from torch.cuda.amp import autocast
from xformers.benchmarks.utils import TestCase, pretty_print
from xformers.factory.model_factory import xFormer, xFormerConfig
VOCAB = 8
def _data(device, batch, seq, emb, vocab=VOCAB):
    # The dummy task is to classify sequences as either all zeros or random noise
input_a = torch.zeros((batch, seq, emb), device=device)
input_b = (torch.rand((batch, seq, emb), device=device) * vocab).abs()
target_a = torch.zeros((batch, seq), device=device)
target_b = torch.ones((batch, seq), device=device)
if random.random() > 0.5:
return torch.cat([input_a, input_b], dim=0), torch.cat(
[target_a, target_b], dim=0
)
return torch.cat([input_b, input_a], dim=0), torch.cat([target_b, target_a], dim=0)
def reset_seeds():
torch.manual_seed(0)
random.seed(0)
def step(
model: torch.nn.Module,
optim: torch.optim.Optimizer,
batch: int,
seq: int,
emb: int,
device,
):
model.train()
optim.zero_grad()
batch, target = _data(device, batch, seq, emb)
try:
outputs = model(batch)
except TypeError:
        # Some stock PyTorch models (e.g. nn.Transformer) require the target to be passed explicitly
outputs = model(batch, tgt=batch)
loss = torch.norm(torch.mean(outputs, dim=-1) - target)
loss.backward()
# Clip grad and error out if we're producing NaNs
torch.nn.utils.clip_grad_norm_(model.parameters(), 10.0, norm_type=2.0, error_if_nonfinite=True) # type: ignore
optim.step()
return loss.item()
def evaluate(model: torch.nn.Module, batch: int, seq: int, emb: int, device):
reset_seeds()
batch, target = _data(device, batch, seq, emb)
model.eval()
try:
outputs = model(batch)
except TypeError:
        # Some stock PyTorch models (e.g. nn.Transformer) require the target to be passed explicitly
outputs = model(batch, tgt=batch)
return torch.norm(torch.mean(outputs, dim=-1) - target).item()
def train(model, optimizer, name, steps, batch: int, seq: int, emb: int, device):
# Dummy training, just checking that both options give the same results
# Same seed for everyone
start = time.time()
for _ in range(steps):
_ = step(model, optimizer, batch, seq, emb, device)
torch.cuda.synchronize()
print("Trained {} in {:.3}s".format(name, time.time() - start))
def bench_pytorch_encoder(
shapes: List[Tuple[int, int, int]],
activation: str,
n_heads: int,
dropout: float = 0.1,
layers: int = 2,
device: torch.device = torch.device("cuda"),
steps: int = 20,
use_amp: bool = True,
):
results_time: Dict[str, Any] = {}
results_memory: Dict[str, Any] = {}
for shape in shapes:
batch, seq, emb = shape
# Build both a xFormers and Pytorch model
reset_seeds()
model_xformers = xFormer.from_config(
xFormerConfig(
[
{
"block_type": "encoder",
"dim_model": emb,
"num_layers": layers,
"residual_norm_style": "post",
"multi_head_config": {
"num_heads": n_heads,
"residual_dropout": dropout,
"use_separate_proj_weight": True,
"bias": True,
"attention": {
"name": "scaled_dot_product",
"dropout": dropout,
"causal": False,
"seq_len": seq,
},
"dim_model": emb,
},
"feedforward_config": {
"name": "FusedMLP",
"dropout": dropout,
"activation": activation,
"hidden_layer_multiplier": 4,
"dim_model": emb,
},
},
]
)
).to(device)
print(model_xformers)
reset_seeds()
model_pytorch = torch.nn.TransformerEncoder(
torch.nn.TransformerEncoderLayer(
d_model=emb,
nhead=n_heads,
dim_feedforward=4 * emb,
dropout=dropout,
activation=activation,
layer_norm_eps=1e-05,
batch_first=True, # (batch, seq, feature)
device=device,
),
num_layers=layers,
)
print(model_pytorch)
optim_xformers = torch.optim.Adam(model_xformers.parameters(), lr=1e-3)
optim_pytorch = torch.optim.Adam(model_pytorch.parameters(), lr=1e-3)
def run_training(model, optimizer, label):
with autocast(enabled=use_amp):
eval_start = evaluate(model, batch, seq, emb, device)
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
torch.cuda.synchronize()
train(model, optimizer, label, steps, batch, seq, emb, device)
max_memory = torch.cuda.max_memory_allocated() // 2**20
print(f"Peak memory use: {max_memory}MB")
eval_stop = evaluate(model, batch, seq, emb, device)
print(f"Trained from {eval_start} to {eval_stop}\n")
return eval_start, eval_stop, max_memory
# Save the memory being used by both
memory: Dict[str, Any] = {"pytorch": [], "xformers": []}
def torch_train():
_, _, max_memory = run_training(model_pytorch, optim_pytorch, "pytorch")
memory["pytorch"].append(max_memory)
def xformers_train():
_, _, max_memory = run_training(model_xformers, optim_xformers, "xformers")
memory["xformers"].append(max_memory)
for testcase in [
TestCase(
xformers_train,
"xformers",
),
TestCase(
torch_train,
"pytorch",
),
]:
time, _, _ = triton.testing.do_bench(lambda: testcase.function())
key = "emb {} - heads {}".format(emb, n_heads)
if key not in results_time:
results_time[key] = {}
results_memory[key] = {}
results_time[key][testcase.name] = f"{time/1000:.1f}"
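            # Report the median of the peak-memory readings collected across the repeated training runs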
median_memory = sorted(memory[testcase.name])[
len(memory[testcase.name]) // 2
]
results_memory[key][testcase.name] = median_memory
pretty_print(
results_time,
title="\n--- Transformer training benchmark - runtime ---",
units="s",
)
pretty_print(
results_memory,
title="\n--- Transformer training benchmark - memory use ---",
units="MB",
)
if __name__ == "__main__":
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
bench_pytorch_encoder(
shapes=[(16, 128, 128), (2, 1024, 1024), (1, 1024, 2048)],
activation="gelu",
n_heads=8,
dropout=0.1,
layers=2,
device=device,
steps=20,
use_amp=True,
)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_pytorch_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable
import torch
from torch.utils import benchmark
from xformers.components.attention.utils import iterative_pinv
MIN_RUN_TIME = 1
SHAPES = [[8, 8], [256, 1024], [128, 256]]
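# SHAPES is stored column-wise ([B values], [M values], [K values]); zip(*SHAPES) yields (B, M, K) tuples.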
SPARSITIES = [0.5, 0.8, 0.9, 0.95, 0.99]
def bench_inverse(inverse_fn: Callable[[torch.Tensor], torch.Tensor]):
min_run_time = MIN_RUN_TIME
prob = 0.9
device = torch.device("cuda")
results = []
for B, M, K in zip(*SHAPES):
a = torch.rand(B, M, M, device=device)
a[a < prob] = 0
a = torch.softmax(a, dim=-1)
results.extend(
[
benchmark.Timer(
stmt=f"{inverse_fn.__name__}(a)",
globals={
"a": a,
f"{inverse_fn.__name__}": inverse_fn,
},
label=f"{inverse_fn.__name__}",
sub_label="dense",
description=f"B={B}, M={M}, K={K}",
).blocked_autorange(min_run_time=min_run_time),
]
)
for prob in SPARSITIES:
a = torch.rand(B, M, M, device=device)
a[a < prob] = 0
a = a.to_sparse()
results.append(
benchmark.Timer(
stmt=f"{inverse_fn.__name__}(a)",
globals={
"a": a,
f"{inverse_fn.__name__}": inverse_fn,
},
label=f"{inverse_fn.__name__}",
sub_label=f"sparsity: {prob:0.2f}",
description=f"B={B}, M={M}, K={K}",
).blocked_autorange(min_run_time=min_run_time)
)
compare = benchmark.Compare(results)
compare.print()
def iterative_pinv_analysis(
identity_tolerance: float = 1e-1,
pinv_tolerance: float = 5e-1,
max_iters: int = 30,
plot: bool = True,
):
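    # For growing matrix sizes, find how many iterations the iterative pseudo-inverse needs
    # before A @ pinv(A) is within tolerance of the identity.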
for i in range(1, 10):
B, M = 1, 2**i
a = torch.rand(B, M, M)
a = torch.softmax(a, dim=-1)
for n_iter in range(1, max_iters + 1):
result = iterative_pinv(a, n_iter=n_iter)
expected = torch.linalg.pinv(a)
result_identity = torch.matmul(a, result)
identity = torch.eye(M)
            # torch.linalg.norm defaults to the Frobenius norm for matrices.
identity_error = torch.linalg.norm(identity - result_identity, dim=(-2, -1))
inverse_error = torch.linalg.norm(expected - result, dim=(-2, -1))
if (identity_error < identity_tolerance).all() or n_iter == max_iters:
print(
f"Size {M}, n_iters {n_iter}: \n\t \
Final Error from Identity: {identity_error.item()} \n\t \
Final Error from linalg.pinv {inverse_error.item()}"
)
break
if __name__ == "__main__":
iterative_pinv_analysis()
bench_inverse(iterative_pinv)
bench_inverse(torch.linalg.pinv)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_nystrom_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch
import torch.nn as nn
import triton
from xformers.benchmarks.utils import TestCase, pretty_plot, pretty_print
from xformers.components import MultiHeadDispatch
from xformers.components.attention import ScaledDotProduct
SHAPES = [
(8, 384, 128),
(8, 784, 512),
(4, 1024, 768),
(4, 2048, 1024),
(2, 2048, 2048),
(2, 2048, 4096),
(2, 4096, 4096),
(1, 2048, 12288),
]
N_HEADS = [4]
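# Compare xformers MultiHeadDispatch against torch.nn.MultiheadAttention over the shapes above,
# in fp16/fp32, with and without backward, for self- and cross-attention.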
def bench_multihead_dispatch(backward: bool, self_attention: bool):
device = torch.device("cuda")
bw = "+bw" if backward else ""
sa = " (self_attn)" if self_attention else ""
for dtype in [torch.float16, torch.float32]:
results: Dict[str, Any] = {}
for B, M, K in SHAPES:
for heads in N_HEADS:
xf_multi_head = MultiHeadDispatch(
dim_model=K,
residual_dropout=0.0,
num_heads=heads,
attention=ScaledDotProduct(),
bias=(True, True, True, True),
).to(device=device, dtype=dtype)
torch_multi_head = nn.MultiheadAttention(
embed_dim=K, num_heads=heads, batch_first=True
).to(device=device, dtype=dtype)
q = torch.randn(
(B, M, K), requires_grad=backward, device=device, dtype=dtype
)
if self_attention:
k = q
v = q
else:
k = torch.randn(
(B, M, K), requires_grad=backward, device=device, dtype=dtype
)
v = torch.randn(
(B, M, K), requires_grad=backward, device=device, dtype=dtype
)
def torch_mha():
y, _ = torch_multi_head(query=q, key=k, value=v)
if backward:
torch.norm(y).backward()
return y
def xformers_mha():
y = xf_multi_head(query=q, key=k, value=v)
if backward:
torch.norm(y).backward()
return y
for testcase in [
TestCase(torch_mha, f"torch - fw{bw}{sa}"),
TestCase(xformers_mha, f"xf - fw{bw}{sa}"),
]:
time = triton.testing.do_bench(testcase.function)[0]
key = f"B={B}, M={M}, K={K}, N_HEADS={heads}"
if key not in results:
results[key] = {}
results[key][testcase.name] = f"{time:.2f}"
pretty_print(
results,
title=f"\n --- Type: {dtype} --- ",
units="runtime in ms, lower is better",
)
pretty_plot(
results,
title=f"MHA-FW{bw}-{dtype}",
units="runtime in ms, lower is better",
dash_key="torch",
)
for bw in [False, True]:
for self_attention in [False, True]:
bench_multihead_dispatch(bw, self_attention)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_multi_head_dispatch.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import torch
from torch.utils import benchmark
from xformers.components.attention._sputnik_sparse import _csr_to_coo
from xformers.components.attention.core import SparseCS, _create_random_sparsity
MIN_RUN_TIME = 0.2
def _get_fn(backend):
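    # Map a backend name to the corresponding SDDMM implementation; "csr_to_coo" only measures the CSR->COO index conversion overhead.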
if backend == "csr_ge":
fn = torch.ops.xformers.csr_sddmm
elif backend == "csr_sputnik":
fn = torch.ops.xformers.sddmm_sputnik
elif backend == "coo_ge":
def fn(a, b, row_indices, row_offsets, column_indices):
row_coo, _ = _csr_to_coo(
a.shape[-2], b.shape[-2], row_offsets, column_indices
)
return torch.ops.xformers.coo_sddmm(
a, b, row_indices, row_coo, column_indices
)
elif backend == "csr_to_coo":
def fn(a, b, row_indices, row_offsets, column_indices):
row_coo, _ = _csr_to_coo(
a.shape[-2], b.shape[-2], row_offsets, column_indices
)
return row_coo
return fn
def bench_sddmm(configs):
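    # configs: list of ((B, M, K), sparsity) pairs; see the Swin/ViT/basic presets defined below.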
min_run_time = MIN_RUN_TIME
device = torch.device("cuda")
results = []
for (B, M, K), prob in configs:
a = torch.rand(B, M, K, device=device)
b = torch.rand(B, M, K, device=device)
mask = _create_random_sparsity(
torch.ones(1, M, M, dtype=torch.bool), prob, divisible_by=16
)
aa = a
bb = b
mask = SparseCS(mask, device)
row_indices = mask.row_indices
row_offsets = mask.row_offsets
column_indices = mask.column_indices
for backend in ["csr_sputnik", "csr_ge", "coo_ge", "csr_to_coo"]:
fn_str = "fn(a, b, row_indices, row_offsets, column_indices)"
fn = _get_fn(backend)
results.append(
benchmark.Timer(
stmt=fn_str,
globals={
"a": aa,
"b": bb,
"mask": mask,
"row_indices": row_indices,
"row_offsets": row_offsets,
"column_indices": column_indices,
"fn": fn,
},
label="sddmm",
sub_label=f"B={B:>4d}, M={M:>4d}, K={K:>3d}, prob={prob:0.4f}",
description=backend,
).blocked_autorange(min_run_time=min_run_time)
)
compare = benchmark.Compare(results)
compare.print()
return results
# batch size 32, for different layers
SWIN_T_SIZES = [(96, 3136, 32), (192, 784, 32), (384, 196, 32), (768, 49, 32)]
swin_t_config = list(zip(SWIN_T_SIZES, (0.9844, 0.9375, 0.75, 0.0)))
# some random values
BASIC_SIZES = [(32, 1024, 32), (32, 1024, 128), (8, 4096, 32), (8, 4096, 128)]
SPARSITIES = [0.90, 0.93, 0.95, 0.97, 0.98, 0.99, 0.995, 0.999]
basic_config = list(itertools.product(BASIC_SIZES, SPARSITIES))
# batch size 32 here
vit_sizes = [
(192, 785, 64), # deit_small_patch8_224
(192, 197, 64), # deit_small_patch16_224
(384, 785, 64), # deit_base_patch8_224
(384, 197, 64), # deit_base_patch16_224
]
SPARSITIES = [0.70, 0.80, 0.85, 0.90, 0.93, 0.95, 0.97]
vit_config = list(itertools.product(vit_sizes, SPARSITIES))
results = []
print("Swin Transformer")
results += bench_sddmm(swin_t_config)
print("ViT")
results += bench_sddmm(vit_config)
print("Basic cases")
results += bench_sddmm(basic_config)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_sddmm.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import torch
from torch.utils import benchmark
from xformers.components.attention.core import (
SparseCS,
_create_random_sparsity,
_matmul_with_mask,
_softmax,
bmm,
)
MIN_RUN_TIME = 1
SHAPES = [[8, 8], [256, 1024], [128, 256]]
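# SHAPES is stored column-wise ([B values], [M values], [K values]); zip(*SHAPES) yields (B, M, K) tuples.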
SPARSITIES = [0.5, 0.8, 0.9, 0.95, 0.99]
def bench_sddmm():
min_run_time = MIN_RUN_TIME
SPARSITIES = [0.95, 0.98, 0.99, 0.995, 0.999]
device = torch.device("cuda")
results = []
for B, M, K in zip(*SHAPES):
a = torch.rand(B, M, K, device=device)
b = torch.rand(B, M, K, device=device)
for backend, prob in itertools.product(
["coo_pytorch", "csr_sputnik", "csr_ge"], SPARSITIES
):
mask = _create_random_sparsity(torch.ones(B, M, M, dtype=torch.bool), prob)
aa = a
bb = b
if "csr" in backend:
mask = SparseCS(mask, device)
aa = a
bb = b
row_indices = mask.row_indices
row_offsets = mask.row_offsets
column_indices = mask.column_indices
if "_ge" in backend:
fn = torch.ops.xformers.csr_sddmm
else:
fn = torch.ops.xformers.sddmm_sputnik
fn_str = "fn(a, b, row_indices, row_offsets, column_indices)"
else:
mask = mask.to_sparse().to(device)
_, row_offsets, column_indices = mask.indices().int().unbind()
row_offsets = row_offsets.contiguous()
column_indices = column_indices.contiguous()
row_indices = row_offsets
bb = b.transpose(-2, -1)
fn = _matmul_with_mask
fn_str = "fn(a, b, mask)"
results.append(
benchmark.Timer(
stmt=fn_str,
globals={
"a": aa,
"b": bb,
"mask": mask,
"row_indices": row_indices,
"row_offsets": row_offsets,
"column_indices": column_indices,
"fn": fn,
},
label="sddmm",
sub_label=f"sparsity {backend}: {prob:0.4f}",
description=f"B={B}, M={M}, K={K}",
).blocked_autorange(min_run_time=min_run_time)
)
compare = benchmark.Compare(results)
compare.print()
def bench_matmul_with_mask():
min_run_time = MIN_RUN_TIME
prob = 0.9
device = torch.device("cuda")
results = []
for B, M, K in zip(*SHAPES):
a = torch.rand(B, M, K, device=device)
b = torch.rand(B, K, M, device=device)
mask = torch.rand(B, M, M, device=device) > prob
results.extend(
[
benchmark.Timer(
stmt="_matmul_with_mask(a, b, mask)",
globals={
"a": a,
"b": b,
"mask": None,
"_matmul_with_mask": _matmul_with_mask,
},
label="matmul_with_mask",
sub_label="dense",
description=f"B={B}, M={M}, K={K}",
).blocked_autorange(min_run_time=min_run_time),
benchmark.Timer(
stmt="_matmul_with_mask(a, b, mask)",
globals={
"a": a,
"b": b,
"mask": mask,
"_matmul_with_mask": _matmul_with_mask,
},
label="matmul_with_mask",
sub_label="dense with masking",
description=f"B={B}, M={M}, K={K}",
).blocked_autorange(min_run_time=min_run_time),
]
)
for sputnik, prob in itertools.product([False, True], SPARSITIES):
mask = _create_random_sparsity(
torch.ones(B, M, M, dtype=torch.bool, device=device), prob
)
aa = a
bb = b
if sputnik:
mask = SparseCS(mask, device)
aa = a
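                # Presumably to give b a column-major memory layout (contiguous when transposed) for the sparse kernel, without changing its logical shape.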
bb = b.transpose(-2, -1).contiguous().transpose(-2, -1)
else:
mask = mask.to_sparse()
results.append(
benchmark.Timer(
stmt="_matmul_with_mask(a, b, mask)",
globals={
"a": aa,
"b": bb,
"mask": mask,
"_matmul_with_mask": _matmul_with_mask,
},
label="matmul_with_mask",
sub_label=f"sparsity {'sputnik' if sputnik else 'pytorch'}: {prob:0.2f}",
description=f"B={B}, M={M}, K={K}",
).blocked_autorange(min_run_time=min_run_time)
)
compare = benchmark.Compare(results)
compare.print()
def bench_softmax():
min_run_time = MIN_RUN_TIME
prob = 0.9
device = torch.device("cuda")
results = []
for B, M, K in zip(*SHAPES):
a = torch.rand(B, M, M, device=device)
a[a < prob] = 0
results.extend(
[
benchmark.Timer(
stmt="_softmax(a)",
globals={
"a": a,
"_softmax": _softmax,
},
label="softmax",
sub_label="dense",
description=f"B={B}, M={M}, K={K}",
).blocked_autorange(min_run_time=min_run_time),
]
)
for sputnik, prob in itertools.product([False, True], SPARSITIES):
a = _create_random_sparsity(torch.rand(B, M, M, device=device), prob)
if sputnik:
a = SparseCS(a, device)
else:
a = a.to_sparse()
results.append(
benchmark.Timer(
stmt="_softmax(a)",
globals={
"a": a,
"_softmax": _softmax,
},
label="softmax",
sub_label=f"sparsity {'sputnik' if sputnik else 'pytorch'}: {prob:0.2f}",
description=f"B={B}, M={M}, K={K}",
).blocked_autorange(min_run_time=min_run_time)
)
compare = benchmark.Compare(results)
compare.print()
def bench_bmm():
min_run_time = MIN_RUN_TIME
prob = 0.9
device = torch.device("cuda")
results = []
for B, M, K in zip(*SHAPES):
a = torch.rand(B, M, M, device=device)
a[a < prob] = 0
b = torch.rand(B, M, K, device=device)
results.extend(
[
benchmark.Timer(
stmt="bmm(a, b)",
globals={
"a": a,
"b": b,
"bmm": bmm,
},
label="bmm",
sub_label="dense",
description=f"B={B}, M={M}, K={K}",
).blocked_autorange(min_run_time=min_run_time),
]
)
for sputnik, prob in itertools.product([False, True], SPARSITIES):
a = _create_random_sparsity(torch.rand(B, M, M, device=device), prob)
bb = b
if sputnik:
a = SparseCS(a, device)
bb = b
else:
a = a.to_sparse()
results.append(
benchmark.Timer(
stmt="bmm(a, b)",
globals={
"a": a,
"b": bb,
"bmm": bmm,
},
label="bmm",
sub_label=f"sparsity {'sputnik' if sputnik else 'pytorch'}: {prob:0.2f}",
description=f"B={B}, M={M}, K={K}",
).blocked_autorange(min_run_time=min_run_time)
)
compare = benchmark.Compare(results)
compare.print()
bench_sddmm()
bench_matmul_with_mask()
bench_softmax()
bench_bmm()
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/benchmark_core.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from pathlib import Path
from xformers.benchmarks.LRA.run_tasks import Task
from xformers.components.attention import ATTENTION_REGISTRY
def get_default_shared_folder() -> str:
checkpoint_paths = ["/checkpoint", "/checkpoints"]
for checkpoint_path in checkpoint_paths:
if Path(checkpoint_path).is_dir():
return checkpoint_path
return "."
if __name__ == "__main__":
default_checkpoint_path = get_default_shared_folder()
# Get the user requests
parser = argparse.ArgumentParser(
"Benchmark different attention mechanisms on various sequence lengths"
)
parser.add_argument("-c", "--config_path", required=True)
parser.add_argument("-ck", "--checkpoint_path", required=True)
parser.add_argument(
"-a", "--attentions", nargs="+", default=list(ATTENTION_REGISTRY.keys())
)
parser.add_argument("-t", "--tasks", nargs="+", default=[t.value for t in Task])
parser.add_argument(
"--partition", default="a100", type=str, help="Partition where to submit"
)
args = parser.parse_args()
for attention in args.attentions:
for task in args.tasks:
os.system(
"python3 run_with_submitit.py"
+ f" --attention {attention} --task {task} --config {args.config_path}"
+ f" --checkpoint_dir {args.checkpoint_path}/{attention}/{task}"
+ f" --partition {args.partition}"
)
|
EXA-1-master
|
exa/libraries/xformers/xformers/benchmarks/LRA/batch_submit.py
|