repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
libai | libai-main/projects/QQP/modeling/load_megatron_weight.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import oneflow as flow
import torch
import libai.utils.distributed as dist
from libai.utils.checkpoint import get_missing_parameters_message, get_unexpected_parameters_message
logger = logging.getLogger("libai." + __name__)
def convert_tensor(tensor: torch.Tensor):
    """Convert a PyTorch tensor to an equivalent OneFlow tensor (as float32)."""
    as_numpy = tensor.float().cpu().numpy()
    return flow.Tensor(as_numpy)
def change_megatron_key(state_dict):
    """Map a Megatron-LM BERT ``state_dict`` onto LiBai parameter names.

    Every tensor is converted from torch to oneflow via ``convert_tensor``.
    Returns a flat dict keyed by LiBai module names.
    """
    of_state_dict = {}
    # Language model.
    language_model = state_dict["language_model"]
    # Embedding.
    embedding = language_model["embedding"]
    of_state_dict["embeddings.vocab_embeddings.weight"] = convert_tensor(
        embedding["word_embeddings"]["weight"]
    )
    of_state_dict["embeddings.position_embeddings.weight"] = convert_tensor(
        embedding["position_embeddings"]["weight"]
    )
    of_state_dict["embeddings.tokentype_embeddings.weight"] = convert_tensor(
        embedding["tokentype_embeddings"]["weight"]
    )
    # Encoder.
    encoder = language_model["encoder"]
    for key, value in encoder.items():
        # e.g. "layers.0.input_layernorm.weight" -> "encoders.0.input_layernorm.weight"
        key = "encoders." + key.replace("layers.", "")
        # The final layernorm lives at top level in LiBai, not inside "encoders".
        if key.startswith("encoders.final_layernorm"):
            key = key.replace("encoders.", "")
        of_state_dict[key] = convert_tensor(value)
    # Pooler.
    pooler = language_model["pooler"]
    of_state_dict["pooler.dense.weight"] = convert_tensor(pooler["dense.weight"])
    of_state_dict["pooler.dense.bias"] = convert_tensor(pooler["dense.bias"])
    # LM head.
    lm_head = state_dict["lm_head"]
    of_state_dict["cls.predictions.dense.weight"] = convert_tensor(lm_head["dense.weight"])
    of_state_dict["cls.predictions.dense.bias"] = convert_tensor(lm_head["dense.bias"])
    of_state_dict["cls.predictions.layernorm.weight"] = convert_tensor(lm_head["layernorm.weight"])
    of_state_dict["cls.predictions.layernorm.bias"] = convert_tensor(lm_head["layernorm.bias"])
    of_state_dict["lm_logits.bias"] = convert_tensor(lm_head["bias"])
    # Binary head.
    binary_head = state_dict["binary_head"]
    of_state_dict["cls.seq_relationship.weight"] = convert_tensor(binary_head["weight"])
    of_state_dict["cls.seq_relationship.bias"] = convert_tensor((binary_head["bias"]))
    return of_state_dict
def load_tensor(tensor_lhs, tensor_rhs):
    """Copy local tensor ``tensor_rhs`` into global model tensor ``tensor_lhs`` in place.

    The source is first made global (broadcast over the 2D parallel mesh) on the
    destination's placement, then re-split to match the destination's SBP before
    the in-place copy.
    """
    tensor_rhs = flow.to_global(
        tensor_rhs,
        placement=tensor_lhs.placement,
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
    )
    tensor_rhs = tensor_rhs.to_global(sbp=tensor_lhs.sbp)
    tensor_lhs.copy_(tensor_rhs)
def load_model(model: flow.nn.Module, state_dict):
    """Copy tensors from ``state_dict`` into ``model`` in place.

    Checkpoint entries whose shape disagrees with the model are skipped and
    logged; missing and unexpected keys are reported via the checkpoint
    helper messages.
    """
    model_state_dict = model.state_dict()
    # Drop checkpoint entries whose shape disagrees with the model.
    incorrect_shapes = []
    for k in list(state_dict.keys()):
        if k in model_state_dict:
            shape_model = tuple(model_state_dict[k].shape)
            shape_ckpt = tuple(state_dict[k].shape)
            if shape_model != shape_ckpt:
                incorrect_shapes.append((k, shape_ckpt, shape_model))
                state_dict.pop(k)
    unexpected_keys = []
    for key, value in state_dict.items():
        if key not in model_state_dict:
            unexpected_keys.append(key)
            continue
        # pop() hands back the live parameter tensor, so we can load into it
        # directly instead of rebuilding model.state_dict() for every key
        # (the original called model.state_dict() once per parameter).
        target = model_state_dict.pop(key)
        load_tensor(target, value)
    missing_keys = list(model_state_dict.keys())
    for k, shape_checkpoint, shape_model in incorrect_shapes:
        logger.warning(
            "Skip loading parameter '{}' to the model due to incompatible "
            "shapes: {} in the checkpoint but {} in the "
            "model! You might want to double check if this is expected.".format(
                k, shape_checkpoint, shape_model
            )
        )
    if missing_keys:
        logger.info(get_missing_parameters_message(missing_keys))
    if unexpected_keys:
        logger.info(get_unexpected_parameters_message(unexpected_keys))
def load_megatron_bert(model: flow.nn.Module, model_weight_path: str):
    """Load a Megatron-LM BERT checkpoint (torch format) into a LiBai model.

    The checkpoint file is expected to hold its weights under the "model" key.
    ``torch`` is already imported at module scope, so the redundant local
    import was removed.
    """
    megatron_state_dict = torch.load(model_weight_path, map_location="cpu")["model"]
    of_state_dict = change_megatron_key(megatron_state_dict)
    load_model(model, of_state_dict)
| 4,859 | 35 | 100 | py |
libai | libai-main/projects/DALLE2/dalle2_inference.py | import os
from typing import Dict
import oneflow as flow
from dalle2.dalle2_loader import Dalle2ModelLoader
from dalle2.model_weights.download_utils import download_dalle2_weights
from dalle2.tokenizer import SimpleTokenizer
from oneflow.framework import balanced_splitter
import libai.utils.distributed as dist
from libai.inference.basic import BasePipeline
class Dalle2Pipeline(BasePipeline):
    """Text-to-image inference pipeline for DALLE-2 (CLIP prior + diffusion decoder)."""

    def __init__(
        self,
        config_file,
        data_parallel=None,
        tensor_parallel=None,
        pipeline_parallel=None,
        pipeline_stage_id=None,
        pipeline_num_layers=None,
        model_path=None,
        mode="libai",
        **kwargs,
    ):
        # NOTE(review): model_path is passed before pipeline_num_layers here,
        # mirroring the original call order — confirm against BasePipeline.
        super().__init__(
            config_file,
            data_parallel,
            tensor_parallel,
            pipeline_parallel,
            pipeline_stage_id,
            model_path,
            pipeline_num_layers,
            mode,
            **kwargs,
        )

    def update_cfg(
        self,
        data_parallel=1,
        tensor_parallel=1,
        pipeline_parallel=1,
        pipeline_stage_id=None,
        pipeline_num_layers=None,
    ):
        """Update the loaded config with parallelism options and weight paths."""
        super().update_cfg(
            data_parallel,
            tensor_parallel,
            pipeline_parallel,
            pipeline_stage_id,
            pipeline_num_layers,
        )
        # Hard-coded locations populated by download_dalle2_weights().
        self.cfg.model.prior.clip.name = "./dalle2/model_weights/ViT-L-14.pt"
        self.cfg.model.prior_weight_path = "./dalle2/model_weights/prior_aes_finetune.pth"
        self.cfg.model.decoder_weight_path = "./dalle2/model_weights/latest.pth"
        self.cfg.swinir.swinir_path = (
            "./swinir/weights/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth"
        )

    def load_pretrain_weight(self, libai_cfg_model, model_path, mode=None):
        """Download the weights on rank 0 only, then load them on every rank."""
        if dist.is_main_process():
            download_dalle2_weights(self.cfg)
        dist.synchronize()
        model_loader = Dalle2ModelLoader(libai_cfg_model, self.cfg, model_path)
        return model_loader.load()

    def build_tokenizer(self, cfg):
        return SimpleTokenizer()  # return instantiate(cfg.tokenizer)

    def _parse_parameters(self, model_path=None, save_images=False, upsample_scale=None, **kwargs):
        """Split user kwargs into preprocess / forward / postprocess dicts."""
        preprocess_params = {}
        forward_params = {
            "model_path": model_path,
            "num_samples_per_batch": kwargs.get("num_samples_per_batch", 2),
            "prior_cond_scale": kwargs.get("prior_cond_scale", 1.0),
            "decoder_cond_scale": kwargs.get("decoder_cond_scale", 3.5),
        }
        postprocess_params = {
            "save_images": save_images,
            "upsample_scale": upsample_scale,
            # BUGFIX: forward the user-selected output directory and SwinIR
            # path instead of silently dropping them (postprocess previously
            # always fell back to its hard-coded defaults).
            "output_dir": kwargs.get("output_dir", "./outputs"),
            "swinir_path": kwargs.get(
                "swinir_path",
                "./swinir/weights/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth",
            ),
        }
        return preprocess_params, forward_params, postprocess_params

    def split_data(self, text):
        """Give each rank a contiguous, balanced slice of the prompt list."""
        rank = dist.get_rank()
        indices = balanced_splitter.BalancedRanges(len(text), dist.get_world_size())
        return text[indices[rank][0] : indices[rank][1]]

    def preprocess(self, input_, **preprocess_parameters: Dict) -> dict:
        """Tokenize one prompt and broadcast the tokens to every rank."""
        tokens = self.tokenizer.tokenize(input_).to_global(
            placement=flow.placement(type="cuda", ranks=list(range(dist.get_world_size()))),
            sbp=flow.sbp.broadcast,
        )
        return {"text": input_, "tokens": tokens}

    def forward(self, model_input_dict, **forward_params) -> dict:
        """Sample an image embedding from the prior, then decode it to pixels."""
        tokens = model_input_dict["tokens"]
        text_embed, text_encodings, text_mask = self.model.prior.clip.embed_text(tokens)
        image_embed = self.model.prior.sample(
            tokens,
            num_samples_per_batch=forward_params["num_samples_per_batch"],
            cond_scale=forward_params["prior_cond_scale"],
        )
        image_embed = self.model.decoder.sample(
            image_embed=image_embed,
            text_encodings=text_encodings,
            text_mask=text_mask,
            cond_scale=forward_params["decoder_cond_scale"],
        )
        return {"image_embed": image_embed}

    def postprocess(self, model_output_dict, **postprocess_params: Dict) -> dict:
        """Optionally save the generated images (and SwinIR-upsampled copies)."""
        if not postprocess_params.get("save_images", False):
            return model_output_dict
        # BUGFIX: key was misspelled "output_dit", so --output_dir was ignored.
        output_path = postprocess_params.get("output_dir", "./outputs")
        os.makedirs(output_path, exist_ok=True)
        import flowvision.transforms as T

        to_pil = T.ToPILImage()
        images = model_output_dict["image_embed"].to("cpu")
        images_64x64 = list(map(to_pil, [images[i] for i in range(images.shape[0])]))
        for i, image in enumerate(images_64x64):
            image.save(f"{output_path}/{i}.png")
        # BUGFIX: this block previously read the module-global ``args``, which
        # only exists when the file is executed as a script (NameError when the
        # pipeline is used as a library). Use the forwarded parameter instead.
        upsample_scale = postprocess_params.get("upsample_scale", None)
        if upsample_scale:
            from swinir import load_model, upsample4x, upsample16x

            swinir = load_model(postprocess_params.get("swinir_path", ""))
            upsample_fun = upsample4x if upsample_scale == 4 else upsample16x
            images = upsample_fun(images, swinir).to("cpu")
            images = list(map(to_pil, [images[i] for i in range(images.shape[0])]))
            for i, image in enumerate(images):
                image.save(f"{output_path}/{i}_{upsample_scale}x.png")
        print(f"Images have been saved under {output_path}.")
        return model_output_dict
def parse_args():
    """Build and parse the command-line options for DALLE-2 inference."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--config_file", type=str, default="configs/dalle2_config.py")
    # Parallelism layout: data / tensor / pipeline degrees.
    for flag, default_value in (
        ("data_parallel", 1),
        ("tensor_parallel", 4),
        ("pipeline_parallel", 1),
    ):
        parser.add_argument(f"--{flag}", type=int, default=default_value)
    parser.add_argument(
        "--upsample_scale",
        type=int,
        choices=[4, 16],
        default=None,
        help="upsample scale, if 4x, output resolution will be 256 x 256.",
    )
    parser.add_argument(
        "--swinir_path",
        type=str,
        default="./swinir/weights/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth",
    )
    parser.add_argument("--output_dir", type=str, default="./outputs")
    parser.add_argument("--save_images", action="store_true")
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    # Build the distributed pipeline from the CLI-selected parallelism layout.
    model = Dalle2Pipeline(
        config_file=args.config_file,
        data_parallel=args.data_parallel,
        tensor_parallel=args.tensor_parallel,
        pipeline_parallel=args.pipeline_parallel,
    )
    # Demo prompts; remaining CLI flags (save_images, upsample_scale, ...) are
    # forwarded to the pipeline as keyword arguments.
    texts = [
        "a shiba inu wearing a beret and black turtleneck",
        "a teddy bear on a skateboard in times square",
        "trump fight with biden in white house",
        "Donald trump fight with biden in white house",
    ]
    imgs = model(texts, **vars(args))
| 6,770 | 35.6 | 99 | py |
libai | libai-main/projects/DALLE2/dalle2/_clip.py | import os
import sys
from collections import namedtuple
import oneflow as flow
from oneflow import nn
from .models import l2norm
def import_flow_clip(fn):
    """Decorator that temporarily puts the bundled CLIP package on ``sys.path``.

    The wrapped function can then ``import clip``. Two fixes over the original:
    the wrapped function's return value is propagated to the caller (it was
    silently discarded before), and the path entry is removed even when ``fn``
    raises.
    """

    def wrapper(*args, **kwargs):
        sys.path.append(
            os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")), "CLIP")
        )
        try:
            return fn(*args, **kwargs)
        finally:
            sys.path.pop()

    return wrapper
# Lightweight return types for the embed_* helpers.
EmbeddedText = namedtuple("EmbedTextReturn", ["text_embed", "text_encodings"])
EmbeddedImage = namedtuple("EmbedImageReturn", ["image_embed", "image_encodings"])
class BaseClipAdapter(nn.Module):
    """Abstract wrapper giving different CLIP implementations a common interface.

    Subclasses must expose the latent-dimension / image-geometry properties and
    implement ``embed_text`` / ``embed_image``.
    """

    def __init__(self, clip, **kwargs):
        super().__init__()
        self.clip = clip  # the wrapped CLIP model
        self.overrides = kwargs  # subclass-specific configuration overrides

    @property
    def dim_latent(self):
        raise NotImplementedError

    @property
    def image_size(self):
        raise NotImplementedError

    @property
    def image_channels(self):
        raise NotImplementedError

    @property
    def max_text_len(self):
        raise NotImplementedError

    def embed_text(self, text):
        raise NotImplementedError

    def embed_image(self, image):
        raise NotImplementedError
class OpenAIClipAdapter(BaseClipAdapter):
    """Adapter around OpenAI's CLIP exposing text/image embedding helpers."""

    @import_flow_clip
    def __init__(self, name="ViT-L/14"):
        import clip

        openai_clip, preprocess = clip.load(name)
        super().__init__(openai_clip)
        self.eos_id = 49407  # for handling 0 being also '!'
        # Capture the final-layernorm output via a forward hook so per-token
        # encodings are available after encode_text().
        text_attention_final = self.find_layer("ln_final")
        self.handle = text_attention_final.register_forward_hook(self._hook)
        self.clip_normalize = preprocess.transforms[-1]
        self.cleared = False

    def find_layer(self, layer):
        """Return the named submodule of the wrapped CLIP model, or None."""
        modules = dict([*self.clip.named_modules()])
        return modules.get(layer, None)

    def clear(self):
        """Detach the text-encoding hook; safe to call more than once."""
        if self.cleared:
            return
        # BUGFIX: a hook handle is not callable — the original ``self.handle()``
        # raised TypeError. Remove the hook properly and record the state so
        # repeated calls are no-ops.
        self.handle.remove()
        self.cleared = True

    def _hook(self, _, inputs, outputs):
        # Stash the ln_final output; consumed (and deleted) by embed_text().
        self.text_encodings = outputs

    @property
    def dim_latent(self):
        return 512

    @property
    def image_size(self):
        return self.clip.visual.input_resolution

    @property
    def image_channels(self):
        return 3

    @property
    def max_text_len(self):
        return self.clip.context_length

    @flow.no_grad()
    def embed_text(self, text):
        """Return (l2-normed text embed, per-token encodings, padding mask)."""
        text = text[..., : self.max_text_len]
        assert not self.cleared
        text_mask = text != 0  # v0.15.4
        text_embed = self.clip.encode_text(text)
        text_encodings = self.text_encodings
        del self.text_encodings
        return l2norm(text_embed.float()), text_encodings.float(), text_mask

    @flow.no_grad()
    def embed_image(self, image):
        """Return an EmbeddedImage with the l2-normed image embedding."""
        assert not self.cleared
        # NOTE(review): validate_and_resize_image is not defined in this file —
        # presumably provided elsewhere; verify.
        image = self.validate_and_resize_image(image)
        image = self.clip_normalize(image)
        image_embed = self.clip.encode_image(image)
        return EmbeddedImage(l2norm(image_embed.float()), None)
| 2,932 | 24.068376 | 99 | py |
libai | libai-main/projects/DALLE2/dalle2/utils.py | import importlib
import time
# helper functions
def exists(val):
    """True unless *val* is None (falsy values like 0 or "" still count)."""
    return not (val is None)
# time helpers
class Timer:
    """Simple wall-clock stopwatch."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Restart timing from now."""
        self.last_time = time.time()

    def elapsed(self):
        """Seconds elapsed since the last reset."""
        now = time.time()
        return now - self.last_time
# print helpers
def print_ribbon(s, symbol="=", repeat=40):
    """Return *s* flanked by ``repeat`` copies of *symbol* on each side."""
    banner = symbol * repeat
    return " ".join((banner, s, banner))
# import helpers
def import_or_print_error(pkg_name, err_str=None):
    """Import *pkg_name*; on failure optionally print *err_str*, then exit."""
    try:
        return importlib.import_module(pkg_name)
    except ModuleNotFoundError:
        if err_str is not None:
            print(err_str)
        exit()
| 657 | 14.302326 | 50 | py |
libai | libai-main/projects/DALLE2/dalle2/vqgan_vae.py | import copy
from functools import partial, wraps
from math import sqrt
import flowvision
import oneflow as flow
import oneflow.nn.functional as F
from einops import rearrange, repeat
from oneflow import einsum, nn
from oneflow.autograd import grad as flow_grad
from libai.layers import Linear
from .einops_exts import Rearrange, rearrange_many
from .vector_quantize_flow import VectorQuantize as VQ
# constants
MList = nn.ModuleList
# helper functions
def exists(val):
    """Return True when *val* is anything other than ``None``."""
    return not (val is None)
def default(val, d):
    """Return *val* when it is not None, otherwise the fallback *d*."""
    if val is not None:
        return val
    return d
# decorators
def eval_decorator(fn):
    """Run *fn* with the model switched to eval mode, restoring the previous
    training flag afterwards."""

    def inner(model, *args, **kwargs):
        prev_mode = model.training
        model.eval()
        result = fn(model, *args, **kwargs)
        model.train(prev_mode)
        return result

    return inner
def remove_vgg(fn):
    """Hide ``self.vgg`` while *fn* runs, restoring it afterwards.

    Used so the perceptual-loss VGG network is excluded from
    state_dict save/load.
    """

    @wraps(fn)
    def inner(self, *args, **kwargs):
        stashed_vgg = None
        had_vgg = hasattr(self, "vgg")
        if had_vgg:
            stashed_vgg = self.vgg
            delattr(self, "vgg")
        result = fn(self, *args, **kwargs)
        if had_vgg:
            self.vgg = stashed_vgg
        return result

    return inner
# keyword argument helpers
def pick_and_pop(keys, d):
    """Remove *keys* from dict *d* (mutating it) and return them as a new dict."""
    return {key: d.pop(key) for key in keys}
def group_dict_by_key(cond, d):
    """Split *d* into ``(matching, non_matching)`` dicts by ``cond(key)``."""
    matching, non_matching = dict(), dict()
    for key, value in d.items():
        if cond(key):
            matching[key] = value
        else:
            non_matching[key] = value
    return matching, non_matching
def string_begins_with(prefix, string_input):
    """Return whether *string_input* starts with *prefix*."""
    return string_input.startswith(prefix)
def group_by_key_prefix(prefix, d):
    # Split ``d`` into (keys starting with ``prefix``, the rest).
    return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
    """Split *d* by key *prefix* and strip the prefix from the matching keys.

    Returns ``(prefixed_kwargs_without_prefix, remaining_kwargs)``.
    """
    kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
    trimmed = {key[len(prefix):]: value for key, value in kwargs_with_prefix.items()}
    return trimmed, kwargs
# tensor helper functions
def log(t, eps=1e-10):
    # Numerically-safe log: eps keeps log(0) finite.
    return flow.log(t + eps)
def gradient_penalty(images, output, weight=10):
    """WGAN-GP style gradient penalty of *output* w.r.t. *images*.

    Removed the dead no-op statement ``images.shape[0]`` the original carried.
    """
    gradients = flow_grad(
        outputs=output,
        inputs=images,
        grad_outputs=flow.ones(output.size(), device=images.device),
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    # Flatten per-sample gradients and penalize deviation of their norm from 1.
    gradients = rearrange(gradients, "b ... -> b (...)")
    return weight * ((gradients.norm(2, dim=1) - 1) ** 2).mean()
def l2norm(t):
    # L2-normalize along the last dimension.
    return F.normalize(t, dim=-1)
def leaky_relu(p=0.1):
    """Return a LeakyReLU module with negative slope *p*.

    BUGFIX: the slope was hard-coded to 0.1, silently ignoring *p*.
    Behavior is unchanged for all existing call sites, which rely on the
    default.
    """
    return nn.LeakyReLU(p)
def stable_softmax(t, dim=-1, alpha=32 ** 2):
    # Numerically-stable softmax: scale down by alpha, subtract the (detached)
    # max, then scale back up before the softmax.
    t = t / alpha
    t = t - flow.amax(t, dim=dim, keepdim=True).detach()
    return (t * alpha).softmax(dim=dim)
def safe_div(numer, denom, eps=1e-8):
    """Division that never divides by exactly zero (denominator offset by eps)."""
    return numer / (eps + denom)
# gan losses
def hinge_discr_loss(fake, real):
    # Hinge loss for the discriminator: push real logits above +1, fake below -1.
    return (F.relu(1 + fake) + F.relu(1 - real)).mean()
def hinge_gen_loss(fake):
    """Hinge generator loss: maximize the discriminator's fake logits."""
    return fake.mean().neg()
def bce_discr_loss(fake, real):
    # Non-saturating BCE discriminator loss computed from raw logits.
    return (-log(1 - flow.sigmoid(fake)) - log(flow.sigmoid(real))).mean()
def bce_gen_loss(fake):
    # BCE generator loss: push fake logits toward "real" (sigmoid -> 1).
    return -log(flow.sigmoid(fake)).mean()
def grad_layer_wrt_loss(loss, layer):
    # Detached gradient of ``loss`` w.r.t. one layer's weight tensor;
    # used for the adaptive generator-loss weighting in VQGanVAE.forward.
    return flow_grad(
        outputs=loss, inputs=layer, grad_outputs=flow.ones_like(loss), retain_graph=True
    )[0].detach()
# vqgan vae
class LayerNormChan(nn.Module):
    """LayerNorm over the channel dim of an NCHW tensor (learned scale, no bias)."""

    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.eps = eps  # numerical-stability floor for the variance
        self.gamma = nn.Parameter(flow.ones(1, dim, 1, 1))

    def forward(self, x):
        # Normalize each spatial position across channels (dim=1).
        var = flow.var(x, dim=1, unbiased=False, keepdim=True)
        mean = flow.mean(x, dim=1, keepdim=True)
        return (x - mean) / (var + self.eps).sqrt() * self.gamma
# discriminator
class Discriminator(nn.Module):
    """Convolutional discriminator for the VQGAN adversarial loss.

    ``dims`` gives the channel width per stage; each stage after the first
    halves the spatial resolution via a strided conv.
    """

    def __init__(self, dims, channels=3, groups=16, init_kernel_size=5):
        super().__init__()
        dim_pairs = zip(dims[:-1], dims[1:])
        # Stem: wide-kernel conv from image channels to the first width.
        self.layers = MList(
            [
                nn.Sequential(
                    nn.Conv2d(channels, dims[0], init_kernel_size, padding=init_kernel_size // 2),
                    leaky_relu(),
                )
            ]
        )
        # Downsampling stages: strided conv + groupnorm + leaky relu.
        for dim_in, dim_out in dim_pairs:
            self.layers.append(
                nn.Sequential(
                    nn.Conv2d(dim_in, dim_out, 4, stride=2, padding=1),
                    nn.GroupNorm(groups, dim_out),
                    leaky_relu(),
                )
            )
        dim = dims[-1]
        self.to_logits = nn.Sequential(  # return 5 x 5, for PatchGAN-esque training
            nn.Conv2d(dim, dim, 1), leaky_relu(), nn.Conv2d(dim, 1, 4)
        )

    def forward(self, x):
        for net in self.layers:
            x = net(x)
        return self.to_logits(x)
# positional encoding
class ContinuousPositionBias(nn.Module):
    """from https://arxiv.org/abs/2111.09883"""

    def __init__(self, *, dim, heads, layers=2):
        super().__init__()
        # Small MLP mapping relative (di, dj) offsets to per-head biases.
        self.net = MList([])
        self.net.append(nn.Sequential(Linear(2, dim), leaky_relu()))
        for _ in range(layers - 1):
            self.net.append(nn.Sequential(Linear(dim, dim), leaky_relu()))
        self.net.append(Linear(dim, heads))
        # Lazily-built relative-position cache (excluded from checkpoints).
        self.register_buffer("rel_pos", None, persistent=False)

    def forward(self, x):
        # x: attention logits whose last dim is the flattened feature map;
        # assumes the feature map is square.
        n, device = x.shape[-1], x.device
        fmap_size = int(sqrt(n))
        if not exists(self.rel_pos):
            pos = flow.arange(fmap_size, device=device)
            grid = flow.stack(flow.meshgrid(pos, pos, indexing="ij"))
            grid = rearrange(grid, "c i j -> (i j) c")
            rel_pos = rearrange(grid, "i c -> i 1 c") - rearrange(grid, "j c -> 1 j c")
            # Signed log-scaling keeps large offsets in a compact range.
            rel_pos = flow.sign(rel_pos) * flow.log(rel_pos.abs() + 1)
            self.register_buffer("rel_pos", rel_pos, persistent=False)
        rel_pos = self.rel_pos.float()
        for layer in self.net:
            rel_pos = layer(rel_pos)
        bias = rearrange(rel_pos, "i j h -> h i j")
        return x + bias
# resnet encoder / decoder
class ResnetEncDec(nn.Module):
    """Symmetric ResNet encoder/decoder pair for the VQGAN-VAE.

    The encoder downsamples by 2x per layer (final feature width is
    ``dim * layer_mults[-1]``); the decoder mirrors it with transposed convs.
    Resnet blocks and attention are, by default, only applied at the deepest
    layer.
    """

    def __init__(
        self,
        dim,
        *,
        channels=3,
        layers=4,
        layer_mults=None,
        num_resnet_blocks=1,
        resnet_groups=16,
        first_conv_kernel_size=5,
        use_attn=True,
        attn_dim_head=64,
        attn_heads=8,
        attn_dropout=0.0,
    ):
        super().__init__()
        assert (
            dim % resnet_groups == 0
        ), f"dimension {dim} must be divisible by {resnet_groups} (groups for the groupnorm)"
        self.layers = layers
        self.encoders = MList([])
        self.decoders = MList([])
        layer_mults = default(layer_mults, list(map(lambda t: 2 ** t, range(layers))))
        assert (
            len(layer_mults) == layers
        ), "layer multipliers must be equal to designated number of layers"
        layer_dims = [dim * mult for mult in layer_mults]
        dims = (dim, *layer_dims)
        self.encoded_dim = dims[-1]
        dim_pairs = zip(dims[:-1], dims[1:])

        # Encoders grow front-to-back; decoders are built in reverse (prepend)
        # so encode/decode stay mirror images of each other.
        def append(arr, t):
            arr.append(t)

        def prepend(arr, t):
            arr.insert(0, t)

        # Scalars mean "apply only at the deepest layer".
        if not isinstance(num_resnet_blocks, tuple):
            num_resnet_blocks = (*((0,) * (layers - 1)), num_resnet_blocks)
        if not isinstance(use_attn, tuple):
            use_attn = (*((False,) * (layers - 1)), use_attn)
        assert (
            len(num_resnet_blocks) == layers
        ), "number of resnet blocks config must be equal to number of layers"
        assert len(use_attn) == layers
        for layer_index, (dim_in, dim_out), layer_num_resnet_blocks, layer_use_attn in zip(
            range(layers), dim_pairs, num_resnet_blocks, use_attn
        ):
            append(
                self.encoders,
                nn.Sequential(nn.Conv2d(dim_in, dim_out, 4, stride=2, padding=1), leaky_relu()),
            )
            prepend(
                self.decoders,
                nn.Sequential(nn.ConvTranspose2d(dim_out, dim_in, 4, 2, 1), leaky_relu()),
            )
            if layer_use_attn:
                prepend(
                    self.decoders,
                    VQGanAttention(
                        dim=dim_out, heads=attn_heads, dim_head=attn_dim_head, dropout=attn_dropout
                    ),
                )
            for _ in range(layer_num_resnet_blocks):
                append(self.encoders, ResBlock(dim_out, groups=resnet_groups))
                prepend(self.decoders, GLUResBlock(dim_out, groups=resnet_groups))
            if layer_use_attn:
                append(
                    self.encoders,
                    VQGanAttention(
                        dim=dim_out, heads=attn_heads, dim_head=attn_dim_head, dropout=attn_dropout
                    ),
                )
        # Stem conv in front of the encoder; 1x1 conv back to pixels at the end.
        prepend(
            self.encoders,
            nn.Conv2d(channels, dim, first_conv_kernel_size, padding=first_conv_kernel_size // 2),
        )
        append(self.decoders, nn.Conv2d(dim, channels, 1))

    def get_encoded_fmap_size(self, image_size):
        # Each of ``layers`` stages halves the spatial size.
        return image_size // (2 ** self.layers)

    @property
    def last_dec_layer(self):
        # Weight of the final 1x1 conv; used for adaptive loss weighting.
        return self.decoders[-1].weight

    def encode(self, x):
        for enc in self.encoders:
            x = enc(x)
        return x

    def decode(self, x):
        for dec in self.decoders:
            x = dec(x)
        return x
class GLUResBlock(nn.Module):
    """Residual block with gated linear units (used on the decoder side)."""

    def __init__(self, chan, groups=16):
        super().__init__()
        # Each conv doubles the channels, then GLU gates them back to ``chan``.
        self.net = nn.Sequential(
            nn.Conv2d(chan, chan * 2, 3, padding=1),
            nn.GLU(dim=1),
            nn.GroupNorm(groups, chan),
            nn.Conv2d(chan, chan * 2, 3, padding=1),
            nn.GLU(dim=1),
            nn.GroupNorm(groups, chan),
            nn.Conv2d(chan, chan, 1),
        )

    def forward(self, x):
        return self.net(x) + x
class ResBlock(nn.Module):
    """Plain conv residual block (used on the encoder side)."""

    def __init__(self, chan, groups=16):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(chan, chan, 3, padding=1),
            nn.GroupNorm(groups, chan),
            leaky_relu(),
            nn.Conv2d(chan, chan, 3, padding=1),
            nn.GroupNorm(groups, chan),
            leaky_relu(),
            nn.Conv2d(chan, chan, 1),
        )

    def forward(self, x):
        return self.net(x) + x
# vqgan attention layer
class VQGanAttention(nn.Module):
    """Channel-first self-attention block with continuous relative position bias."""

    def __init__(self, *, dim, dim_head=64, heads=8, dropout=0.0):
        super().__init__()
        self.heads = heads
        self.scale = dim_head ** -0.5
        inner_dim = heads * dim_head
        self.dropout = nn.Dropout(dropout)
        self.pre_norm = LayerNormChan(dim)
        self.cpb = ContinuousPositionBias(dim=dim // 4, heads=heads)
        # 1x1 convs play the role of linear projections on NCHW maps.
        self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias=False)
        self.to_out = nn.Conv2d(inner_dim, dim, 1, bias=False)

    def forward(self, x):
        h = self.heads
        height, width, residual = *x.shape[-2:], x.clone()
        x = self.pre_norm(x)
        q, k, v = self.to_qkv(x).chunk(3, dim=1)
        q, k, v = map(lambda t: rearrange(t, "b (h c) x y -> b h c (x y)", h=h), (q, k, v))
        sim = einsum("b h c i, b h c j -> b h i j", q, k) * self.scale
        # Add relative position bias before the (numerically stable) softmax.
        sim = self.cpb(sim)
        attn = stable_softmax(sim, dim=-1)
        attn = self.dropout(attn)
        out = einsum("b h i j, b h c j -> b h c i", attn, v)
        out = rearrange(out, "b h c (x y) -> b (h c) x y", x=height, y=width)
        out = self.to_out(out)
        return out + residual
# ViT encoder / decoder
class RearrangeImage(nn.Module):
    # Reshape a (b, n, ...) token sequence back into a square (b, h, w, ...)
    # map; assumes n is a perfect square.
    def forward(self, x):
        n = x.shape[1]
        w = h = int(sqrt(n))
        return rearrange(x, "b (h w) ... -> b h w ...", h=h, w=w)
class Attention(nn.Module):
    """Pre-norm multi-head self-attention used inside the ViT encoder/decoder."""

    def __init__(self, dim, *, heads=8, dim_head=32):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.heads = heads
        self.scale = dim_head ** -0.5
        inner_dim = dim_head * heads
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
        self.to_out = nn.Linear(inner_dim, dim)

    def forward(self, x):
        h = self.heads
        x = self.norm(x)
        q, k, v = self.to_qkv(x).chunk(3, dim=-1)
        q, k, v = rearrange_many((q, k, v), "b n (h d) -> b h n d", h=h)
        q = q * self.scale
        sim = einsum("b h i d, b h j d -> b h i j", q, k)
        # Subtract the (detached) row max for numerical stability.
        sim = sim - sim.amax(dim=-1, keepdim=True).detach()
        attn = sim.softmax(dim=-1)
        out = einsum("b h i j, b h j d -> b h i d", attn, v)
        out = rearrange(out, "b h n d -> b n (h d)")
        return self.to_out(out)
def FeedForward(dim, mult=4):
    # Pre-norm MLP block: dim -> dim*mult -> dim with GELU, no biases.
    return nn.Sequential(
        nn.LayerNorm(dim),
        nn.Linear(dim, dim * mult, bias=False),
        nn.GELU(),
        nn.Linear(dim * mult, dim, bias=False),
    )
class Transformer(nn.Module):
    """Stack of pre-norm attention + feed-forward layers with residuals."""

    def __init__(self, dim, *, layers, dim_head=32, heads=8, ff_mult=4):
        super().__init__()
        self.layers = nn.ModuleList([])
        for _ in range(layers):
            self.layers.append(
                nn.ModuleList(
                    [
                        Attention(dim=dim, dim_head=dim_head, heads=heads),
                        FeedForward(dim=dim, mult=ff_mult),
                    ]
                )
            )
        self.norm = nn.LayerNorm(dim)

    def forward(self, x):
        for attn, ff in self.layers:
            x = attn(x) + x
            x = ff(x) + x
        return self.norm(x)
class ViTEncDec(nn.Module):
    """ViT-based encoder/decoder pair (alternative to ResnetEncDec)."""

    def __init__(self, dim, channels=3, layers=4, patch_size=8, dim_head=32, heads=8, ff_mult=4):
        super().__init__()
        self.encoded_dim = dim
        self.patch_size = patch_size
        input_dim = channels * (patch_size ** 2)
        # Patchify -> linear embed -> transformer -> back to a 2D feature map.
        self.encoder = nn.Sequential(
            Rearrange("b c (h p1) (w p2) -> b (h w) (p1 p2 c)", p1=patch_size, p2=patch_size),
            Linear(input_dim, dim),
            Transformer(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult, layers=layers),
            RearrangeImage(),
            Rearrange("b h w c -> b c h w"),
        )
        # Mirror: tokens -> transformer -> MLP up to patch pixels -> unpatchify.
        self.decoder = nn.Sequential(
            Rearrange("b c h w -> b (h w) c"),
            Transformer(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult, layers=layers),
            nn.Sequential(
                Linear(dim, dim * 4, bias=False),
                nn.Tanh(),
                Linear(dim * 4, input_dim, bias=False),
            ),
            RearrangeImage(),
            Rearrange("b h w (p1 p2 c) -> b c (h p1) (w p2)", p1=patch_size, p2=patch_size),
        )

    def get_encoded_fmap_size(self, image_size):
        return image_size // self.patch_size

    @property
    def last_dec_layer(self):
        # Final projection weight inside the decoder's MLP head.
        return self.decoder[-3][-1].weight

    def encode(self, x):
        return self.encoder(x)

    def decode(self, x):
        return self.decoder(x)
# main vqgan-vae classes
class NullVQGanVAE(nn.Module):
    """Identity stand-in used when no VQGAN-VAE is attached."""

    def __init__(self, *, channels):
        super().__init__()
        self.encoded_dim = channels  # pass-through: "latent" dim equals channels
        self.layers = 0

    def get_encoded_fmap_size(self, size):
        # No downsampling.
        return size

    def copy_for_eval(self):
        return self

    def encode(self, x):
        return x

    def decode(self, x):
        return x
class VQGanVAE(nn.Module):
    """VQGAN-VAE: encoder/decoder with a vector-quantized bottleneck, trained
    with reconstruction, perceptual (VGG), commitment and adversarial losses.

    Keyword arguments prefixed ``vq_`` / ``encdec_`` are routed to the vector
    quantizer / encoder-decoder respectively.
    """

    def __init__(
        self,
        *,
        dim,
        image_size,
        channels=3,
        layers=4,
        l2_recon_loss=False,
        use_hinge_loss=True,
        vgg=None,
        vq_codebook_dim=256,
        vq_codebook_size=512,
        vq_decay=0.8,
        vq_commitment_weight=1.0,
        vq_kmeans_init=True,
        vq_use_cosine_sim=True,
        use_vgg_and_gan=True,
        vae_type="resnet",
        discr_layers=4,
        **kwargs,
    ):
        super().__init__()
        # Route prefixed kwargs to their sub-modules.
        vq_kwargs, kwargs = groupby_prefix_and_trim("vq_", kwargs)
        encdec_kwargs, kwargs = groupby_prefix_and_trim("encdec_", kwargs)
        self.image_size = image_size
        self.channels = channels
        self.codebook_size = vq_codebook_size
        if vae_type == "resnet":
            enc_dec_klass = ResnetEncDec
        elif vae_type == "vit":
            enc_dec_klass = ViTEncDec
        else:
            raise ValueError(f"{vae_type} not valid")
        self.enc_dec = enc_dec_klass(dim=dim, channels=channels, layers=layers, **encdec_kwargs)
        self.vq = VQ(
            dim=self.enc_dec.encoded_dim,
            codebook_dim=vq_codebook_dim,
            codebook_size=vq_codebook_size,
            decay=vq_decay,
            commitment_weight=vq_commitment_weight,
            accept_image_fmap=True,
            kmeans_init=vq_kmeans_init,
            use_cosine_sim=vq_use_cosine_sim,
            **vq_kwargs,
        )
        # reconstruction loss
        self.recon_loss_fn = F.mse_loss if l2_recon_loss else F.l1_loss
        # turn off GAN and perceptual loss if grayscale
        self.vgg = None
        self.discr = None
        self.use_vgg_and_gan = use_vgg_and_gan
        if not use_vgg_and_gan:
            return
        # perceptual loss (pretrained VGG16 with the last classifier layers dropped)
        if exists(vgg):
            self.vgg = vgg
        else:
            self.vgg = flowvision.models.vgg16(pretrained=True)
            self.vgg.classifier = nn.Sequential(*self.vgg.classifier[:-2])
        # gan related losses
        layer_mults = list(map(lambda t: 2 ** t, range(discr_layers)))
        layer_dims = [dim * mult for mult in layer_mults]
        dims = (dim, *layer_dims)
        self.discr = Discriminator(dims=dims, channels=channels)
        self.discr_loss = hinge_discr_loss if use_hinge_loss else bce_discr_loss
        self.gen_loss = hinge_gen_loss if use_hinge_loss else bce_gen_loss

    @property
    def encoded_dim(self):
        return self.enc_dec.encoded_dim

    def get_encoded_fmap_size(self, image_size):
        return self.enc_dec.get_encoded_fmap_size(image_size)

    def copy_for_eval(self):
        # Deep-copy without the training-only discriminator/VGG, in eval mode.
        device = next(self.parameters()).device
        vae_copy = copy.deepcopy(self.cpu())
        if vae_copy.use_vgg_and_gan:
            del vae_copy.discr
            del vae_copy.vgg
        vae_copy.eval()
        return vae_copy.to(device)

    # remove_vgg keeps the (frozen, pretrained) VGG out of checkpoints.
    @remove_vgg
    def state_dict(self, *args, **kwargs):
        return super().state_dict(*args, **kwargs)

    @remove_vgg
    def load_state_dict(self, *args, **kwargs):
        return super().load_state_dict(*args, **kwargs)

    @property
    def codebook(self):
        return self.vq.codebook

    def encode(self, fmap):
        fmap = self.enc_dec.encode(fmap)
        return fmap

    def decode(self, fmap, return_indices_and_loss=False):
        # Quantize, then decode back to pixel space.
        fmap, indices, commit_loss = self.vq(fmap)
        fmap = self.enc_dec.decode(fmap)
        if not return_indices_and_loss:
            return fmap
        return fmap, indices, commit_loss

    def forward(
        self,
        img,
        return_loss=False,
        return_discr_loss=False,
        return_recons=False,
        add_gradient_penalty=True,
    ):
        """Reconstruct ``img``; optionally return the autoencoder loss
        (``return_loss``) or the discriminator loss (``return_discr_loss``),
        exactly one of which may be requested.
        """
        _, channels, height, width, _ = *img.shape, img.device
        # NOTE(review): the message below is missing an ``f`` prefix, so
        # ``{self.image_size}`` is printed literally rather than interpolated.
        assert (
            height == self.image_size and width == self.image_size
        ), "height and width of input image must be equal to {self.image_size}"
        assert (
            channels == self.channels
        ), "number of channels on image or sketch is not equal to the channels set on this VQGanVAE"
        fmap = self.encode(img)
        fmap, indices, commit_loss = self.decode(fmap, return_indices_and_loss=True)
        if not return_loss and not return_discr_loss:
            return fmap
        assert (
            return_loss ^ return_discr_loss
        ), "you should either return autoencoder loss or discriminator loss, but not both"
        # whether to return discriminator loss
        if return_discr_loss:
            assert exists(self.discr), "discriminator must exist to train it"
            # Reconstruction is treated as a constant; only the image needs grads
            # (for the gradient penalty).
            fmap.detach_()
            img.requires_grad_()
            fmap_discr_logits, img_discr_logits = map(self.discr, (fmap, img))
            discr_loss = self.discr_loss(fmap_discr_logits, img_discr_logits)
            if add_gradient_penalty:
                gp = gradient_penalty(img, img_discr_logits)
                loss = discr_loss + gp
            if return_recons:
                return loss, fmap
            return loss
        # reconstruction loss
        recon_loss = self.recon_loss_fn(fmap, img)
        # early return if training on grayscale
        if not self.use_vgg_and_gan:
            if return_recons:
                return recon_loss, fmap
            return recon_loss
        # perceptual loss
        img_vgg_input = img
        fmap_vgg_input = fmap
        if img.shape[1] == 1:
            # handle grayscale for vgg
            img_vgg_input, fmap_vgg_input = map(
                lambda t: repeat(t, "b 1 ... -> b c ...", c=3), (img_vgg_input, fmap_vgg_input)
            )
        img_vgg_feats = self.vgg(img_vgg_input)
        recon_vgg_feats = self.vgg(fmap_vgg_input)
        perceptual_loss = F.mse_loss(img_vgg_feats, recon_vgg_feats)
        # generator loss
        gen_loss = self.gen_loss(self.discr(fmap))
        # calculate adaptive weight: balance generator vs perceptual gradients
        # at the last decoder layer, clamped for stability.
        last_dec_layer = self.enc_dec.last_dec_layer
        norm_grad_wrt_gen_loss = grad_layer_wrt_loss(gen_loss, last_dec_layer).norm(p=2)
        norm_grad_wrt_perceptual_loss = grad_layer_wrt_loss(perceptual_loss, last_dec_layer).norm(
            p=2
        )
        adaptive_weight = safe_div(norm_grad_wrt_perceptual_loss, norm_grad_wrt_gen_loss)
        adaptive_weight.clamp_(max=1e4)
        # combine losses
        loss = recon_loss + perceptual_loss + commit_loss + adaptive_weight * gen_loss
        if return_recons:
            return loss, fmap
        return loss
| 21,938 | 26.219603 | 100 | py |
libai | libai-main/projects/DALLE2/dalle2/rotary_embedding_flow.py | from inspect import isfunction
from math import pi
import oneflow as flow
from einops import rearrange, repeat
from oneflow import einsum, nn
from libai.utils import distributed as dist
# helper functions
def exists(val):
    """Return True when *val* is not ``None``."""
    return not (val is None)
def broadcat(tensors, dim=-1):
    """Concatenate *tensors* along *dim*, broadcasting all other dimensions.

    All tensors must share the same rank; on every non-concat dimension each
    size must be either 1 or one common maximum (standard broadcasting rules).
    """
    num_tensors = len(tensors)
    shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
    assert len(shape_lens) == 1, "tensors must all have the same number of dimensions"
    shape_len = list(shape_lens)[0]
    # Normalize a negative concat dim.
    dim = (dim + shape_len) if dim < 0 else dim
    # dims[i] collects the i-th dimension size of every tensor.
    dims = list(zip(*map(lambda t: list(t.shape), tensors)))
    expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
    assert all(
        [*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]
    ), "invalid dimensions for broadcastable concatentation"
    # Expand each non-concat dimension to its maximum observed size.
    max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
    expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
    expanded_dims.insert(dim, (dim, dims[dim]))
    expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
    tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
    return flow.cat(tensors, dim=dim)
# rotary embedding helper functions
def rotate_half(x):
    # Treat the last dim as interleaved (real, imag) pairs and rotate each
    # pair by 90 degrees: (x1, x2) -> (-x2, x1).
    x = rearrange(x, "... (d r) -> ... d r", r=2)
    x1, x2 = [i.squeeze(-1) for i in x.chunk(2, -1)]  # x.unbind(dim = -1)
    x = flow.stack((-x2, x1), dim=-1)
    return rearrange(x, "... d r -> ... (d r)")
def apply_rotary_emb(freqs, t, start_index=0):
    # Apply rotary embedding to feature dims [start_index, start_index + rot_dim)
    # of *t*; feature dims outside that slice pass through unchanged.
    freqs = freqs.to(t.dtype)
    rot_dim = freqs.shape[-1]
    end_index = start_index + rot_dim
    assert (
        rot_dim <= t.shape[-1]
    ), f"feature dimension {t.shape[-1]} is not of sufficient size to \
    rotate in all the positions {rot_dim}"
    t_left, t, t_right = t[..., :start_index], t[..., start_index:end_index], t[..., end_index:]
    # standard RoPE rotation: t * cos + rotate_half(t) * sin
    t = (t * freqs.cos()) + (rotate_half(t) * freqs.sin())
    return flow.cat((t_left, t, t_right), dim=-1)
# learned rotation helpers
def apply_learned_rotations(rotations, t, start_index=0, freq_ranges=None):
    # Apply learned per-position rotations, optionally scaled per frequency band,
    # after duplicating each rotation for its interleaved pair.
    if exists(freq_ranges):
        rotations = einsum("..., f -> ... f", rotations, freq_ranges)
        rotations = rearrange(rotations, "... r f -> ... (r f)")
    rotations = repeat(rotations, "... n -> ... (n r)", r=2)
    return apply_rotary_emb(rotations, t, start_index=start_index)
# classes
class RotaryEmbedding(nn.Module):
    """Rotary position embedding (RoFormer) with per-sequence-length caching.

    ``freqs_for`` selects the frequency schedule: "lang" (inverse powers of
    ``theta``), "pixel" (linear up to ``max_freq``/2 * pi) or "constant".
    """
    def __init__(
        self,
        dim,
        custom_freqs=None,
        freqs_for="lang",
        theta=10000,
        max_freq=10,
        num_freqs=1,
        learned_freq=False,
    ):
        super().__init__()
        if exists(custom_freqs):
            freqs = custom_freqs
        elif freqs_for == "lang":
            freqs = 1.0 / (theta ** (flow.arange(0, dim, 2)[: (dim // 2)].float() / dim))
        elif freqs_for == "pixel":
            freqs = flow.linspace(1.0, max_freq / 2, dim // 2) * pi
        elif freqs_for == "constant":
            freqs = flow.ones(num_freqs).float()
        else:
            raise ValueError(f"unknown modality {freqs_for}")
        # cache of computed freqs keyed by sequence length (see forward)
        self.cache = dict()
        if learned_freq:
            self.freqs = nn.Parameter(freqs)
        else:
            self.register_buffer("freqs", freqs)
    def rotate_queries_or_keys(self, t, seq_dim=-2):
        """Apply rotary embedding along *seq_dim* of queries or keys *t*."""
        seq_len = t.shape[seq_dim]
        # positions are built lazily (lambda) so they are skipped on cache hits
        freqs = self.forward(
            lambda: flow.arange(
                seq_len,
                placement=dist.get_layer_placement(0),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            ),
            cache_key=seq_len,
        )
        return apply_rotary_emb(freqs, t)
    def forward(self, t, cache_key=None):
        """Return (and cache) the per-position frequencies for positions *t*."""
        if exists(cache_key) and cache_key in self.cache:
            return self.cache[cache_key]
        if isfunction(t):
            t = t()
        freqs = self.freqs
        # outer product position x frequency, then duplicate for interleaved pairs
        freqs = flow.einsum("..., f -> ... f", t.to(freqs.dtype), freqs)
        freqs = repeat(freqs, "... n -> ... (n r)", r=2)
        if exists(cache_key):
            self.cache[cache_key] = freqs
        return freqs
| 4,157 | 30.029851 | 96 | py |
libai | libai-main/projects/DALLE2/dalle2/tokenizer.py | # take from https://github.com/openai/CLIP/blob/main/clip/simple_tokenizer.py
# to give users a quick easy start to training DALL-E without doing BPE
import gzip
import html
import os
from functools import lru_cache
from pathlib import Path
import ftfy
import oneflow as flow
import regex as re
from .utils import import_or_print_error
# OpenAI simple tokenizer
@lru_cache()
def default_bpe():
    """Absolute path of the BPE vocabulary file bundled next to this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "data/bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
    """Map every byte value (0..255) to a printable unicode character.

    Printable latin-1 bytes map to themselves; the remaining bytes are shifted
    into codepoints >= 256 so BPE symbols never contain whitespace or controls.
    """
    printable = set(
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    mapping = {}
    shift = 0
    for byte in range(2 ** 8):
        if byte in printable:
            mapping[byte] = chr(byte)
        else:
            mapping[byte] = chr(2 ** 8 + shift)
            shift += 1
    return mapping
def get_pairs(word):
    """Return the set of adjacent symbol bigrams in *word* (a sequence of symbols)."""
    bigrams = set()
    prev = word[0]
    for sym in word[1:]:
        bigrams.add((prev, sym))
        prev = sym
    return bigrams
def basic_clean(text):
    # Repair mojibake/encoding artifacts (ftfy) and doubly-escaped HTML
    # entities, then trim surrounding whitespace.
    text = ftfy.fix_text(text)
    text = html.unescape(html.unescape(text))
    return text.strip()
def whitespace_clean(text):
    """Collapse every run of whitespace to a single space and trim the ends."""
    return re.sub(r"\s+", " ", text).strip()
class SimpleTokenizer(object):
    """Byte-pair-encoding tokenizer matching OpenAI CLIP's 49408-token vocabulary.

    Vocabulary layout: 256 byte symbols, their 256 word-final ("</w>") variants,
    the learned BPE merges, then the special tokens ``<|startoftext|>`` (id 49406)
    and ``<|endoftext|>`` (id 49407).
    """

    def __init__(self, bpe_path=default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = (
            gzip.open(bpe_path).read().decode("utf-8").split("\n")
        )  # Path(bpe_path).read_text(encoding='utf8').split('\n')
        # drop the header line and anything beyond the merge budget
        merges = merges[1 : 49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + "</w>" for v in vocab]
        for merge in merges:
            vocab.append("".join(merge))
        vocab.extend(["<|startoftext|>", "<|endoftext|>"])
        self.vocab_size = 49408
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # special tokens never get merged, so pre-seed the BPE cache with them
        self.cache = {"<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>"}
        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",  # noqa
            re.IGNORECASE,
        )

    def bpe(self, token):
        """Return the space-joined BPE subwords for one pre-tokenized word."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        pairs = get_pairs(word)
        if not pairs:
            return token + "</w>"
        # greedily merge the lowest-ranked bigram until no known merge remains
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Encode *text* into a list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def decode(self, tokens, remove_start_end=True, pad_tokens=set()):
        """Decode token ids back to text.

        ``pad_tokens`` is only read, never mutated, so the mutable default is safe.
        """
        if flow.is_tensor(tokens):
            tokens = tokens.tolist()
        if remove_start_end:
            # Strip <|startoftext|> (49406), <|endoftext|> (49407) and 0 (padding).
            # Bug fix: this previously filtered 40407 — an ordinary BPE id — so
            # <|endoftext|> leaked into the decoded text.
            # NOTE(review): id 0 is also the BPE id of "!", which is dropped here.
            tokens = [token for token in tokens if token not in (49406, 49407, 0)]
        text = "".join([self.decoder[token] for token in tokens if token not in pad_tokens])
        text = (
            bytearray([self.byte_decoder[c] for c in text])
            .decode("utf-8", errors="replace")
            .replace("</w>", " ")
        )
        return text

    def tokenize(self, texts, context_length=256, truncate_text=False):
        """Encode one or more strings into a zero-padded (batch, context_length) tensor.

        Raises RuntimeError when a text exceeds *context_length* and
        ``truncate_text`` is False.
        """
        if isinstance(texts, str):
            texts = [texts]
        all_tokens = [self.encode(text) for text in texts]
        result = flow.zeros(len(all_tokens), context_length, dtype=flow.long)
        for i, tokens in enumerate(all_tokens):
            if len(tokens) > context_length:
                if truncate_text:
                    tokens = tokens[:context_length]
                else:
                    raise RuntimeError(
                        f"Input {texts[i]} is too long for context length {context_length}"
                    )
            result[i, : len(tokens)] = flow.tensor(tokens)
        return result
# tokenizer = SimpleTokenizer()
# YTTM tokenizer
class YttmTokenizer:
    """Tokenizer backed by a trained YouTokenToMe BPE model file."""
    def __init__(self, bpe_path=None):
        bpe_path = Path(bpe_path)
        assert bpe_path.exists(), f"BPE json path {str(bpe_path)} does not exist"
        # youtokentome is an optional dependency; fail with an install hint
        self.yttm = import_or_print_error(
            "youtokentome", "you need to install youtokentome by `pip install youtokentome`"
        )
        tokenizer = self.yttm.BPE(model=str(bpe_path))
        self.tokenizer = tokenizer
        self.vocab_size = tokenizer.vocab_size()
    def decode(self, tokens, pad_tokens=set()):
        # pad_tokens is only read (unioned with the pad id 0), never mutated
        if flow.is_tensor(tokens):
            tokens = tokens.tolist()
        return self.tokenizer.decode(tokens, ignore_ids=pad_tokens.union({0}))
    def encode(self, texts):
        # returns a list of 1-D id tensors, one per input text
        encoded = self.tokenizer.encode(texts, output_type=self.yttm.OutputType.ID)
        return list(map(flow.tensor, encoded))
    def tokenize(self, texts, context_length=256, truncate_text=False):
        # Encode one or more strings into a zero-padded (batch, context_length)
        # tensor; raises unless truncate_text when a text is too long.
        if isinstance(texts, str):
            texts = [texts]
        all_tokens = self.encode(texts)
        result = flow.zeros(len(all_tokens), context_length, dtype=flow.long)
        for i, tokens in enumerate(all_tokens):
            if len(tokens) > context_length:
                if truncate_text:
                    tokens = tokens[:context_length]
                else:
                    raise RuntimeError(
                        f"Input {texts[i]} is too long for context length {context_length}"
                    )
            result[i, : len(tokens)] = flow.tensor(tokens)
        return result
| 7,011 | 30.872727 | 119 | py |
libai | libai-main/projects/DALLE2/dalle2/dalle2_loader.py | import logging
import oneflow as flow
from oneflow.framework.check_point_v2 import _broadcast_py_object
import libai.utils.distributed as dist
from libai.models.build import build_model
from libai.models.utils.model_loader.base_loader import (
ModelLoaderHuggerFace,
_load_state_dict_into_model,
)
logger = logging.getLogger("libai.dalle2." + __name__)
class Dalle2ModelLoader(ModelLoaderHuggerFace):
    """Load pretrained DALLE-2 PyTorch checkpoints (prior + decoder) into LiBai.

    Rank 0 reads and converts both torch checkpoints; the merged state dict is
    then globalized across ranks and loaded into the freshly built model.
    """
    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)
        # no base-model prefix stripping is needed for this model
        self.base_model_prefix_1 = ""
        self.base_model_prefix_2 = ""
    def _convert_state_dict(self, state_dict, module="prior"):
        """Rename torch checkpoint keys to LiBai names for one sub-module.

        CLIP weights are skipped; ``.g`` scale params become ``.weight`` and
        attention ``gamma``/``beta`` become ``weight``/``bias``. Keys are
        prefixed with the sub-module name ("prior." or "decoder.").
        """
        old_keys = []
        new_keys = []
        if module == "prior":
            for k in state_dict.keys():
                if "clip." in k:
                    continue
                old_keys.append(k)
                if k.endswith(".g"):
                    # drop the trailing "g", keep the dot: ".g" -> ".weight"
                    k = k[:-1] + "weight"
                elif k.startswith("net.causal_transformer"):
                    if k.endswith("gamma"):
                        k = k[:-5] + "weight"
                    elif k.endswith("beta"):
                        k = k[:-4] + "bias"
                new_keys.append("prior." + k)
        elif module == "decoder":
            for k in state_dict.keys():
                if "clip." in k:
                    continue
                old_keys.append(k)
                if k.endswith(".g"):
                    k = k[:-1] + "weight"
                elif "cross_attn" in k:
                    if k.endswith("gamma"):
                        k = k[:-5] + "weight"
                    elif k.endswith("beta"):
                        k = k[:-4] + "bias"
                new_keys.append("decoder." + k)
        ret_state_dict = {}
        # pop from the source dict so converted tensors are not held twice
        for old_key, new_key in zip(old_keys, new_keys):
            ret_state_dict[new_key] = state_dict.pop(old_key)
        return ret_state_dict
    def load(self):
        """Build the LiBai model, load converted weights, return it in eval mode."""
        if dist.is_main_process():
            # prior: weights live under the checkpoint's "ema_model" entry
            logger.info("loading torch model prior...")
            torch_state_dict = self._load_torch_state_dict(self.libai_cfg.model.prior_weight_path)[
                "ema_model"
            ]
            logger.info("converting torch model prior into oneflow model...")
            flow_state_dict = self._convert_tensors(torch_state_dict)
            prior_state_dict = self._convert_state_dict(flow_state_dict)
            # decoder
            logger.info("loading torch model decoder...")
            torch_state_dict = self._load_torch_state_dict(self.libai_cfg.model.decoder_weight_path)
            flow_state_dict = self._convert_tensors(torch_state_dict)
            logger.info("converting torch model decoder into oneflow model...")
            decoder_state_dict = self._convert_state_dict(flow_state_dict, module="decoder")
            flow_state_dict = {**prior_state_dict, **decoder_state_dict}
        else:
            # non-main ranks: weights are distributed via _state_dict_to_global below
            flow_state_dict = None
        logger.info("building LiBai model...")
        self.libai_cfg = _broadcast_py_object(self.libai_cfg, src=0)
        self.model = build_model(self.model)
        self.model._apply(dist.convert_to_distributed_default_setting)
        self.model = self.model.eval()
        flow.cuda.empty_cache()
        # State_dict to global
        logger.info("transfering state_dict local to global...")
        flow_state_dict = self._state_dict_to_global(flow_state_dict, mode="pytorch")  # oom
        # Load
        # (
        #     model,
        #     missing_keys,
        #     unexpected_keys,
        #     mismatched_keys,
        #     error_msgs,
        # ) = self._load_pretrained_model(self.model, flow_state_dict, self.pretrained_model_path)
        logger.info("loading model weights into LiBai...")
        _load_state_dict_into_model(self.model, flow_state_dict, "")
        return self.model
| 3,927 | 39.494845 | 100 | py |
libai | libai-main/projects/DALLE2/dalle2/models.py | import math
import random
from contextlib import contextmanager
from functools import partial, wraps
import flowvision.transforms as T
import kornia.augmentation as K
import numpy as np
import oneflow as flow
import oneflow.nn.functional as F
from einops import rearrange, reduce, repeat
from kornia.filters import gaussian_blur2d
from omegaconf import ListConfig
from oneflow import einsum, nn
from oneflow.nn import Conv2d, ConvTranspose2d, GroupNorm
from oneflow.nn.functional import layer_norm
from resize_right import resize
from tqdm.auto import tqdm
from libai.layers import Embedding, LayerNorm, Linear
from libai.utils import distributed as dist
from .einops_exts import EinopsToAndFrom, Rearrange, check_shape, rearrange_many, repeat_many
from .rotary_embedding_flow import RotaryEmbedding
from .tokenizer import SimpleTokenizer
from .vqgan_vae import NullVQGanVAE, VQGanVAE
# rotary embeddings
# constants
def get_default_sbp():
    # Default SBP signature: broadcast across both parallel dimensions.
    return dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
def get_default_placement():
    # Placement of pipeline stage 0, used for freshly created global tensors.
    return dist.get_layer_placement(0)
# nats-per-bit conversion factor (1 / ln 2)
NAT = 1.0 / math.log(2.0)
# NOTE(review): seeding at import time is a global side effect on every
# consumer of this module — confirm this is intended.
random.seed(666)
np.random.seed(6666)
# helper functions
def exists(val):
    """True unless *val* is None."""
    return val is not None
def identity(t, *args, **kwargs):
    """Return *t* unchanged; any extra arguments are accepted and ignored."""
    return t
def first(arr, d=None):
    """First element of *arr*, or the default *d* when *arr* is empty."""
    return d if len(arr) == 0 else arr[0]
def maybe(fn):
    """Lift *fn* so that a None argument passes through untouched."""

    @wraps(fn)
    def inner(x):
        if x is None:
            return x
        return fn(x)

    return inner
def default(val, d):
    """Return *val* unless it is None, else the fallback *d* (called if callable)."""
    if val is not None:
        return val
    return d() if callable(d) else d
def cast_tuple(val, length=None):
    # Coerce *val* to a tuple; scalars are replicated *length* times (default 1).
    # OmegaConf's ListConfig behaves like a list but is not one, so check both.
    if isinstance(val, list) or isinstance(val, ListConfig):
        val = tuple(val)
    out = val if isinstance(val, tuple) else ((val,) * default(length, 1))
    if exists(length):
        # a tuple passed in must already have the requested length
        assert len(out) == length
    return out
def module_device(module):
    """Device of the module's first parameter."""
    first_param = next(module.parameters())
    return first_param.device
def zero_init_(m):
    # In-place: zero a layer's weight (and bias, when present).
    nn.init.zeros_(m.weight)
    if exists(m.bias):
        nn.init.zeros_(m.bias)
@contextmanager
def null_context(*args, **kwargs):
    """A do-nothing context manager; accepts and ignores any arguments."""
    yield
def eval_decorator(fn):
    """Run *fn* with the model switched to eval mode, then restore training mode."""

    def inner(model, *args, **kwargs):
        prev_mode = model.training
        model.eval()
        result = fn(model, *args, **kwargs)
        model.train(prev_mode)
        return result

    return inner
def is_list_str(x):
    """True when *x* is a list/tuple whose elements are all strings.

    An empty list/tuple vacuously counts. Uses isinstance (rather than
    ``type(el) == str``) so str subclasses are also accepted, and a generator
    instead of materialising an intermediate list.
    """
    if not isinstance(x, (list, tuple)):
        return False
    return all(isinstance(el, str) for el in x)
def pad_tuple_to_length(t, length, fillvalue=None):
    """Right-pad *t* with *fillvalue* up to *length*; no-op when already long enough."""
    deficit = length - len(t)
    if deficit <= 0:
        return t
    return (*t, *((fillvalue,) * deficit))
# for controlling freezing of CLIP
def set_module_requires_grad_(module, requires_grad):
    # In-place: toggle gradient tracking on every parameter of *module*.
    for param in module.parameters():
        param.requires_grad = requires_grad
def freeze_all_layers_(module):
    # Stop gradients for all parameters.
    set_module_requires_grad_(module, False)
def unfreeze_all_layers_(module):
    # Re-enable gradients for all parameters.
    set_module_requires_grad_(module, True)
def freeze_model_and_make_eval_(model):
    # Used to lock CLIP during training: eval mode plus frozen parameters.
    model.eval()
    freeze_all_layers_(model)
# tensor helpers
def log(t, eps=1e-12):
    # Numerically-safe log: clamp away zeros before taking the log.
    return flow.log(t.clamp(min=eps))
def l2norm(t):
    # Normalize the last dimension to unit L2 norm.
    return F.normalize(t, dim=-1)
def resize_image_to(image, target_image_size):
    # Resize (resize_right) only when the trailing spatial size differs.
    orig_image_size = image.shape[-1]
    if orig_image_size == target_image_size:
        return image
    scale_factors = target_image_size / orig_image_size
    return resize(image, scale_factors=scale_factors)
# image normalization functions
# ddpms expect images to be in the range of -1 to 1
# but CLIP may otherwise
def normalize_neg_one_to_one(img):
    """Map values from [0, 1] to [-1, 1], the range DDPMs expect."""
    return 2 * img - 1
def unnormalize_zero_to_one(normed_img):
    """Map values from [-1, 1] back to [0, 1]."""
    return (normed_img + 1) * 0.5
# classifier free guidance functions
def prob_mask_like(shape, prob, placement=None, sbp=None):
    # Boolean mask where each element is independently True with probability
    # *prob*; prob 0/1 short-circuit to constant masks.
    placement = placement or get_default_placement()
    sbp = sbp or get_default_sbp()
    if prob == 1:
        return flow.ones(shape, dtype=flow.bool, placement=placement, sbp=sbp)
    elif prob == 0:
        return flow.zeros(shape, dtype=flow.bool, placement=placement, sbp=sbp)
    else:
        return flow.zeros(shape, placement=placement, sbp=sbp).float().uniform_(0, 1) < prob
# gaussian diffusion helper functions
def extract(a, t, x_shape):
    # Gather per-timestep coefficients from schedule *a* at indices *t* and
    # reshape to (b, 1, 1, ...) so they broadcast against a batch of *x_shape*.
    b, *_ = t.shape
    out = a.gather(len(a.shape) - 1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))
def meanflat(x):
    # Mean over all non-batch dimensions.
    return x.mean(dim=tuple(range(1, len(x.shape))))
def normal_kl(mean1, logvar1, mean2, logvar2):
    # Elementwise KL divergence between two diagonal Gaussians (in nats).
    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + flow.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * flow.exp(-logvar2)
    )
def approx_standard_normal_cdf(x):
    # Tanh-based approximation of the standard normal CDF.
    return 0.5 * (1.0 + flow.tanh(((2.0 / math.pi) ** 0.5) * (x + 0.044715 * (x ** 3))))
def discretized_gaussian_log_likelihood(x, *, means, log_scales, thres=0.999):
    # DDPM decoder likelihood: log-prob of images discretized to 256 bins under
    # a Gaussian; *x* is expected in [-1, 1] (bin width 2/255).
    assert x.shape == means.shape == log_scales.shape
    # attempting to correct nan gradients when learned variance is turned on
    # in the setting of deepspeed fp16
    eps = 1e-12 if x.dtype == flow.float32 else 1e-3
    centered_x = x - means
    inv_stdv = flow.exp(-log_scales)
    plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
    cdf_plus = approx_standard_normal_cdf(plus_in)
    min_in = inv_stdv * (centered_x - 1.0 / 255.0)
    cdf_min = approx_standard_normal_cdf(min_in)
    log_cdf_plus = log(cdf_plus)
    log_one_minus_cdf_min = log(1.0 - cdf_min)
    cdf_delta = cdf_plus - cdf_min
    # edge bins absorb the tails beyond +/- thres
    log_probs = flow.where(
        x < -thres, log_cdf_plus, flow.where(x > thres, log_one_minus_cdf_min, log(cdf_delta, eps))
    )
    return log_probs
def cosine_beta_schedule(timesteps, s=0.008):
    """
    cosine schedule
    as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
    """
    steps = timesteps + 1
    x = flow.linspace(0, timesteps, steps, dtype=flow.float64)
    alphas_cumprod = flow.cos(((x / timesteps) + s) / (1 + s) * np.pi * 0.5) ** 2
    alphas_cumprod = alphas_cumprod / first(alphas_cumprod)
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    # clip so no single step destroys all signal
    return flow.clip(betas, 0, 0.999)
def linear_beta_schedule(timesteps):
    # Linear schedule from the original DDPM paper, rescaled so the endpoints
    # match the 1000-step reference regardless of *timesteps*.
    scale = 1000 / timesteps
    beta_start = scale * 0.0001
    beta_end = scale * 0.02
    return flow.linspace(beta_start, beta_end, timesteps, dtype=flow.float64)
def quadratic_beta_schedule(timesteps):
    # Quadratic schedule: linear in sqrt(beta), then squared.
    scale = 1000 / timesteps
    beta_start = scale * 0.0001
    beta_end = scale * 0.02
    return flow.linspace(beta_start ** 0.5, beta_end ** 0.5, timesteps, dtype=flow.float64) ** 2
def sigmoid_beta_schedule(timesteps):
    # Sigmoid ramp between beta_start and beta_end over [-6, 6].
    scale = 1000 / timesteps
    beta_start = scale * 0.0001
    beta_end = scale * 0.02
    betas = flow.linspace(-6, 6, timesteps, dtype=flow.float64)
    return flow.sigmoid(betas) * (beta_end - beta_start) + beta_start
class NoiseScheduler(nn.Module):
    """Precomputed DDPM noise schedule: betas, cumulative alphas, posterior
    coefficients, the training loss function and optional p2 loss reweighting.

    All schedule tensors are registered as float32 buffers so they follow the
    module across devices/placements.
    """
    def __init__(
        self, *, beta_schedule, timesteps, loss_type, p2_loss_weight_gamma=0.0, p2_loss_weight_k=1
    ):
        super().__init__()
        if beta_schedule == "cosine":
            betas = cosine_beta_schedule(timesteps)
        elif beta_schedule == "linear":
            betas = linear_beta_schedule(timesteps)
        elif beta_schedule == "quadratic":
            betas = quadratic_beta_schedule(timesteps)
        elif beta_schedule == "jsd":
            betas = 1.0 / flow.linspace(timesteps, 1, timesteps)
        elif beta_schedule == "sigmoid":
            betas = sigmoid_beta_schedule(timesteps)
        else:
            raise NotImplementedError()
        betas = betas
        alphas = 1.0 - betas
        alphas_cumprod = flow.cumprod(alphas, dim=0)
        # shift right by one, with cumprod at t=-1 defined as 1
        alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.0)
        (timesteps,) = betas.shape
        self.num_timesteps = int(timesteps)
        if loss_type == "l1":
            loss_fn = F.l1_loss
        elif loss_type == "l2":
            loss_fn = flow.nn.MSELoss()
        elif loss_type == "huber":
            loss_fn = F.smooth_l1_loss
        else:
            raise NotImplementedError()
        self.loss_type = loss_type
        self.loss_fn = loss_fn
        # register buffer helper function to cast double back to float
        # (shadows nn.Module.register_buffer locally, on purpose)
        def register_buffer(name, val):
            self.register_buffer(name, val.to(flow.float32))
        register_buffer("betas", betas)
        register_buffer("alphas_cumprod", alphas_cumprod)
        register_buffer("alphas_cumprod_prev", alphas_cumprod_prev)
        # calculations for diffusion q(x_t | x_{t-1}) and others
        register_buffer("sqrt_alphas_cumprod", flow.sqrt(alphas_cumprod))
        register_buffer("sqrt_one_minus_alphas_cumprod", flow.sqrt(1.0 - alphas_cumprod))
        register_buffer("log_one_minus_alphas_cumprod", flow.log(1.0 - alphas_cumprod))
        register_buffer("sqrt_recip_alphas_cumprod", flow.sqrt(1.0 / alphas_cumprod))
        register_buffer("sqrt_recipm1_alphas_cumprod", flow.sqrt(1.0 / alphas_cumprod - 1))
        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        register_buffer("posterior_variance", posterior_variance)
        register_buffer(
            "posterior_log_variance_clipped", flow.log(posterior_variance.clamp(min=1e-20))
        )
        register_buffer(
            "posterior_mean_coef1", betas * flow.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)
        )
        register_buffer(
            "posterior_mean_coef2",
            (1.0 - alphas_cumprod_prev) * flow.sqrt(alphas) / (1.0 - alphas_cumprod),
        )
        # p2 loss reweighting
        self.has_p2_loss_reweighting = p2_loss_weight_gamma > 0.0
        register_buffer(
            "p2_loss_weight",
            (p2_loss_weight_k + alphas_cumprod / (1 - alphas_cumprod)) ** -p2_loss_weight_gamma,
        )
    def q_posterior(self, x_start, x_t, t):
        """Mean/variance/log-variance of the posterior q(x_{t-1} | x_t, x_0)."""
        posterior_mean = (
            extract(self.posterior_mean_coef1, t, x_t.shape) * x_start
            + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
        return posterior_mean, posterior_variance, posterior_log_variance_clipped
    def q_sample(self, x_start, t, noise=None):
        """Sample x_t ~ q(x_t | x_0) by mixing *x_start* with Gaussian noise."""
        noise = default(noise, lambda: flow.randn_like(x_start))
        return (
            extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
            + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
        )
    def predict_start_from_noise(self, x_t, t, noise):
        """Invert q_sample: recover x_0 from x_t and the predicted noise."""
        return (
            extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
            - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
        )
    def p2_reweigh_loss(self, loss, times):
        """Apply per-timestep p2 loss weights (no-op when gamma == 0)."""
        if not self.has_p2_loss_reweighting:
            return loss
        return loss * extract(self.p2_loss_weight, times, loss.shape)
# diffusion prior
class ChanLayerNorm(nn.Module):
    """LayerNorm over the channel dim of NCHW feature maps with a learned scale."""
    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.eps = eps
        # per-channel scale, broadcastable over (N, C, H, W)
        self.weight = nn.Parameter(flow.ones(1, dim, 1, 1))
    def forward(self, x):
        # var = flow.var(x, dim = 1, unbiased = False, keepdim = True)
        # mean = flow.mean(x, dim = 1, keepdim = True)
        # return (x - mean) / (var + self.eps).sqrt() * self.weight
        # permute to NHWC so layer_norm normalizes the channel axis, then back
        x = x.permute(0, 2, 3, 1)
        out = layer_norm(x, normalized_shape=(x.shape[-1:]), eps=self.eps)
        return out.permute(0, 3, 1, 2) * self.weight
class Residual(nn.Module):
    """Wrap *fn* with a skip connection: forward returns fn(x, **kwargs) + x."""
    def __init__(self, fn):
        super().__init__()
        self.fn = fn
    def forward(self, x, **kwargs):
        return self.fn(x, **kwargs) + x
# mlp
class MLP(nn.Module):
    """Feed-forward stack: (Linear, SiLU, optional LayerNorm) x depth, then a
    final projection to *dim_out*. Hidden width is expansion_factor * dim_out.
    """
    def __init__(
        self,
        dim_in,
        dim_out,
        *,
        expansion_factor=2.0,
        depth=2,
        norm=False,
    ):
        super().__init__()
        hidden_dim = int(expansion_factor * dim_out)
        def norm_fn():
            # fresh LayerNorm per call so layers do not share parameters
            return LayerNorm(hidden_dim) if norm else nn.Identity()
        layers = [nn.Sequential(Linear(dim_in, hidden_dim), nn.SiLU(), norm_fn())]
        for _ in range(depth - 1):
            layers.append(nn.Sequential(Linear(hidden_dim, hidden_dim), nn.SiLU(), norm_fn()))
        layers.append(Linear(hidden_dim, dim_out))
        self.net = nn.Sequential(*layers)
    def forward(self, x):
        # cast to float32 before the first Linear
        return self.net(x.float())
# relative positional bias for causal transformer
class RelPosBias(nn.Module):
    """T5-style bucketed relative position bias added to attention logits."""
    def __init__(
        self,
        heads=8,
        num_buckets=32,
        max_distance=128,
    ):
        super().__init__()
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        # one learned bias per (bucket, head)
        self.relative_attention_bias = Embedding(num_buckets, heads)
    @staticmethod
    def _relative_position_bucket(relative_position, num_buckets=32, max_distance=128):
        # First num_buckets//2 buckets hold exact offsets; the rest are
        # log-spaced up to max_distance, clamped to the final bucket.
        n = -relative_position
        n = flow.max(n, flow.zeros_like(n)).long()
        max_exact = num_buckets // 2
        is_small = n < max_exact
        val_if_large = (
            max_exact
            + (
                flow.log(n.float() / max_exact)
                / math.log(max_distance / max_exact)
                * (num_buckets - max_exact)
            ).long()
        )
        val_if_large = flow.min(val_if_large, flow.zeros_like(val_if_large) + num_buckets - 1)
        return flow.where(is_small, n, val_if_large)
    def forward(self, i, j):
        # i: query length, j: key length; returns a (heads, i, j) bias tensor
        q_pos = flow.arange(i, dtype=flow.long)
        k_pos = flow.arange(j, dtype=flow.long)
        rel_pos = rearrange(k_pos, "j -> 1 j") - rearrange(q_pos, "i -> i 1")
        rp_bucket = self._relative_position_bucket(
            rel_pos, num_buckets=self.num_buckets, max_distance=self.max_distance
        )
        values = self.relative_attention_bias(
            rp_bucket.to_global(sbp=get_default_sbp(), placement=get_default_placement())
        )
        return rearrange(values, "i j h -> h i j")
# feedforward
class SwiGLU(nn.Module):
    """used successfully in https://arxiv.org/abs/2204.0231"""
    def forward(self, x):
        # split the last dim in half: one half gates the other via SiLU
        x, gate = x.chunk(2, dim=-1)
        return x * F.silu(gate)
def FeedForward(dim, mult=4, dropout=0.0, post_activation_norm=False):
    """post-activation norm https://arxiv.org/abs/2110.09456"""
    inner_dim = int(mult * dim)
    # the first Linear is 2x inner width because SwiGLU splits value/gate halves
    return nn.Sequential(
        LayerNorm(dim),
        Linear(dim, inner_dim * 2, bias=False, parallel="col"),
        SwiGLU(),
        LayerNorm(inner_dim) if post_activation_norm else nn.Identity(),
        nn.Dropout(dropout),
        Linear(inner_dim, dim, bias=False, parallel="row"),
    )
# attention
class Attention(nn.Module):
    """Multi-head attention with a learned null key/value, optional causal
    masking and rotary embeddings.

    Keys/values are single-headed (dim_head wide) and shared across all query
    heads (einsums use "b j d" for k/v).
    """
    def __init__(self, dim, *, dim_head=64, heads=8, dropout=0.0, causal=False, rotary_emb=None):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        inner_dim = dim_head * heads
        self.causal = causal
        self.norm = LayerNorm(dim)
        self.dropout = nn.Dropout(dropout)
        # learned null key/value, always attendable (classifier-free guidance aid)
        self.null_kv = nn.Parameter(flow.randn(2, dim_head))
        self.to_q = Linear(dim, inner_dim, bias=False, parallel="col")
        self.to_kv = Linear(dim, dim_head * 2, bias=False, parallel="col")
        self.rotary_emb = rotary_emb
        self.to_out = nn.Sequential(
            Linear(inner_dim, dim, bias=False, parallel="row"), LayerNorm(dim)
        )
    def forward(self, x, mask=None, attn_bias=None):
        b, n = x.shape[:2]
        x = self.norm(x)
        q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim=-1))
        q = rearrange(q, "b n (h d) -> b h n d", h=self.heads)
        q = q * self.scale
        # rotary embeddings
        if exists(self.rotary_emb):
            q, k = map(self.rotary_emb.rotate_queries_or_keys, (q, k))
        # prepend the null key/value so every query always has something to attend to
        nk, nv = repeat_many(self.null_kv.unbind(dim=-2), "d -> b 1 d", b=b)
        k = flow.cat((nk, k), dim=-2)
        v = flow.cat((nv, v), dim=-2)
        # calculate query / key similarities
        sim = einsum("b h i d, b j d -> b h i j", q, k)
        # relative positional encoding (T5 style)
        if exists(attn_bias):
            sim = sim + attn_bias
        # masking
        max_neg_value = -3.4028e38  # flow.finfo(sim.dtype).max
        if exists(mask):
            # pad the mask so the prepended null kv position is always attendable
            mask = F.pad(mask, (1, 0), value=1)
            mask = rearrange(mask, "b j -> b 1 1 j")
            sim = sim.masked_fill(1 - mask, max_neg_value)  # (1 - mask) stands in for ~mask
        if self.causal:
            i, j = sim.shape[-2:]
            causal_mask = flow.ones(
                (i, j), placement=get_default_placement(), sbp=get_default_sbp(), dtype=flow.int32
            ).triu(j - i + 1)
            sim = sim.masked_fill(causal_mask, max_neg_value)
        # attention
        attn = sim.softmax(dim=-1)
        attn = self.dropout(attn)
        # aggregate values
        out = einsum("b h i j, b j d -> b h i d", attn, v)
        out = rearrange(out, "b h n d -> b n (h d)")
        return self.to_out(out)
class CausalTransformer(nn.Module):
    """Stack of causal self-attention + FeedForward blocks (pre-norm, residual)
    with a shared T5 relative position bias and optional rotary embeddings.
    """
    def __init__(
        self,
        *,
        dim,
        depth,
        dim_head=64,
        heads=8,
        ff_mult=4,
        norm_out=True,
        attn_dropout=0.0,
        ff_dropout=0.0,
        final_proj=True,
        normformer=False,
        rotary_emb=True,
    ):
        super().__init__()
        self.rel_pos_bias = RelPosBias(heads=heads)
        # one rotary embedding instance shared across all layers
        rotary_emb = RotaryEmbedding(dim=min(32, dim_head)) if rotary_emb else None
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        Attention(
                            dim=dim,
                            causal=True,
                            dim_head=dim_head,
                            heads=heads,
                            dropout=attn_dropout,
                            rotary_emb=rotary_emb,
                        ),
                        FeedForward(
                            dim=dim,
                            mult=ff_mult,
                            dropout=ff_dropout,
                            post_activation_norm=normformer,
                        ),
                    ]
                )
            )
        self.norm = LayerNorm(dim) if norm_out else nn.Identity()
        self.project_out = Linear(dim, dim, bias=False) if final_proj else nn.Identity()
    def forward(
        self,
        x,
        mask=None,
    ):
        n = x.shape[1]
        # j = n + 1 keys: accounts for the null key/value prepended in Attention
        attn_bias = self.rel_pos_bias(n, n + 1)
        for attn, ff in self.layers:
            x = attn(x, mask=mask, attn_bias=attn_bias) + x
            x = ff(x) + x
        out = self.norm(x)
        return self.project_out(out)
class DiffusionPriorNetwork(nn.Module):
    """Causal-transformer denoiser for the DALLE-2 diffusion prior.

    Predicts the CLIP image embedding from a noised image embedding conditioned
    on text embedding, optional text encodings and the diffusion timestep.
    NOTE(review): ``max_text_len`` is accepted but never used in this class.
    """
    def __init__(
        self,
        dim,
        num_timesteps=None,
        num_time_embeds=1,
        num_image_embeds=1,
        num_text_embeds=1,
        max_text_len=256,
        **kwargs,
    ):
        super().__init__()
        self.num_time_embeds = num_time_embeds
        self.num_image_embeds = num_image_embeds
        self.num_text_embeds = num_text_embeds
        # each to_*_embeds maps a single vector to n tokens of width dim
        self.to_text_embeds = nn.Sequential(
            Linear(dim, dim * num_text_embeds) if num_text_embeds > 1 else nn.Identity(),
            Rearrange("b (n d) -> b n d", n=num_text_embeds),
        )
        self.to_time_embeds = nn.Sequential(
            Embedding(num_timesteps, dim * num_time_embeds)
            if exists(num_timesteps)
            else nn.Sequential(
                SinusoidalPosEmb(dim), MLP(dim, dim * num_time_embeds)
            ),  # also offer a continuous version of timestep embeddings, with a 2 layer MLP
            Rearrange("b (n d) -> b n d", n=num_time_embeds),
        )
        self.to_image_embeds = nn.Sequential(
            Linear(dim, dim * num_image_embeds) if num_image_embeds > 1 else nn.Identity(),
            Rearrange("b (n d) -> b n d", n=num_image_embeds),
        )
        # learned final query token whose output is the predicted image embedding
        self.learned_query = nn.Parameter(flow.randn(dim))
        self.causal_transformer = CausalTransformer(dim=dim, **kwargs)
    def forward_with_cond_scale(self, *args, cond_scale=1.0, **kwargs):
        """Classifier-free guidance: blend conditional and fully-dropped outputs."""
        logits = self.forward(*args, **kwargs)
        if cond_scale == 1:
            return logits
        null_logits = self.forward(*args, cond_drop_prob=1.0, **kwargs)
        return null_logits + (logits - null_logits) * cond_scale
    def forward(
        self,
        image_embed,
        diffusion_timesteps,
        *,
        text_embed,
        text_encodings=None,
        mask=None,
        cond_drop_prob=0.0,
    ):
        """Predict the denoised image embedding; image_embed is (batch, dim)."""
        batch, dim, dtype = *image_embed.shape, image_embed.dtype
        num_time_embeds, num_image_embeds, num_text_embeds = (
            self.num_time_embeds,
            self.num_image_embeds,
            self.num_text_embeds,
        )
        text_embed = self.to_text_embeds(text_embed)
        image_embed = self.to_image_embeds(image_embed)
        # make text encodings optional
        # although the paper seems to suggest it is present <--
        if not exists(text_encodings):
            text_encodings = flow.empty(
                (batch, 0, dim),
                placement=get_default_placement(),
                sbp=get_default_sbp(),
                dtype=dtype,
            )
        if not exists(mask):
            mask = flow.ones(
                (batch, text_encodings.shape[-2]),
                placement=get_default_placement(),
                sbp=get_default_sbp(),
                dtype=flow.bool,
            )
        # classifier free guidance
        # drop text conditioning for a random subset of the batch
        keep_mask = prob_mask_like((batch,), 1 - cond_drop_prob)
        keep_mask = rearrange(keep_mask, "b -> b 1").to_global(
            placement=get_default_placement(), sbp=get_default_sbp()
        )
        mask &= keep_mask
        keep_mask = repeat(keep_mask, "b 1 -> b n", n=num_text_embeds)
        mask = flow.cat((mask, keep_mask), dim=1)
        if exists(mask):
            attend_padding = (
                1 + num_time_embeds + num_image_embeds
            )  # 1 for learned queries + number of image embeds + time embeds
            mask = F.pad(mask.to(flow.int32), (0, attend_padding), value=1)
        time_embed = self.to_time_embeds(
            diffusion_timesteps.to_global(placement=get_default_placement(), sbp=get_default_sbp())
        )
        learned_queries = repeat(self.learned_query, "d -> b 1 d", b=batch)
        # token order: text encodings, text embed, time embed, image embed, query
        tokens = flow.cat(
            (text_encodings, text_embed, time_embed, image_embed, learned_queries), dim=-2
        )
        # attend
        tokens = self.causal_transformer(tokens, mask=mask)
        # get learned query, which should predict the image embedding (per DDPM timestep)
        pred_image_embed = tokens[..., -1, :]
        return pred_image_embed
class DiffusionPrior(nn.Module):
def __init__(
self,
net,
*,
clip=None,
image_embed_dim=None,
image_size=None,
image_channels=3,
timesteps=1000,
cond_drop_prob=0.0,
loss_type="l2",
predict_x_start=True,
beta_schedule="cosine",
condition_on_text_encodings=True,
sampling_clamp_l2norm=False,
training_clamp_l2norm=False,
init_image_embed_l2norm=False,
image_embed_scale=None,
clip_adapter_overrides=dict(),
):
super().__init__()
self.noise_scheduler = NoiseScheduler(
beta_schedule=beta_schedule, timesteps=timesteps, loss_type=loss_type
)
if exists(clip):
assert (
image_channels == clip.image_channels
), f"channels of image ({image_channels}) should be equal to the channels "
"that CLIP accepts ({clip.image_channels})"
freeze_model_and_make_eval_(clip)
self.clip = clip
else:
assert exists(
image_embed_dim
), "latent dimension must be given, if training prior network without CLIP given"
self.clip = None
self.net = net
self.image_embed_dim = default(image_embed_dim, lambda: clip.dim_latent)
self.channels = default(image_channels, lambda: clip.image_channels)
self.cond_drop_prob = cond_drop_prob
self.can_classifier_guidance = cond_drop_prob > 0.0
self.condition_on_text_encodings = condition_on_text_encodings
self.predict_x_start = predict_x_start
self.image_embed_scale = default(image_embed_scale, self.image_embed_dim ** 0.5)
# whether to force an l2norm, similar to clipping denoised, when sampling
self.sampling_clamp_l2norm = sampling_clamp_l2norm
self.training_clamp_l2norm = training_clamp_l2norm
self.init_image_embed_l2norm = init_image_embed_l2norm
# device tracker
self.register_buffer("_dummy", flow.tensor([True]), persistent=False)
def p_mean_variance(self, x, t, text_cond, clip_denoised=False, cond_scale=1.0):
assert not (
cond_scale != 1.0 and not self.can_classifier_guidance
), "the model was not trained with conditional dropout, "
"and thus one cannot use classifier free guidance (cond_scale anything other than 1)"
pred = self.net.forward_with_cond_scale(x, t, cond_scale=cond_scale, **text_cond)
if self.predict_x_start:
x_recon = pred
else:
x_recon = self.noise_scheduler.predict_start_from_noise(x, t=t, noise=pred)
if clip_denoised and not self.predict_x_start:
x_recon.clamp_(-1.0, 1.0)
if self.predict_x_start and self.sampling_clamp_l2norm:
x_recon = l2norm(x_recon) * self.image_embed_scale
model_mean, posterior_variance, posterior_log_variance = self.noise_scheduler.q_posterior(
x_start=x_recon, x_t=x, t=t
)
return model_mean, posterior_variance, posterior_log_variance
@flow.no_grad()
def p_sample(self, x, t, text_cond=None, clip_denoised=True, cond_scale=1.0):
b = x.shape[0]
model_mean, _, model_log_variance = self.p_mean_variance(
x=x, t=t, text_cond=text_cond, clip_denoised=clip_denoised, cond_scale=cond_scale
)
noise = flow.randn(*x.shape, placement=get_default_placement(), sbp=get_default_sbp())
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
@flow.no_grad()
def p_sample_loop(self, shape, text_cond, cond_scale=1.0):
b = shape[0]
image_embed = flow.randn(*shape, placement=get_default_placement(), sbp=get_default_sbp())
if self.init_image_embed_l2norm:
image_embed = l2norm(image_embed) * self.image_embed_scale
for i in tqdm(
reversed(range(0, self.noise_scheduler.num_timesteps)),
desc="sampling loop time step",
total=self.noise_scheduler.num_timesteps,
):
times = flow.full(
(b,), i, placement=get_default_placement(), sbp=get_default_sbp(), dtype=flow.long
)
image_embed = self.p_sample(
image_embed, times, text_cond=text_cond, cond_scale=cond_scale
)
return image_embed
def p_losses(self, image_embed, times, text_cond, noise=None):
noise = default(
noise,
lambda: flow.randn(
*image_embed.shape, placement=get_default_placement(), sbp=get_default_sbp()
),
)
image_embed_noisy = self.noise_scheduler.q_sample(x_start=image_embed, t=times, noise=noise)
pred = self.net(image_embed_noisy, times, cond_drop_prob=self.cond_drop_prob, **text_cond)
if self.predict_x_start and self.training_clamp_l2norm:
pred = l2norm(pred) * self.image_embed_scale
target = noise if not self.predict_x_start else image_embed
loss = self.noise_scheduler.loss_fn(pred, target)
return loss
@flow.no_grad()
@eval_decorator
def sample_batch_size(self, batch_size, text_cond, cond_scale=1.0):
shape = (batch_size, self.image_embed_dim)
img = flow.randn(*shape, placement=get_default_placement(), sbp=get_default_sbp())
for i in tqdm(
reversed(range(0, self.noise_scheduler.num_timesteps)),
desc="sampling loop time step",
total=self.noise_scheduler.num_timesteps,
):
img = self.p_sample(
img,
flow.full(
(batch_size,),
i,
placement=get_default_placement(),
sbp=get_default_sbp(),
dtype=flow.long,
),
text_cond=text_cond,
cond_scale=cond_scale,
)
return img
    @flow.no_grad()
    @eval_decorator
    def sample(
        self,
        text,
        num_samples_per_batch=2,
        cond_scale=1.0,
        text_embed=None,
        text_encodings=None,
        text_mask=None,
    ):
        """Sample image embeddings for ``text`` and rerank candidates with CLIP.

        Each input text is repeated ``num_samples_per_batch`` times, that many
        embeddings are sampled, and the one with the highest CLIP text-image
        cosine similarity per text is returned (shape: (batch, image_embed_dim)).
        """
        # in the paper, what they did was
        # sample 2 image embeddings, choose the top 1 similarity, as judged by CLIP
        text = repeat(text, "b ... -> (b r) ...", r=num_samples_per_batch)
        batch_size = text.shape[0]
        image_embed_dim = self.image_embed_dim
        if text_embed is None:
            # NOTE(review): when a precomputed text_embed is passed, text_encodings /
            # text_mask are used exactly as given by the caller
            assert self.clip is not None
            text_embed, text_encodings, text_mask = self.clip.embed_text(text)
        text_cond = dict(text_embed=text_embed)
        if self.condition_on_text_encodings:
            text_cond = {**text_cond, "text_encodings": text_encodings, "mask": text_mask}
        image_embeds = self.p_sample_loop(
            (batch_size, image_embed_dim), text_cond=text_cond, cond_scale=cond_scale
        )
        # retrieve original unscaled image embed
        image_embeds /= self.image_embed_scale
        text_embeds = text_cond["text_embed"]
        # regroup the r candidates per original text before scoring
        text_embeds = rearrange(text_embeds, "(b r) d -> b r d", r=num_samples_per_batch)
        image_embeds = rearrange(image_embeds, "(b r) d -> b r d", r=num_samples_per_batch)
        text_image_sims = einsum("b r d, b r d -> b r", l2norm(text_embeds), l2norm(image_embeds))
        top_sim_indices = text_image_sims.topk(k=1)[1]  # .indices
        top_sim_indices = repeat(top_sim_indices, "b 1 -> b 1 d", d=image_embed_dim)
        top_image_embeds = image_embeds.gather(1, top_sim_indices)
        return rearrange(top_image_embeds, "b 1 d -> b d")
def forward(
self,
text=None,
image=None,
text_embed=None, # allow for training on preprocessed CLIP text and image embeddings
image_embed=None,
text_encodings=None, # as well as CLIP text encodings
text_mask=None,
*args,
**kwargs,
):
assert exists(text) ^ exists(text_embed), "either text or text embedding must be supplied"
assert exists(image) ^ exists(image_embed), "either text or text embedding must be supplied"
assert not (
self.condition_on_text_encodings and (not exists(text_encodings) and not exists(text))
), "text encodings must be present if you specified to condition on it on initialization"
if exists(image):
image_embed, _ = self.clip.embed_image(image)
# calculate text conditionings, based on what is passed in
if exists(text):
text_embed, text_encodings, text_mask = self.clip.embed_text(text)
text_cond = dict(text_embed=text_embed)
if self.condition_on_text_encodings:
assert exists(
text_encodings
), "text encodings must be present for diffusion prior if specified"
text_cond = {**text_cond, "text_encodings": text_encodings, "mask": text_mask}
# timestep conditioning from ddpm
batch = image_embed.shape[0]
times = flow.randint(
0,
self.noise_scheduler.num_timesteps,
(batch,),
placement=get_default_placement(),
sbp=get_default_sbp(),
dtype=flow.long,
)
# scale image embed (Katherine)
image_embed *= self.image_embed_scale
# calculate forward loss
return self.p_losses(image_embed, times, text_cond=text_cond, *args, **kwargs)
# decoder: cascading-DDPM components (up/downsample layers, conv blocks, attention, Unet, Decoder)
def ConvTransposeUpsample(dim, dim_out=None):
    """2x spatial upsample via a stride-2, kernel-4 transposed convolution."""
    if dim_out is None:
        dim_out = dim
    return ConvTranspose2d(dim, dim_out, 4, 2, 1)
def NearestUpsample(dim, dim_out=None):
    """2x spatial upsample: nearest-neighbour resize followed by a 3x3 conv."""
    out_channels = default(dim_out, dim)
    return nn.Sequential(
        nn.Upsample(scale_factor=2, mode="nearest"),
        Conv2d(dim, out_channels, 3, padding=1),
    )
def Downsample(dim, *, dim_out=None):
    """2x spatial downsample via a stride-2, kernel-4 convolution."""
    return Conv2d(dim, default(dim_out, dim), 4, 2, 1)
class SinusoidalPosEmb(nn.Module):
    """Transformer-style sinusoidal embedding for scalar timesteps."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        half = self.dim // 2
        # geometric frequency spacing, as in "Attention Is All You Need"
        log_scale = math.log(10000) / (half - 1)
        freqs = flow.exp(
            flow.arange(half, placement=get_default_placement(), sbp=get_default_sbp())
            * -log_scale
        )
        angles = rearrange(x, "i -> i 1") * rearrange(freqs, "j -> 1 j")
        return flow.cat((angles.sin(), angles.cos()), dim=-1)
class Block(nn.Module):
    """conv -> group norm -> optional FiLM (scale, shift) -> SiLU."""

    def __init__(self, dim, dim_out, groups=8):
        super().__init__()
        self.project = Conv2d(dim, dim_out, 3, padding=1)
        self.norm = GroupNorm(groups, dim_out)
        self.act = nn.SiLU()

    def forward(self, x, scale_shift=None):
        out = self.norm(self.project(x))
        if scale_shift is not None:
            # FiLM-style modulation; (scale + 1) keeps identity at zero scale
            scale, shift = scale_shift
            out = out * (scale + 1) + shift
        return self.act(out)
class ResnetBlock(nn.Module):
    """Two conv Blocks with optional time-FiLM conditioning and cross-attention."""

    def __init__(self, dim, dim_out, *, cond_dim=None, time_cond_dim=None, groups=8):
        super().__init__()
        # projects the time embedding into a (scale, shift) pair for block1
        self.time_mlp = (
            nn.Sequential(nn.SiLU(), Linear(time_cond_dim, dim_out * 2))
            if exists(time_cond_dim)
            else None
        )
        # cross-attends the feature map to conditioning tokens when cond_dim is set
        self.cross_attn = (
            EinopsToAndFrom(
                "b c h w", "b (h w) c", CrossAttention(dim=dim_out, context_dim=cond_dim)
            )
            if exists(cond_dim)
            else None
        )
        self.block1 = Block(dim, dim_out, groups=groups)
        self.block2 = Block(dim_out, dim_out, groups=groups)
        # 1x1 projection on the residual path when channel counts differ
        self.res_conv = Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()

    def forward(self, x, time_emb=None, cond=None):
        scale_shift = None
        if exists(self.time_mlp) and exists(time_emb):
            film = rearrange(self.time_mlp(time_emb), "b c -> b c 1 1")
            scale_shift = film.chunk(2, dim=1)
        h = self.block1(x, scale_shift=scale_shift)
        if exists(self.cross_attn):
            assert exists(cond)
            h = self.cross_attn(h, context=cond) + h
        h = self.block2(h)
        return h + self.res_conv(x)
class CrossAttention(nn.Module):
    """Multi-head cross-attention from ``x`` to ``context``.

    A learned null key/value pair is prepended to the context so that, under
    classifier-free guidance, fully-masked contexts still attend to something.
    """

    def __init__(
        self, dim, *, context_dim=None, dim_head=64, heads=8, dropout=0.0, norm_context=False
    ):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        inner_dim = dim_head * heads
        context_dim = default(context_dim, dim)
        self.norm = LayerNorm(dim)
        self.norm_context = LayerNorm(context_dim) if norm_context else nn.Identity()
        # NOTE(review): self.dropout is created but never applied in forward()
        self.dropout = nn.Dropout(dropout)
        self.null_kv = nn.Parameter(
            flow.randn(2, dim_head, placement=get_default_placement(), sbp=get_default_sbp())
        )
        self.to_q = Linear(dim, inner_dim, bias=False, parallel="col")
        self.to_kv = Linear(context_dim, inner_dim * 2, bias=False, parallel="col")
        self.to_out = nn.Sequential(
            Linear(inner_dim, dim, bias=False, parallel="row"), LayerNorm(dim)
        )

    def forward(self, x, context, mask=None):
        # x: (b, n, dim); context: (b, m, context_dim); mask: (b, m) boolean-ish
        b, n = x.shape[:2]
        x = self.norm(x)
        context = self.norm_context(context)
        q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim=-1))
        q, k, v = rearrange_many((q, k, v), "b n (h d) -> b h n d", h=self.heads)
        # add null key / value for classifier free guidance in prior net
        nk, nv = repeat_many(self.null_kv.unbind(dim=-2), "d -> b h 1 d", h=self.heads, b=b)
        k = flow.cat((nk, k), dim=-2)
        v = flow.cat((nv, v), dim=-2)
        q = q * self.scale
        sim = einsum("b h i d, b h j d -> b h i j", q, k)
        # hard-coded float32 lowest value, used instead of flow.finfo
        max_neg_value = -3.4028e38  # -flow.finfo(sim.dtype).max
        if exists(mask):
            # pad with 1 so the prepended null kv slot is always attendable
            mask = F.pad(mask, (1, 0), value=1)
            mask = rearrange(mask, "b j -> b 1 1 j")
            # (1 - mask) selects the masked-out positions
            sim = sim.masked_fill(1 - mask, max_neg_value)
        attn = sim.softmax(dim=-1)
        out = einsum("b h i j, b h j d -> b h i d", attn, v)
        out = rearrange(out, "b h n d -> b n (h d)")
        return self.to_out(out)
class LinearAttention(nn.Module):
    """Linear (kernelized) self-attention over 2D feature maps."""

    def __init__(self, dim, dim_head=32, heads=8):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        inner_dim = dim_head * heads
        self.norm = ChanLayerNorm(dim)
        self.nonlin = nn.GELU()
        self.to_qkv = Conv2d(dim, inner_dim * 3, 1, bias=False)
        self.to_out = nn.Sequential(Conv2d(inner_dim, dim, 1, bias=False), ChanLayerNorm(dim))

    def forward(self, fmap):
        heads = self.heads
        height, width = fmap.shape[-2:]
        fmap = self.norm(fmap)
        q, k, v = self.to_qkv(fmap).chunk(3, dim=1)
        q, k, v = rearrange_many((q, k, v), "b (h c) x y -> (b h) (x y) c", h=heads)
        # softmax q over the feature axis and k over the sequence axis gives a
        # linear-time approximation of attention
        q = q.softmax(dim=-1)
        k = k.softmax(dim=-2)
        q = q * self.scale
        # aggregate values into a per-head context matrix, then read it out with q
        context = einsum("b n d, b n e -> b d e", k, v)
        out = einsum("b n d, b d e -> b n e", q, context)
        out = rearrange(out, "(b h) (x y) d -> b (h d) x y", h=heads, x=height, y=width)
        return self.to_out(self.nonlin(out))
class CrossEmbedLayer(nn.Module):
    """Multi-kernel-size convolutional embedding; concatenates per-scale maps."""

    def __init__(self, dim_in, kernel_sizes, dim_out=None, stride=2):
        super().__init__()
        # every kernel size must share the stride's parity so paddings line up
        assert all([*map(lambda t: (t % 2) == (stride % 2), kernel_sizes)])
        dim_out = default(dim_out, dim_in)
        kernel_sizes = sorted(kernel_sizes)
        num_scales = len(kernel_sizes)
        # halve the channel budget at each successive scale; remainder to the last
        dim_scales = [int(dim_out / (2 ** i)) for i in range(1, num_scales)]
        dim_scales = [*dim_scales, dim_out - sum(dim_scales)]
        self.convs = nn.ModuleList(
            [
                Conv2d(dim_in, scale_dim, kernel, stride=stride, padding=(kernel - stride) // 2)
                for kernel, scale_dim in zip(kernel_sizes, dim_scales)
            ]
        )

    def forward(self, x):
        return flow.cat([conv(x) for conv in self.convs], dim=1)
class Unet(nn.Module):
    """Conditional U-Net denoiser used by the decoder's DDPMs.

    Conditions on the diffusion timestep, optionally on a CLIP image embedding
    (as extra tokens and/or added to the time conditioning), optionally on text
    encodings (via cross-attention at the middle layers), and — for cascade
    stages — on a channel-concatenated low-resolution image.
    """

    def __init__(
        self,
        dim,
        *,
        image_embed_dim=None,
        text_embed_dim=None,
        cond_dim=None,
        num_image_tokens=4,
        num_time_tokens=2,
        out_dim=None,  # NOTE(review): unused; output channels come from channels_out
        dim_mults=(1, 2, 4, 8),
        channels=3,
        channels_out=None,
        self_attn=False,
        attn_dim_head=32,
        attn_heads=16,
        lowres_cond=False,  # condition on a low-res image (cascading DDPM stages)
        sparse_attn=False,
        attend_at_middle=True,  # NOTE(review): unused; mid self-attention is always created
        cond_on_text_encodings=False,
        max_text_len=256,
        cond_on_image_embeds=False,
        add_image_embeds_to_time=True,  # also sum image-embed hiddens into the time cond
        init_dim=None,
        init_conv_kernel_size=7,  # NOTE(review): unused; init conv is a CrossEmbedLayer
        resnet_groups=8,
        num_resnet_blocks=2,
        init_cross_embed_kernel_sizes=(3, 7, 15),
        cross_embed_downsample=False,
        cross_embed_downsample_kernel_sizes=(2, 4),
        memory_efficient=False,
        scale_skip_connection=False,
        nearest_upsample=False,
        final_conv_kernel_size=1,
        **kwargs,
    ):
        super().__init__()
        # save locals to take care of some hyperparameters for cascading DDPM
        self._locals = locals()
        del self._locals["self"]
        del self._locals["__class__"]
        # for eventual cascading diffusion
        self.lowres_cond = lowres_cond
        # determine dimensions
        self.channels = channels
        self.channels_out = default(channels_out, channels)
        # low-res conditioning image is concatenated on the channel axis
        init_channels = channels if not lowres_cond else channels * 2
        init_dim = default(init_dim, dim)
        self.init_conv = CrossEmbedLayer(
            init_channels, dim_out=init_dim, kernel_sizes=init_cross_embed_kernel_sizes, stride=1
        )
        dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
        in_out = list(zip(dims[:-1], dims[1:]))
        num_stages = len(in_out)
        # time, image embeddings, and optional text encoding
        cond_dim = default(cond_dim, dim)
        time_cond_dim = dim * 4
        self.to_time_hiddens = nn.Sequential(
            SinusoidalPosEmb(dim), Linear(dim, time_cond_dim, parallel="col"), nn.GELU()
        )
        self.to_time_tokens = nn.Sequential(
            Linear(time_cond_dim, cond_dim * num_time_tokens, parallel="row"),
            Rearrange("b (r d) -> b r d", r=num_time_tokens),
        )
        self.to_time_cond = nn.Sequential(Linear(time_cond_dim, time_cond_dim, parallel="row"))
        # image embedding -> conditioning tokens (identity if dims already match)
        self.image_to_tokens = (
            nn.Sequential(
                Linear(image_embed_dim, cond_dim * num_image_tokens),
                Rearrange("b (n d) -> b n d", n=num_image_tokens),
            )
            if cond_on_image_embeds and image_embed_dim != cond_dim
            else nn.Identity()
        )
        self.to_image_hiddens = (
            nn.Sequential(Linear(image_embed_dim, time_cond_dim), nn.GELU())
            if cond_on_image_embeds and add_image_embeds_to_time
            else None
        )
        self.norm_cond = LayerNorm(cond_dim)
        self.norm_mid_cond = LayerNorm(cond_dim)
        # text encoding conditioning (optional)
        self.text_to_cond = None
        if cond_on_text_encodings:
            assert exists(
                text_embed_dim
            ), "text_embed_dim must be given to the unet if cond_on_text_encodings is True"
            self.text_to_cond = Linear(text_embed_dim, cond_dim)
        # finer control over whether to condition on image embeddings and text encodings
        # so one can have the latter unets in the cascading DDPMs only focus on super-resoluting
        self.cond_on_text_encodings = cond_on_text_encodings
        self.cond_on_image_embeds = cond_on_image_embeds
        # for classifier free guidance
        self.null_image_embed = nn.Parameter(flow.randn(1, num_image_tokens, cond_dim))
        self.null_image_hiddens = nn.Parameter(flow.randn(1, time_cond_dim))
        self.max_text_len = max_text_len
        self.null_text_embed = nn.Parameter(flow.randn(1, max_text_len, cond_dim))
        # whether to scale skip connection, adopted in Imagen
        self.skip_connect_scale = 1.0 if not scale_skip_connection else (2 ** -0.5)
        # attention related params
        attn_kwargs = dict(heads=attn_heads, dim_head=attn_dim_head)
        self_attn = cast_tuple(self_attn, num_stages)

        def create_self_attn(dim):
            # full self-attention over flattened spatial positions, with residual
            return EinopsToAndFrom("b c h w", "b (h w) c", Residual(Attention(dim, **attn_kwargs)))
        # resnet block klass
        resnet_groups = cast_tuple(resnet_groups, num_stages)
        top_level_resnet_group = first(resnet_groups)
        num_resnet_blocks = cast_tuple(num_resnet_blocks, num_stages)
        # downsample klass
        downsample_klass = Downsample
        if cross_embed_downsample:
            downsample_klass = partial(
                CrossEmbedLayer, kernel_sizes=cross_embed_downsample_kernel_sizes
            )
        # upsample klass
        upsample_klass = ConvTransposeUpsample if not nearest_upsample else NearestUpsample
        # give memory efficient unet an initial resnet block
        self.init_resnet_block = (
            ResnetBlock(
                init_dim, init_dim, time_cond_dim=time_cond_dim, groups=top_level_resnet_group
            )
            if memory_efficient
            else None
        )
        # layers
        self.downs = nn.ModuleList([])
        self.ups = nn.ModuleList([])
        num_resolutions = len(in_out)
        skip_connect_dims = []  # keeping track of skip connection dimensions
        for ind, ((dim_in, dim_out), groups, layer_num_resnet_blocks, layer_self_attn) in enumerate(
            zip(in_out, resnet_groups, num_resnet_blocks, self_attn)
        ):
            is_first = ind == 0
            is_last = ind >= (num_resolutions - 1)
            layer_cond_dim = cond_dim if not is_first else None
            # memory-efficient variant downsamples before the resnet blocks
            dim_layer = dim_out if memory_efficient else dim_in
            skip_connect_dims.append(dim_layer)
            attention = nn.Identity()
            if layer_self_attn:
                attention = create_self_attn(dim_layer)
            elif sparse_attn:
                attention = Residual(LinearAttention(dim_layer, **attn_kwargs))
            self.downs.append(
                nn.ModuleList(
                    [
                        downsample_klass(dim_in, dim_out=dim_out) if memory_efficient else None,
                        ResnetBlock(
                            dim_layer, dim_layer, time_cond_dim=time_cond_dim, groups=groups
                        ),
                        nn.ModuleList(
                            [
                                ResnetBlock(
                                    dim_layer,
                                    dim_layer,
                                    cond_dim=layer_cond_dim,
                                    time_cond_dim=time_cond_dim,
                                    groups=groups,
                                )
                                for _ in range(layer_num_resnet_blocks)
                            ]
                        ),
                        attention,
                        downsample_klass(dim_layer, dim_out=dim_out)
                        if not is_last and not memory_efficient
                        else Conv2d(dim_layer, dim_out, 1),
                    ]
                )
            )
        mid_dim = dims[-1]
        self.mid_block1 = ResnetBlock(
            mid_dim,
            mid_dim,
            cond_dim=cond_dim,
            time_cond_dim=time_cond_dim,
            groups=resnet_groups[-1],
        )
        self.mid_attn = create_self_attn(mid_dim)
        self.mid_block2 = ResnetBlock(
            mid_dim,
            mid_dim,
            cond_dim=cond_dim,
            time_cond_dim=time_cond_dim,
            groups=resnet_groups[-1],
        )
        for ind, ((dim_in, dim_out), groups, layer_num_resnet_blocks, layer_self_attn) in enumerate(
            zip(
                reversed(in_out),
                reversed(resnet_groups),
                reversed(num_resnet_blocks),
                reversed(self_attn),
            )
        ):
            is_last = ind >= (len(in_out) - 1)
            layer_cond_dim = cond_dim if not is_last else None
            skip_connect_dim = skip_connect_dims.pop()
            attention = nn.Identity()
            if layer_self_attn:
                attention = create_self_attn(dim_out)
            elif sparse_attn:
                attention = Residual(LinearAttention(dim_out, **attn_kwargs))
            self.ups.append(
                nn.ModuleList(
                    [
                        ResnetBlock(
                            dim_out + skip_connect_dim,
                            dim_out,
                            cond_dim=layer_cond_dim,
                            time_cond_dim=time_cond_dim,
                            groups=groups,
                        ),
                        nn.ModuleList(
                            [
                                ResnetBlock(
                                    dim_out + skip_connect_dim,
                                    dim_out,
                                    cond_dim=layer_cond_dim,
                                    time_cond_dim=time_cond_dim,
                                    groups=groups,
                                )
                                for _ in range(layer_num_resnet_blocks)
                            ]
                        ),
                        attention,
                        upsample_klass(dim_out, dim_in)
                        if not is_last or memory_efficient
                        else nn.Identity(),
                    ]
                )
            )
        # final residual concatenates the post-init-conv features, hence dim * 2
        self.final_resnet_block = ResnetBlock(
            dim * 2, dim, time_cond_dim=time_cond_dim, groups=top_level_resnet_group
        )
        self.to_out = Conv2d(
            dim,
            self.channels_out,
            kernel_size=final_conv_kernel_size,
            padding=final_conv_kernel_size // 2,
        )

    # if the current settings for the unet are not correct
    # for cascading DDPM, then reinit the unet with the right settings
    def cast_model_parameters(
        self, *, lowres_cond, channels, channels_out, cond_on_image_embeds, cond_on_text_encodings
    ):
        """Return self if the given settings already match, else a re-initialized copy."""
        if (
            lowres_cond == self.lowres_cond
            and channels == self.channels
            and cond_on_image_embeds == self.cond_on_image_embeds
            and cond_on_text_encodings == self.cond_on_text_encodings
            and channels_out == self.channels_out
        ):
            return self
        updated_kwargs = dict(
            lowres_cond=lowres_cond,
            channels=channels,
            channels_out=channels_out,
            cond_on_image_embeds=cond_on_image_embeds,
            cond_on_text_encodings=cond_on_text_encodings,
        )
        # rebuild from the saved constructor locals, overridden by the new settings
        return self.__class__(**{**self._locals, **updated_kwargs})

    def forward_with_cond_scale(self, *args, cond_scale=1.0, **kwargs):
        """Classifier-free guidance: blend conditional and fully-dropped outputs."""
        logits = self.forward(*args, **kwargs)
        if cond_scale == 1:
            return logits
        # second pass with all conditioning dropped gives the unconditional logits
        null_logits = self.forward(
            *args, text_cond_drop_prob=1.0, image_cond_drop_prob=1.0, **kwargs
        )
        return null_logits + (logits - null_logits) * cond_scale

    def forward(
        self,
        x,
        time,
        *,
        image_embed,
        lowres_cond_img=None,
        text_encodings=None,
        text_mask=None,
        image_cond_drop_prob=0.0,
        text_cond_drop_prob=0.0,
        blur_sigma=None,  # NOTE(review): unused in this forward
        blur_kernel_size=None,  # NOTE(review): unused in this forward
    ):
        """Predict noise (or x0) for batch ``x`` at timesteps ``time``."""
        batch_size = x.shape[0]
        # add low resolution conditioning, if present
        assert not (
            self.lowres_cond and not exists(lowres_cond_img)
        ), "low resolution conditioning image must be present"
        if exists(lowres_cond_img):
            x = flow.cat((x, lowres_cond_img), dim=1)
        # initial convolution
        x = self.init_conv(x)
        r = x.clone()  # final residual
        # time conditioning
        time_hiddens = self.to_time_hiddens(time)
        time_tokens = self.to_time_tokens(time_hiddens)
        t = self.to_time_cond(time_hiddens)
        # conditional dropout
        image_keep_mask = prob_mask_like((batch_size,), 1 - image_cond_drop_prob)
        text_keep_mask = prob_mask_like((batch_size,), 1 - text_cond_drop_prob)
        text_keep_mask = rearrange(text_keep_mask, "b -> b 1 1")
        # image embedding to be summed to time embedding
        # discovered by @mhh0318 in the paper
        if exists(image_embed) and exists(self.to_image_hiddens):
            image_hiddens = self.to_image_hiddens(image_embed)
            image_keep_mask_hidden = rearrange(image_keep_mask, "b -> b 1")
            null_image_hiddens = self.null_image_hiddens.to(image_hiddens.dtype)
            image_hiddens = flow.where(image_keep_mask_hidden, image_hiddens, null_image_hiddens)
            t = t + image_hiddens
        # mask out image embedding depending on condition dropout
        # for classifier free guidance
        image_tokens = None
        if self.cond_on_image_embeds:
            image_keep_mask_embed = rearrange(image_keep_mask, "b -> b 1 1")
            image_tokens = self.image_to_tokens(image_embed)
            null_image_embed = self.null_image_embed.to(
                image_tokens.dtype
            )  # for some reason pyflow AMP not working
            image_tokens = flow.where(image_keep_mask_embed, image_tokens, null_image_embed)
        # take care of text encodings (optional)
        text_tokens = None
        if exists(text_encodings) and self.cond_on_text_encodings:
            text_tokens = self.text_to_cond(text_encodings)
            # truncate/pad to a fixed max_text_len so the null embed lines up
            text_tokens = text_tokens[:, : self.max_text_len]
            text_tokens_len = text_tokens.shape[1]
            remainder = self.max_text_len - text_tokens_len
            if remainder > 0:
                text_tokens = F.pad(text_tokens, (0, 0, 0, remainder))
            if exists(text_mask):
                if remainder > 0:
                    # text_mask = F.pad(text_mask, (0, remainder), value = False)
                    text_mask = F.pad(text_mask.to(flow.int32), (0, remainder), value=0)
                text_mask = rearrange(text_mask, "b n -> b n 1")
                text_keep_mask = text_mask & text_keep_mask
            null_text_embed = self.null_text_embed.to(
                text_tokens.dtype
            )  # for some reason pyflow AMP not working
            text_tokens = flow.where(text_keep_mask, text_tokens, null_text_embed)
        # main conditioning tokens (c)
        c = time_tokens
        if exists(image_tokens):
            c = flow.cat((c, image_tokens), dim=-2)
        # text and image conditioning tokens (mid_c), to save on compute,
        # only do cross attention based conditioning on the inner most layers of the Unet
        mid_c = c if not exists(text_tokens) else flow.cat((c, text_tokens), dim=-2)
        # normalize conditioning tokens
        c = self.norm_cond(c)
        mid_c = self.norm_mid_cond(mid_c)
        # initial resnet block
        if exists(self.init_resnet_block):
            x = self.init_resnet_block(x, t)
        # go through the layers of the unet, down and up
        hiddens = []
        for pre_downsample, init_block, resnet_blocks, attn, post_downsample in self.downs:
            if exists(pre_downsample):
                x = pre_downsample(x)
            x = init_block(x, t, c)
            for resnet_block in resnet_blocks:
                x = resnet_block(x, t, c)
                hiddens.append(x)
            x = attn(x)
            hiddens.append(x)
            if exists(post_downsample):
                x = post_downsample(x)
        x = self.mid_block1(x, t, mid_c)
        if exists(self.mid_attn):
            x = self.mid_attn(x)
        x = self.mid_block2(x, t, mid_c)

        def connect_skip(fmap):
            # pops hiddens in LIFO order, matching the down-path pushes
            return flow.cat((fmap, hiddens.pop() * self.skip_connect_scale), dim=1)
        for init_block, resnet_blocks, attn, upsample in self.ups:
            x = connect_skip(x)
            x = init_block(x, t, c)
            for resnet_block in resnet_blocks:
                x = connect_skip(x)
                x = resnet_block(x, t, c)
            x = attn(x)
            x = upsample(x)
        x = flow.cat((x, r), dim=1)
        x = self.final_resnet_block(x, t)
        return self.to_out(x)
class LowresConditioner(nn.Module):
    """Prepares the low-resolution conditioning image for cascading DDPM stages.

    During training the conditioning image is optionally downsampled first and
    then Gaussian-blurred (with optionally randomized sigma / kernel size)
    before being resized to the target resolution.
    """

    def __init__(
        self,
        downsample_first=True,
        blur_sigma=0.6,
        blur_kernel_size=3,
    ):
        super().__init__()
        self.downsample_first = downsample_first
        self.blur_sigma = blur_sigma
        self.blur_kernel_size = blur_kernel_size

    def forward(
        self,
        cond_fmap,
        *,
        target_image_size,
        downsample_image_size=None,
        blur_sigma=None,
        blur_kernel_size=None,
    ):
        if self.training and self.downsample_first and exists(downsample_image_size):
            cond_fmap = resize_image_to(cond_fmap, downsample_image_size)
        if self.training:
            # blur only at training time, as augmentation of the conditioning image
            sigma = default(blur_sigma, self.blur_sigma)
            kernel = default(blur_kernel_size, self.blur_kernel_size)
            if isinstance(sigma, tuple):
                # a (lo, hi) tuple means: draw a random float sigma in that range
                sigma = tuple(map(float, sigma))
                sigma = random.uniform(*sigma)
            if isinstance(kernel, tuple):
                # a (lo, hi) tuple means: draw a random int kernel size in that range
                kernel = tuple(map(int, kernel))
                kernel_lo, kernel_hi = kernel
                kernel = random.randrange(kernel_lo, kernel_hi + 1)
            cond_fmap = gaussian_blur2d(cond_fmap, cast_tuple(kernel, 2), cast_tuple(sigma, 2))
        cond_fmap = resize_image_to(cond_fmap, target_image_size)
        return cond_fmap
class Decoder(nn.Module):
def __init__(
self,
unet,
*,
clip=None,
image_size=None,
channels=3,
vae=tuple(),
timesteps=1000,
image_cond_drop_prob=0.1,
text_cond_drop_prob=0.5,
loss_type="l2",
beta_schedule=None,
predict_x_start=False,
predict_x_start_for_latent_diffusion=False,
image_sizes=None, # for cascading ddpm, image size at each stage
random_crop_sizes=None,
lowres_downsample_first=True,
blur_sigma=0.6, # cascading ddpm - blur sigma
blur_kernel_size=3, # cascading ddpm - blur kernel size
clip_denoised=True,
clip_x_start=True,
clip_adapter_overrides=dict(),
learned_variance=True,
learned_variance_constrain_frac=False,
vb_loss_weight=0.001,
unconditional=False, # set to True for generating images without conditioning
auto_normalize_img=True,
use_dynamic_thres=False, # from the Imagen paper
dynamic_thres_percentile=0.9,
p2_loss_weight_gamma=0.0,
p2_loss_weight_k=1,
):
super().__init__()
# clip
self.clip = None
if exists(clip):
assert not unconditional, "clip must not be given if doing unconditional image training"
assert (
channels == clip.image_channels
), f"channels of image ({channels}) should be equal to the"
" channels that CLIP accepts ({clip.image_channels})"
freeze_model_and_make_eval_(clip)
self.clip = clip
# determine image size, with image_size and image_sizes taking precedence
if exists(image_size) or exists(image_sizes):
assert exists(image_size) ^ exists(
image_sizes
), "only one of image_size or image_sizes must be given"
image_size = default(image_size, lambda: image_sizes[-1])
elif exists(clip):
image_size = clip.image_size
else:
raise ("either image_size, image_sizes, or clip must be given to decoder")
# channels
self.channels = channels
# verify conditioning method
unets = cast_tuple(unet)
num_unets = len(unets)
self.unconditional = unconditional
# automatically take care of ensuring that first unet is unconditional while the rest
# of the unets are conditioned on the low resolution image produced by previous unet
vaes = pad_tuple_to_length(
cast_tuple(vae), len(unets), fillvalue=NullVQGanVAE(channels=self.channels)
)
# whether to use learned variance, defaults to True for the first unet in the cascade
learned_variance = pad_tuple_to_length(
cast_tuple(learned_variance), len(unets), fillvalue=False
)
self.learned_variance = learned_variance
# whether to constrain the output of the network (the interpolation fraction) from 0 to 1
self.learned_variance_constrain_frac = learned_variance_constrain_frac
self.vb_loss_weight = vb_loss_weight
# construct unets and vaes
self.unets = nn.ModuleList([])
self.vaes = nn.ModuleList([])
for ind, (one_unet, one_vae, one_unet_learned_var) in enumerate(
zip(unets, vaes, learned_variance)
):
assert isinstance(one_unet, Unet)
assert isinstance(one_vae, (VQGanVAE, NullVQGanVAE))
is_first = ind == 0
latent_dim = one_vae.encoded_dim if exists(one_vae) else None
unet_channels = default(latent_dim, self.channels)
unet_channels_out = unet_channels * (1 if not one_unet_learned_var else 2)
one_unet = one_unet.cast_model_parameters(
lowres_cond=not is_first,
cond_on_image_embeds=not unconditional and is_first,
cond_on_text_encodings=not unconditional and one_unet.cond_on_text_encodings,
channels=unet_channels,
channels_out=unet_channels_out,
)
self.unets.append(one_unet)
self.vaes.append(one_vae.copy_for_eval())
# determine from unets whether conditioning on text encoding is needed
self.condition_on_text_encodings = any([unet.cond_on_text_encodings for unet in self.unets])
# create noise schedulers per unet
if not exists(beta_schedule):
beta_schedule = (
"cosine",
*(("cosine",) * max(num_unets - 2, 0)),
*(("linear",) * int(num_unets > 1)),
)
beta_schedule = cast_tuple(beta_schedule, num_unets)
p2_loss_weight_gamma = cast_tuple(p2_loss_weight_gamma, num_unets)
self.noise_schedulers = nn.ModuleList([])
for unet_beta_schedule, unet_p2_loss_weight_gamma in zip(
beta_schedule, p2_loss_weight_gamma
):
noise_scheduler = NoiseScheduler(
beta_schedule=unet_beta_schedule,
timesteps=timesteps,
loss_type=loss_type,
p2_loss_weight_gamma=unet_p2_loss_weight_gamma,
p2_loss_weight_k=p2_loss_weight_k,
)
self.noise_schedulers.append(noise_scheduler)
# unet image sizes
image_sizes = default(image_sizes, (image_size,))
image_sizes = tuple(sorted(set(image_sizes)))
assert len(self.unets) == len(
image_sizes
), "you did not supply the correct number of u-nets "
f"({len(self.unets)}) for resolutions {image_sizes}"
self.image_sizes = image_sizes
self.sample_channels = cast_tuple(self.channels, len(image_sizes))
# random crop sizes (for super-resoluting unets at the end of cascade?)
self.random_crop_sizes = cast_tuple(random_crop_sizes, len(image_sizes))
# predict x0 config
self.predict_x_start = (
cast_tuple(predict_x_start, len(unets))
if not predict_x_start_for_latent_diffusion
else tuple(map(lambda t: isinstance(t, VQGanVAE), self.vaes))
)
# cascading ddpm related stuff
lowres_conditions = tuple(map(lambda t: t.lowres_cond, self.unets))
assert lowres_conditions == (
False,
*((True,) * (len(self.unets) - 1)),
), "the first unet must be unconditioned (by low resolution image), "
"and the rest of the unets must have `lowres_cond` set to True"
self.to_lowres_cond = LowresConditioner(
downsample_first=lowres_downsample_first,
blur_sigma=blur_sigma,
blur_kernel_size=blur_kernel_size,
)
# classifier free guidance
self.image_cond_drop_prob = image_cond_drop_prob
self.text_cond_drop_prob = text_cond_drop_prob
self.can_classifier_guidance = image_cond_drop_prob > 0.0 or text_cond_drop_prob > 0.0
# whether to clip when sampling
self.clip_denoised = clip_denoised
self.clip_x_start = clip_x_start
# dynamic thresholding settings, if clipping denoised during sampling
self.use_dynamic_thres = use_dynamic_thres
self.dynamic_thres_percentile = dynamic_thres_percentile
# normalize and unnormalize image functions
self.normalize_img = normalize_neg_one_to_one if auto_normalize_img else identity
self.unnormalize_img = unnormalize_zero_to_one if auto_normalize_img else identity
# device tracker
self.register_buffer("_dummy", flow.Tensor([True]), persistent=False)
def get_unet(self, unet_number):
assert 0 < unet_number <= len(self.unets)
index = unet_number - 1
return self.unets[index]
    @contextmanager
    def one_unet_in_gpu(self, unet_number=None, unet=None):
        """Context manager: keep only one unet of the cascade on GPU.

        Exactly one of ``unet_number`` / ``unet`` must be given. All unets are
        moved to CPU, the chosen one to GPU; on exit every unet is moved back
        to the device recorded on entry.

        NOTE(review): devices are recorded after ``self.cuda()``, so the restore
        targets are the post-cuda devices; also the yield is not wrapped in
        try/finally, so devices are not restored if the managed body raises.
        """
        assert exists(unet_number) ^ exists(unet)
        if exists(unet_number):
            unet = self.get_unet(unet_number)
        self.cuda()
        devices = [module_device(unet) for unet in self.unets]
        self.unets.cpu()
        unet.cuda()
        yield
        for unet, device in zip(self.unets, devices):
            unet.to(device)
def p_mean_variance(
self,
unet,
x,
t,
image_embed,
noise_scheduler,
text_encodings=None,
text_mask=None,
lowres_cond_img=None,
clip_denoised=True,
predict_x_start=False,
learned_variance=False,
cond_scale=1.0,
model_output=None,
):
assert not (
cond_scale != 1.0 and not self.can_classifier_guidance
), "the decoder was not trained with conditional dropout, "
"and thus one cannot use classifier free guidance (cond_scale anything other than 1)"
pred = default(
model_output,
lambda: unet.forward_with_cond_scale(
x,
t,
image_embed=image_embed,
text_encodings=text_encodings,
text_mask=text_mask,
cond_scale=cond_scale,
lowres_cond_img=lowres_cond_img,
),
)
if learned_variance:
pred, var_interp_frac_unnormalized = pred.chunk(2, dim=1)
if predict_x_start:
x_recon = pred
else:
x_recon = noise_scheduler.predict_start_from_noise(x, t=t, noise=pred)
if clip_denoised:
# s is the threshold amount
# static thresholding would just be s = 1
s = 1.0
if self.use_dynamic_thres:
s = flow.quantile(
rearrange(x_recon, "b ... -> b (...)").abs(),
self.dynamic_thres_percentile,
dim=-1,
)
s.clamp_(min=1.0)
s = s.view(-1, *((1,) * (x_recon.ndim - 1)))
# clip by threshold, depending on whether static or dynamic
x_recon = x_recon.clamp(-s, s) / s
model_mean, posterior_variance, posterior_log_variance = noise_scheduler.q_posterior(
x_start=x_recon, x_t=x, t=t
)
if learned_variance:
# if learned variance, posterio variance and posterior log variance are
# predicted by the network by an interpolation of the max and min log beta values
# eq 15 - https://arxiv.org/abs/2102.09672
min_log = extract(noise_scheduler.posterior_log_variance_clipped, t, x.shape)
max_log = extract(flow.log(noise_scheduler.betas), t, x.shape)
var_interp_frac = unnormalize_zero_to_one(var_interp_frac_unnormalized)
if self.learned_variance_constrain_frac:
var_interp_frac = var_interp_frac.sigmoid()
posterior_log_variance = var_interp_frac * max_log + (1 - var_interp_frac) * min_log
posterior_variance = posterior_log_variance.exp()
return model_mean, posterior_variance, posterior_log_variance
    @flow.no_grad()
    def p_sample(
        self,
        unet,
        x,
        t,
        image_embed,
        noise_scheduler,
        text_encodings=None,
        text_mask=None,
        cond_scale=1.0,
        lowres_cond_img=None,
        predict_x_start=False,
        learned_variance=False,
        clip_denoised=True,
    ):
        """Draw a single ancestral sampling step x_{t-1} ~ p(x_{t-1} | x_t).

        Computes the model posterior mean / log-variance via ``p_mean_variance``
        and adds Gaussian noise scaled by exp(0.5 * log_var); the noise term is
        masked out at t == 0 so the final step returns the mean directly.
        """
        b = x.shape[0]
        model_mean, _, model_log_variance = self.p_mean_variance(
            unet,
            x=x,
            t=t,
            image_embed=image_embed,
            text_encodings=text_encodings,
            text_mask=text_mask,
            cond_scale=cond_scale,
            lowres_cond_img=lowres_cond_img,
            clip_denoised=clip_denoised,
            predict_x_start=predict_x_start,
            noise_scheduler=noise_scheduler,
            learned_variance=learned_variance,
        )
        noise = flow.randn(*x.shape, placement=get_default_placement(), sbp=get_default_sbp())
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
    @flow.no_grad()
    def p_sample_loop(
        self,
        unet,
        shape,
        image_embed,
        noise_scheduler,
        predict_x_start=False,
        learned_variance=False,
        clip_denoised=True,
        lowres_cond_img=None,
        text_encodings=None,
        text_mask=None,
        cond_scale=1,
        is_latent_diffusion=False,
    ):
        """Full reverse-diffusion loop: start from pure noise of ``shape`` and
        repeatedly call ``p_sample`` from t = T-1 down to t = 0.

        Returns the final image mapped back to pixel range via
        ``self.unnormalize_img``.
        """
        b = shape[0]
        img = flow.randn(*shape, placement=get_default_placement(), sbp=get_default_sbp())
        if not is_latent_diffusion:
            # pixel-space conditioning images are normalized to [-1, 1]
            lowres_cond_img = maybe(self.normalize_img)(lowres_cond_img)
        for i in tqdm(
            reversed(range(0, noise_scheduler.num_timesteps)),
            desc="sampling loop time step",
            total=noise_scheduler.num_timesteps,
        ):
            img = self.p_sample(
                unet,
                img,
                flow.full(
                    (b,),
                    i,
                    placement=get_default_placement(),
                    sbp=get_default_sbp(),
                    dtype=flow.long,
                ),
                image_embed=image_embed,
                text_encodings=text_encodings,
                text_mask=text_mask,
                cond_scale=cond_scale,
                lowres_cond_img=lowres_cond_img,
                predict_x_start=predict_x_start,
                noise_scheduler=noise_scheduler,
                learned_variance=learned_variance,
                clip_denoised=clip_denoised,
            )
        unnormalize_img = self.unnormalize_img(img)
        return unnormalize_img
    def p_losses(
        self,
        unet,
        x_start,
        times,
        *,
        image_embed,
        noise_scheduler,
        lowres_cond_img=None,
        text_encodings=None,
        text_mask=None,
        predict_x_start=False,
        noise=None,
        learned_variance=False,
        clip_denoised=False,
        is_latent_diffusion=False,
    ):
        """Diffusion training loss for one unet.

        Noises ``x_start`` to timestep ``times``, predicts noise (or x_start
        when ``predict_x_start``) with the unet, and returns the p2-reweighted
        simple loss; when ``learned_variance`` is set, a down-weighted
        variational-bound (KL / decoder NLL) term is added as in Improved DDPM.
        """
        noise = default(
            noise,
            lambda: flow.randn(
                *x_start.shape, placement=get_default_placement(), sbp=get_default_sbp()
            ),
        )
        # normalize to [-1, 1]
        if not is_latent_diffusion:
            x_start = self.normalize_img(x_start)
            lowres_cond_img = maybe(self.normalize_img)(lowres_cond_img)
        # get x_t
        x_noisy = noise_scheduler.q_sample(x_start=x_start, t=times, noise=noise)
        model_output = unet(
            x_noisy,
            times,
            image_embed=image_embed,
            text_encodings=text_encodings,
            text_mask=text_mask,
            lowres_cond_img=lowres_cond_img,
            image_cond_drop_prob=self.image_cond_drop_prob,
            text_cond_drop_prob=self.text_cond_drop_prob,
        )
        if learned_variance:
            # first half of the channels is the mean prediction,
            # second half parameterizes the variance interpolation
            pred, _ = model_output.chunk(2, dim=1)
        else:
            pred = model_output
        target = noise if not predict_x_start else x_start
        loss = noise_scheduler.loss_fn(pred, target, reduction="none")
        loss = reduce(loss, "b ... -> b (...)", "mean")
        loss = noise_scheduler.p2_reweigh_loss(loss, times)
        loss = loss.mean()
        if not learned_variance:
            # return simple loss if not using learned variance
            return loss
        # most of the code below is transcribed from
        # https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/diffusion_utils_2.py
        # the Improved DDPM paper then further modified it so that the mean is detached
        # (shown a couple lines before), and weighted to be smaller than the l1 or l2 "simple" loss
        # it is questionable whether this is really needed, looking at some of the figures in the
        # paper, but may as well stay faithful to their implementation
        # if learning the variance, also include the extra weight kl loss
        true_mean, _, true_log_variance_clipped = noise_scheduler.q_posterior(
            x_start=x_start, x_t=x_noisy, t=times
        )
        model_mean, _, model_log_variance = self.p_mean_variance(
            unet,
            x=x_noisy,
            t=times,
            image_embed=image_embed,
            noise_scheduler=noise_scheduler,
            clip_denoised=clip_denoised,
            learned_variance=True,
            model_output=model_output,
        )
        # kl loss with detached model predicted mean, for stability reasons as in paper
        detached_model_mean = model_mean.detach()
        kl = normal_kl(
            true_mean, true_log_variance_clipped, detached_model_mean, model_log_variance
        )
        kl = meanflat(kl) * NAT
        decoder_nll = -discretized_gaussian_log_likelihood(
            x_start, means=detached_model_mean, log_scales=0.5 * model_log_variance
        )
        decoder_nll = meanflat(decoder_nll) * NAT
        # at the first timestep return the decoder NLL,
        # otherwise KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
        vb_losses = flow.where(times == 0, decoder_nll, kl)
        # weight the vb loss smaller, for stability, as in the paper (recommended 0.001)
        vb_loss = vb_losses.mean() * self.vb_loss_weight
        return loss + vb_loss
    @flow.no_grad()
    @eval_decorator
    def sample(
        self,
        image_embed=None,
        text=None,
        text_mask=None,
        text_encodings=None,
        batch_size=1,
        cond_scale=1.0,
        stop_at_unet_number=None,
        distributed=False,
    ):
        """Generate images from a CLIP image embedding (and optional text) by
        running the cascade of unets from low to high resolution.

        Each stage optionally conditions on the previous stage's output
        (``lowres_cond_img``) and may run in a VQGAN latent space; sampling
        stops early at ``stop_at_unet_number`` when given.
        """
        assert self.unconditional or exists(
            image_embed
        ), "image embed must be present on sampling from decoder unless if trained unconditionally"
        if not self.unconditional:
            batch_size = image_embed.shape[0]
        if exists(text) and not exists(text_encodings) and not self.unconditional:
            assert exists(self.clip)
            _, text_encodings, text_mask = self.clip.embed_text(text)
        assert not (
            self.condition_on_text_encodings and not exists(text_encodings)
        ), "text or text encodings must be passed into decoder if specified"
        assert not (
            not self.condition_on_text_encodings and exists(text_encodings)
        ), "decoder specified not to be conditioned on text, yet it is presented"
        img = None
        # iterate the cascade: per-unet vae, channels, resolution and schedule
        for (
            unet_number,
            unet,
            vae,
            channel,
            image_size,
            predict_x_start,
            learned_variance,
            noise_scheduler,
        ) in tqdm(
            zip(
                range(1, len(self.unets) + 1),
                self.unets,
                self.vaes,
                self.sample_channels,
                self.image_sizes,
                self.predict_x_start,
                self.learned_variance,
                self.noise_schedulers,
            )
        ):
            context = null_context()
            with context:
                lowres_cond_img = None
                shape = (batch_size, channel, image_size, image_size)
                if unet.lowres_cond:
                    # upsampler unet: condition on the previous stage's output
                    lowres_cond_img = self.to_lowres_cond(img, target_image_size=image_size)
                is_latent_diffusion = isinstance(vae, VQGanVAE)
                # sample in the vae's encoded feature-map space
                image_size = vae.get_encoded_fmap_size(image_size)
                shape = (batch_size, vae.encoded_dim, image_size, image_size)
                lowres_cond_img = maybe(vae.encode)(lowres_cond_img)
                img = self.p_sample_loop(
                    unet,
                    shape,
                    image_embed=image_embed,
                    text_encodings=text_encodings,
                    text_mask=text_mask,
                    cond_scale=cond_scale,
                    predict_x_start=predict_x_start,
                    learned_variance=learned_variance,
                    clip_denoised=not is_latent_diffusion,
                    lowres_cond_img=lowres_cond_img,
                    is_latent_diffusion=is_latent_diffusion,
                    noise_scheduler=noise_scheduler,
                )
                img = vae.decode(img)
            if exists(stop_at_unet_number) and stop_at_unet_number == unet_number:
                break
        return img
def forward(
self,
image,
text=None,
image_embed=None,
text_encodings=None,
text_mask=None,
unet_number=None,
return_lowres_cond_image=False,
):
assert not (
len(self.unets) > 1 and not exists(unet_number)
), f"you must specify which unet you want trained, from a range of 1 to {len(self.unets)},"
" if you are training cascading DDPM (multiple unets)"
unet_number = default(unet_number, 1)
unet_index = unet_number - 1
unet = self.get_unet(unet_number)
vae = self.vaes[unet_index]
noise_scheduler = self.noise_schedulers[unet_index]
target_image_size = self.image_sizes[unet_index]
predict_x_start = self.predict_x_start[unet_index]
random_crop_size = self.random_crop_sizes[unet_index]
learned_variance = self.learned_variance[unet_index]
b, _, h, w, _, = (
*image.shape,
image.device,
)
check_shape(image, "b c h w", c=self.channels)
assert h >= target_image_size and w >= target_image_size
times = flow.randint(
0,
noise_scheduler.num_timesteps,
(b,),
placement=get_default_placement(),
sbp=get_default_sbp(),
dtype=flow.long,
)
if not exists(image_embed) and not self.unconditional:
assert exists(self.clip), "if you want to derive CLIP image embeddings automatically, "
"you must supply `clip` to the decoder on init"
image_embed, _ = self.clip.embed_image(image)
if exists(text) and not exists(text_encodings) and not self.unconditional:
assert exists(
self.clip
), "if you are passing in raw text, you need to supply `clip` to the decoder"
_, text_encodings, text_mask = self.clip.embed_text(text)
assert not (
self.condition_on_text_encodings and not exists(text_encodings)
), "text or text encodings must be passed into decoder if specified"
assert not (
not self.condition_on_text_encodings and exists(text_encodings)
), "decoder specified not to be conditioned on text, yet it is presented"
lowres_cond_img = (
self.to_lowres_cond(
image,
target_image_size=target_image_size,
downsample_image_size=self.image_sizes[unet_index - 1],
)
if unet_number > 1
else None
)
image = resize_image_to(image, target_image_size)
if exists(random_crop_size):
aug = K.RandomCrop((random_crop_size, random_crop_size), p=1.0)
# make sure low res conditioner and image both get augmented the same way
image = aug(image)
lowres_cond_img = aug(lowres_cond_img, params=aug._params)
is_latent_diffusion = not isinstance(vae, NullVQGanVAE)
vae.eval()
with flow.no_grad():
image = vae.encode(image)
lowres_cond_img = maybe(vae.encode)(lowres_cond_img)
losses = self.p_losses(
unet,
image,
times,
image_embed=image_embed,
text_encodings=text_encodings,
text_mask=text_mask,
lowres_cond_img=lowres_cond_img,
predict_x_start=predict_x_start,
learned_variance=learned_variance,
is_latent_diffusion=is_latent_diffusion,
noise_scheduler=noise_scheduler,
)
if not return_lowres_cond_image:
return losses
return losses, lowres_cond_img
# main class
class DALLE2(nn.Module):
    """End-to-end DALL-E 2 inference wrapper: text -> diffusion prior -> decoder -> image."""

    def __init__(self, *, prior, decoder, prior_num_samples=2, **kwargs):
        super().__init__()
        # assert isinstance(prior, DiffusionPrior)
        # assert isinstance(decoder, Decoder)
        self.prior = prior
        self.decoder = decoder
        self.tokenizer = SimpleTokenizer()
        # number of image-embedding candidates the prior samples per text
        self.prior_num_samples = prior_num_samples
        self.decoder_need_text_cond = self.decoder.condition_on_text_encodings
        self.to_pil = T.ToPILImage()

    @flow.no_grad()
    @eval_decorator
    def forward(self, text, cond_scale=1.0, prior_cond_scale=1.0, return_pil_images=False):
        """Generate image(s) for ``text`` (a string, list of strings, or
        pre-tokenized tensor); returns a single image when one text is given,
        optionally converted to PIL via ``return_pil_images``."""
        device = module_device(self)
        one_text = isinstance(text, str) or (not is_list_str(text) and text.shape[0] == 1)
        if isinstance(text, str) or is_list_str(text):
            text = [text] if not isinstance(text, (list, tuple)) else text
            text = self.tokenizer.tokenize(text).to(device)
        image_embed = self.prior.sample(
            text, num_samples_per_batch=self.prior_num_samples, cond_scale=prior_cond_scale
        )
        text_cond = text if self.decoder_need_text_cond else None
        images = self.decoder.sample(image_embed, text=text_cond, cond_scale=cond_scale)
        if return_pil_images:
            images = list(map(self.to_pil, images.unbind(dim=0)))
        if one_text:
            return first(images)
        return images
| 83,430 | 31.821007 | 100 | py |
libai | libai-main/projects/DALLE2/dalle2/__init__.py | from .models import Unet, DALLE2, DiffusionPriorNetwork, DiffusionPrior, Decoder
from ._clip import OpenAIClipAdapter, import_flow_clip
| 136 | 44.666667 | 80 | py |
libai | libai-main/projects/DALLE2/dalle2/vector_quantize_flow.py | # from https://github.com/lucidrains/vector_quantize_pytorch/vector_quantize_pytorch.py
import oneflow as flow
import oneflow.nn.functional as F
from einops import rearrange, repeat
from oneflow import einsum, nn
from libai.utils import distributed
def exists(val):
    """Return True when *val* is anything other than None."""
    if val is None:
        return False
    return True
def default(val, d):
    """Return *val* unless it is None, in which case return the fallback *d*."""
    return d if val is None else val
def noop(*args, **kwargs):
    """Do-nothing placeholder (used e.g. as a stand-in for all_reduce when not distributed)."""
    return None
def l2norm(t):
    # Unit-normalize t along its last dimension (L2 norm).
    return F.normalize(t, p=2, dim=-1)
def log(t, eps=1e-20):
    # Numerically safe log: clamp to eps so log(0) never produces -inf.
    return flow.log(t.clamp(min=eps))
def uniform_init(*shape):
    # New tensor of the given shape, kaiming-uniform initialized in place.
    t = flow.empty(shape)
    nn.init.kaiming_uniform_(t)
    return t
def gumbel_noise(t):
    # Standard Gumbel noise shaped like t: -log(-log(U)) with U ~ Uniform(0, 1).
    noise = flow.zeros_like(t).uniform_(0, 1)
    return -log(-log(noise))
def gumbel_sample(t, temperature=1.0, dim=-1):
    """Sample an index from logits *t* via the Gumbel-max trick along *dim*;
    temperature 0 degenerates to a plain argmax."""
    if temperature == 0:
        return t.argmax(dim=dim)
    perturbed = (t / temperature) + gumbel_noise(t)
    return perturbed.argmax(dim=dim)
def ema_inplace(moving_avg, new, decay):
    """In-place exponential moving average: avg <- decay * avg + (1 - decay) * new."""
    buf = moving_avg.data
    buf.mul_(decay)
    buf.add_(new, alpha=(1 - decay))
def laplace_smoothing(x, n_categories, eps=1e-5):
    """Additive (Laplace) smoothing of counts *x*: add eps to every category
    and renormalize so the result still sums to x.sum() / (x.sum() + n*eps) scale."""
    denominator = x.sum() + n_categories * eps
    return (x + eps) / denominator
def sample_vectors(samples, num):
    # Pick `num` rows from `samples`: without replacement when enough rows
    # exist, with replacement otherwise.
    num_samples, device = samples.shape[0], samples.device
    if num_samples >= num:
        indices = flow.randperm(num_samples, device=device)[:num]
    else:
        indices = flow.randint(0, num_samples, (num,), device=device)
    return samples[indices]
def batched_sample_vectors(samples, num):
    # Apply sample_vectors independently to each leading (per-codebook) slice.
    return flow.stack([sample_vectors(sample, num) for sample in samples.unbind(dim=0)], dim=0)
def pad_shape(shape, size, dim=0):
    """Copy *shape* as a list with the extent at index *dim* replaced by *size*."""
    out = []
    for axis, extent in enumerate(shape):
        out.append(size if axis == dim else extent)
    return out
def sample_multinomial(total_count, probs):
    """Split `total_count` draws across categories with probabilities `probs`
    by sequential binomial sampling; returns per-category counts on the
    original device of `probs`."""
    device = probs.device
    probs = probs.cpu()
    total_count = probs.new_full((), total_count)
    remainder = probs.new_ones(())
    sample = flow.empty_like(probs, dtype=flow.long)
    for i, p in enumerate(probs):
        # NOTE(review): relies on flow.binomial matching torch.binomial's
        # signature -- confirm it exists in the targeted oneflow version.
        s = flow.binomial(total_count, p / remainder)
        sample[i] = s
        total_count -= s
        remainder -= p
    return sample.to(device)
def all_gather_sizes(x, dim):
    # Gather the extent of dimension `dim` of x from every rank into one tensor.
    # NOTE(review): `distributed` here is libai.utils.distributed -- confirm it
    # exposes torch.distributed-style all_gather / get_world_size used below.
    size = flow.tensor(x.shape[dim], dtype=flow.long, device=x.device)
    all_sizes = [flow.empty_like(size) for _ in range(distributed.get_world_size())]
    distributed.all_gather(all_sizes, size)
    return flow.stack(all_sizes)
def all_gather_variably_sized(x, sizes, dim=0):
    # Broadcast each rank's (differently sized) tensor to all ranks: rank i
    # sends its own x, every other rank receives into a buffer of the
    # advertised size from `sizes`.
    rank = distributed.get_rank()
    all_x = []
    for i, size in enumerate(sizes):
        t = x if i == rank else x.new_empty(pad_shape(x.shape, size, dim))
        distributed.broadcast(t, src=i, async_op=True)
        all_x.append(t)
    distributed.barrier()
    return all_x
def sample_vectors_distributed(local_samples, num):
    # Distributed analogue of batched_sample_vectors: rank 0 decides how many
    # vectors each rank contributes (proportional to its local sample count),
    # then the per-rank samples are gathered and concatenated on every rank.
    rank = distributed.get_rank()
    all_num_samples = all_gather_sizes(local_samples, dim=0)
    if rank == 0:
        samples_per_rank = sample_multinomial(num, all_num_samples / all_num_samples.sum())
    else:
        samples_per_rank = flow.empty_like(all_num_samples)
    distributed.broadcast(samples_per_rank, src=0)
    samples_per_rank = samples_per_rank.tolist()
    local_samples = batched_sample_vectors(local_samples, samples_per_rank[rank])
    all_samples = all_gather_variably_sized(local_samples, samples_per_rank, dim=0)
    return flow.cat(all_samples, dim=0)
def batched_bincount(x, *, minlength):
    # Row-wise bincount: per batch row of x, count occurrences of each value
    # in [0, minlength) via scatter_add.
    batch, dtype, device = x.shape[0], x.dtype, x.device
    target = flow.zeros(batch, minlength, dtype=dtype, device=device)
    values = flow.ones_like(x)
    target.scatter_add_(-1, x, values)
    return target
def kmeans(
    samples,
    num_clusters,
    num_iters=10,
    use_cosine_sim=False,
    sample_fn=batched_sample_vectors,
    all_reduce_fn=noop,
):
    """Batched (per-codebook) k-means over `samples` of shape (h, n, d).

    Uses cosine similarity when `use_cosine_sim` else negative Euclidean
    distance; `sample_fn`/`all_reduce_fn` allow a distributed variant.
    Returns (means of shape (h, num_clusters, d), final per-cluster counts).
    """
    num_codebooks, dim, dtype, _ = (
        samples.shape[0],
        samples.shape[-1],
        samples.dtype,
        samples.device,
    )
    # initialize centroids by sampling from the data
    means = sample_fn(samples, num_clusters)
    for _ in range(num_iters):
        if use_cosine_sim:
            dists = samples @ rearrange(means, "h n d -> h d n")
        else:
            dists = -flow.cdist(samples, means, p=2)
        # assign each sample to its best cluster
        buckets = flow.argmax(dists, dim=-1)
        bins = batched_bincount(buckets, minlength=num_clusters)
        all_reduce_fn(bins)
        zero_mask = bins == 0
        bins_min_clamped = bins.masked_fill(zero_mask, 1)
        new_means = buckets.new_zeros(num_codebooks, num_clusters, dim, dtype=dtype)
        new_means.scatter_add_(1, repeat(buckets, "h n -> h n d", d=dim), samples)
        new_means = new_means / rearrange(bins_min_clamped, "... -> ... 1")
        all_reduce_fn(new_means)
        if use_cosine_sim:
            new_means = l2norm(new_means)
        # keep the old centroid for clusters that received no samples
        means = flow.where(rearrange(zero_mask, "... -> ... 1"), means, new_means)
    return means, bins
def batched_embedding(indices, embeds):
    # Gather code vectors for per-codebook indices: (h, b, n) indices into
    # (h, c, d) codebooks -> (h, b, n, d) embeddings.
    batch, dim = indices.shape[1], embeds.shape[-1]
    indices = repeat(indices, "h b n -> h b n d", d=dim)
    embeds = repeat(embeds, "h c d -> h b c d", b=batch)
    return embeds.gather(2, indices)
# regularization losses
def orthgonal_loss_fn(t):
    # eq (2) from https://arxiv.org/abs/2112.00384
    # (name keeps the upstream "orthgonal" spelling for API compatibility)
    h, n = t.shape[:2]
    normed_codes = l2norm(t)
    identity = repeat(flow.eye(n, device=t.device), "i j -> h i j", h=h)
    cosine_sim = einsum("h i d, h j d -> h i j", normed_codes, normed_codes)
    return ((cosine_sim - identity) ** 2).sum() / (h * n ** 2)
# distance types
class EuclideanCodebook(nn.Module):
    """VQ codebook matching codes by (negative) Euclidean distance, updated
    via exponential moving averages during training."""

    def __init__(
        self,
        dim,
        codebook_size,
        num_codebooks=1,
        kmeans_init=False,
        kmeans_iters=10,
        decay=0.8,
        eps=1e-5,
        threshold_ema_dead_code=2,
        use_ddp=False,
        learnable_codebook=False,
        sample_codebook_temp=0,
    ):
        super().__init__()
        self.decay = decay
        # kmeans_init: start from zeros and fit on the first batch seen;
        # otherwise kaiming-uniform random init.
        init_fn = uniform_init if not kmeans_init else flow.zeros
        embed = init_fn(num_codebooks, codebook_size, dim)
        self.codebook_size = codebook_size
        self.num_codebooks = num_codebooks
        self.kmeans_iters = kmeans_iters
        self.eps = eps
        self.threshold_ema_dead_code = threshold_ema_dead_code
        self.sample_codebook_temp = sample_codebook_temp
        self.sample_fn = sample_vectors_distributed if use_ddp else batched_sample_vectors
        self.all_reduce_fn = distributed.all_reduce if use_ddp else noop
        self.register_buffer("initted", flow.Tensor([not kmeans_init]))
        self.register_buffer("cluster_size", flow.zeros(num_codebooks, codebook_size))
        self.register_buffer("embed_avg", embed.clone())
        self.learnable_codebook = learnable_codebook
        if learnable_codebook:
            self.embed = nn.Parameter(embed)
        else:
            self.register_buffer("embed", embed)

    def init_embed_(self, data):
        # One-time k-means initialization of the codebook from the first batch.
        if self.initted:
            return
        embed, cluster_size = kmeans(
            data,
            self.codebook_size,
            self.kmeans_iters,
            sample_fn=self.sample_fn,
            all_reduce_fn=self.all_reduce_fn,
        )
        self.embed.data.copy_(embed)
        self.embed_avg.data.copy_(embed.clone())
        self.cluster_size.data.copy_(cluster_size)
        self.initted.data.copy_(flow.Tensor([True]))

    def replace(self, batch_samples, batch_mask):
        # Re-seed masked (dead) codes with vectors sampled from the batch.
        batch_samples = l2norm(batch_samples)
        for ind, (samples, mask) in enumerate(
            zip(batch_samples.unbind(dim=0), batch_mask.unbind(dim=0))
        ):
            if not flow.any(mask):
                continue
            sampled = self.sample_fn(rearrange(samples, "... -> 1 ..."), mask.sum().item())
            self.embed.data[ind][mask] = rearrange(sampled, "1 ... -> ...")

    def expire_codes_(self, batch_samples):
        # Replace codes whose EMA usage fell below the dead-code threshold.
        if self.threshold_ema_dead_code == 0:
            return
        expired_codes = self.cluster_size < self.threshold_ema_dead_code
        if not flow.any(expired_codes):
            return
        batch_samples = rearrange(batch_samples, "h ... d -> h (...) d")
        self.replace(batch_samples, batch_mask=expired_codes)

    def forward(self, x):
        # Quantize x to nearest codes; EMA-update the codebook when training.
        needs_codebook_dim = x.ndim < 4
        x = x.float()
        if needs_codebook_dim:
            x = rearrange(x, "... -> 1 ...")
        shape, dtype = x.shape, x.dtype
        flatten = rearrange(x, "h ... d -> h (...) d")
        self.init_embed_(flatten)
        embed = self.embed if not self.learnable_codebook else self.embed.detach()
        dist = -flow.cdist(flatten, embed, p=2)
        embed_ind = gumbel_sample(dist, dim=-1, temperature=self.sample_codebook_temp)
        embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype)
        embed_ind = embed_ind.view(*shape[:-1])
        quantize = batched_embedding(embed_ind, self.embed)
        if self.training:
            cluster_size = embed_onehot.sum(dim=1)
            self.all_reduce_fn(cluster_size)
            ema_inplace(self.cluster_size, cluster_size, self.decay)
            embed_sum = einsum("h n d, h n c -> h c d", flatten, embed_onehot)
            self.all_reduce_fn(embed_sum)
            # NOTE(review): upstream vector_quantize_pytorch also applies
            # ema_inplace(self.embed_avg, embed_sum, self.decay) at this point;
            # here embed_sum is computed but never folded into embed_avg --
            # confirm against the reference implementation.
            cluster_size = (
                laplace_smoothing(self.cluster_size, self.codebook_size, self.eps)
                * self.cluster_size.sum()
            )
            embed_normalized = self.embed_avg / rearrange(cluster_size, "... -> ... 1")
            self.embed.data.copy_(embed_normalized)
            self.expire_codes_(x)
        if needs_codebook_dim:
            quantize, embed_ind = map(lambda t: rearrange(t, "1 ... -> ..."), (quantize, embed_ind))
        return quantize, embed_ind
class CosineSimCodebook(nn.Module):
    """VQ codebook matching codes by cosine similarity; inputs and code
    vectors are kept L2-normalized, with EMA updates during training."""

    def __init__(
        self,
        dim,
        codebook_size,
        num_codebooks=1,
        kmeans_init=False,
        kmeans_iters=10,
        decay=0.8,
        eps=1e-5,
        threshold_ema_dead_code=2,
        use_ddp=False,
        learnable_codebook=False,
        sample_codebook_temp=0.0,
    ):
        super().__init__()
        self.decay = decay
        if not kmeans_init:
            embed = l2norm(uniform_init(num_codebooks, codebook_size, dim))
        else:
            # zeros placeholder; real values come from k-means on first batch
            embed = flow.zeros(num_codebooks, codebook_size, dim)
        self.codebook_size = codebook_size
        self.num_codebooks = num_codebooks
        self.kmeans_iters = kmeans_iters
        self.eps = eps
        self.threshold_ema_dead_code = threshold_ema_dead_code
        self.sample_codebook_temp = sample_codebook_temp
        self.sample_fn = sample_vectors_distributed if use_ddp else batched_sample_vectors
        self.all_reduce_fn = distributed.all_reduce if use_ddp else noop
        self.register_buffer("initted", flow.Tensor([not kmeans_init]))
        self.register_buffer("cluster_size", flow.zeros(num_codebooks, codebook_size))
        self.learnable_codebook = learnable_codebook
        if learnable_codebook:
            self.embed = nn.Parameter(embed)
        else:
            self.register_buffer("embed", embed)

    def init_embed_(self, data):
        # One-time spherical k-means initialization from the first batch.
        if self.initted:
            return
        embed, cluster_size = kmeans(
            data,
            self.codebook_size,
            self.kmeans_iters,
            use_cosine_sim=True,
            sample_fn=self.sample_fn,
            all_reduce_fn=self.all_reduce_fn,
        )
        self.embed.data.copy_(embed)
        self.cluster_size.data.copy_(cluster_size)
        self.initted.data.copy_(flow.Tensor([True]))

    def replace(self, batch_samples, batch_mask):
        # Re-seed masked (dead) codes with vectors sampled from the batch.
        batch_samples = l2norm(batch_samples)
        for ind, (samples, mask) in enumerate(
            zip(batch_samples.unbind(dim=0), batch_mask.unbind(dim=0))
        ):
            if not flow.any(mask):
                continue
            sampled = self.sample_fn(rearrange(samples, "... -> 1 ..."), mask.sum().item())
            self.embed.data[ind][mask] = rearrange(sampled, "1 ... -> ...")

    def expire_codes_(self, batch_samples):
        # Replace codes whose EMA usage fell below the dead-code threshold.
        if self.threshold_ema_dead_code == 0:
            return
        expired_codes = self.cluster_size < self.threshold_ema_dead_code
        if not flow.any(expired_codes):
            return
        batch_samples = rearrange(batch_samples, "h ... d -> h (...) d")
        self.replace(batch_samples, batch_mask=expired_codes)

    def forward(self, x):
        # Quantize x to its most-cosine-similar codes; EMA-update when training.
        needs_codebook_dim = x.ndim < 4
        x = x.float()
        if needs_codebook_dim:
            x = rearrange(x, "... -> 1 ...")
        shape, dtype = x.shape, x.dtype
        flatten = rearrange(x, "h ... d -> h (...) d")
        flatten = l2norm(flatten)
        self.init_embed_(flatten)
        embed = self.embed if not self.learnable_codebook else self.embed.detach()
        embed = l2norm(embed)
        dist = einsum("h n d, h c d -> h n c", flatten, embed)
        embed_ind = gumbel_sample(dist, dim=-1, temperature=self.sample_codebook_temp)
        embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype)
        embed_ind = embed_ind.view(*shape[:-1])
        quantize = batched_embedding(embed_ind, self.embed)
        if self.training:
            bins = embed_onehot.sum(dim=1)
            self.all_reduce_fn(bins)
            ema_inplace(self.cluster_size, bins, self.decay)
            zero_mask = bins == 0
            bins = bins.masked_fill(zero_mask, 1.0)
            embed_sum = einsum("h n d, h n c -> h c d", flatten, embed_onehot)
            self.all_reduce_fn(embed_sum)
            embed_normalized = embed_sum / rearrange(bins, "... -> ... 1")
            embed_normalized = l2norm(embed_normalized)
            # keep the previous code vector for bins that received no samples
            embed_normalized = flow.where(
                rearrange(zero_mask, "... -> ... 1"), embed, embed_normalized
            )
            ema_inplace(self.embed, embed_normalized, self.decay)
            self.expire_codes_(x)
        if needs_codebook_dim:
            quantize, embed_ind = map(lambda t: rearrange(t, "1 ... -> ..."), (quantize, embed_ind))
        return quantize, embed_ind
# main class
class VectorQuantize(nn.Module):
    """Vector-quantization layer wrapping a Euclidean or cosine-similarity
    codebook, with optional multi-head quantization, in/out projections,
    commitment loss and orthogonal codebook regularization."""

    def __init__(
        self,
        dim,
        codebook_size,
        codebook_dim=None,
        heads=1,
        separate_codebook_per_head=False,
        decay=0.8,
        eps=1e-5,
        kmeans_init=False,
        kmeans_iters=10,
        use_cosine_sim=False,
        threshold_ema_dead_code=0,
        channel_last=True,
        accept_image_fmap=False,
        commitment_weight=1.0,
        orthogonal_reg_weight=0.0,
        orthogonal_reg_active_codes_only=False,
        orthogonal_reg_max_codes=None,
        sample_codebook_temp=0.0,
        sync_codebook=False,
    ):
        super().__init__()
        self.heads = heads
        self.separate_codebook_per_head = separate_codebook_per_head
        codebook_dim = default(codebook_dim, dim)
        codebook_input_dim = codebook_dim * heads
        # project in/out only when the codebook dimensionality differs from dim
        requires_projection = codebook_input_dim != dim
        self.project_in = (
            nn.Linear(dim, codebook_input_dim) if requires_projection else nn.Identity()
        )
        self.project_out = (
            nn.Linear(codebook_input_dim, dim) if requires_projection else nn.Identity()
        )
        self.eps = eps
        self.commitment_weight = commitment_weight
        has_codebook_orthogonal_loss = orthogonal_reg_weight > 0
        self.orthogonal_reg_weight = orthogonal_reg_weight
        self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only
        self.orthogonal_reg_max_codes = orthogonal_reg_max_codes
        codebook_class = EuclideanCodebook if not use_cosine_sim else CosineSimCodebook
        self._codebook = codebook_class(
            dim=codebook_dim,
            num_codebooks=heads if separate_codebook_per_head else 1,
            codebook_size=codebook_size,
            kmeans_init=kmeans_init,
            kmeans_iters=kmeans_iters,
            decay=decay,
            eps=eps,
            threshold_ema_dead_code=threshold_ema_dead_code,
            use_ddp=sync_codebook,
            learnable_codebook=has_codebook_orthogonal_loss,
            sample_codebook_temp=sample_codebook_temp,
        )
        self.codebook_size = codebook_size
        self.accept_image_fmap = accept_image_fmap
        self.channel_last = channel_last

    @property
    def codebook(self):
        # Raw code vectors of the underlying codebook.
        return self._codebook.embed

    def forward(self, x):
        """Quantize x; returns (quantized tensor, code indices, aux loss)."""
        _, device, heads, is_multiheaded, _ = (
            x.shape,
            x.device,
            self.heads,
            self.heads > 1,
            self.codebook_size,
        )
        need_transpose = not self.channel_last and not self.accept_image_fmap
        if self.accept_image_fmap:
            # flatten spatial dims; restored at the end
            height, width = x.shape[-2:]
            x = rearrange(x, "b c h w -> b (h w) c")
        if need_transpose:
            x = rearrange(x, "b d n -> b n d")
        x = self.project_in(x)
        if is_multiheaded:
            ein_rhs_eq = "h b n d" if self.separate_codebook_per_head else "1 (b h) n d"
            x = rearrange(x, f"b n (h d) -> {ein_rhs_eq}", h=heads)
        quantize, embed_ind = self._codebook(x)
        if self.training:
            # straight-through estimator: gradients flow through x
            quantize = x + (quantize - x).detach()
        loss = flow.tensor([0.0], device=device, requires_grad=self.training)
        if self.training:
            if self.commitment_weight > 0:
                commit_loss = F.mse_loss(quantize.detach(), x)
                loss = loss + commit_loss * self.commitment_weight
            if self.orthogonal_reg_weight > 0:
                codebook = self.codebook
                if self.orthogonal_reg_active_codes_only:
                    # only calculate orthogonal loss for the activated codes for this batch
                    unique_code_ids = flow.unique(embed_ind)
                    codebook = codebook[unique_code_ids]
                num_codes = codebook.shape[0]
                if (
                    exists(self.orthogonal_reg_max_codes)
                    and num_codes > self.orthogonal_reg_max_codes
                ):
                    rand_ids = flow.randperm(num_codes, device=device)[
                        : self.orthogonal_reg_max_codes
                    ]
                    codebook = codebook[rand_ids]
                orthogonal_reg_loss = orthgonal_loss_fn(codebook)
                loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight
        if is_multiheaded:
            if self.separate_codebook_per_head:
                quantize = rearrange(quantize, "h b n d -> b n (h d)", h=heads)
                embed_ind = rearrange(embed_ind, "h b n -> b n h", h=heads)
            else:
                quantize = rearrange(quantize, "1 (b h) n d -> b n (h d)", h=heads)
                embed_ind = rearrange(embed_ind, "1 (b h) n -> b n h", h=heads)
        quantize = self.project_out(quantize)
        if need_transpose:
            quantize = rearrange(quantize, "b n d -> b d n")
        if self.accept_image_fmap:
            quantize = rearrange(quantize, "b (h w) c -> b c h w", h=height, w=width)
            embed_ind = rearrange(embed_ind, "b (h w) ... -> b h w ...", h=height, w=width)
        return quantize, embed_ind, loss
| 19,209 | 30.033926 | 100 | py |
libai | libai-main/projects/DALLE2/dalle2/einops_exts.py | # From https://github.com/arogozhnikov/einops/blob/master/einops/layers/oneflow.py
import re
from functools import wraps
import oneflow as flow
from einops import rearrange, reduce, repeat
from einops._backends import AbstractBackend
from einops.layers import RearrangeMixin
from oneflow import nn
class Rearrange(RearrangeMixin, flow.nn.Module):
    """nn.Module layer applying a fixed einops rearrange pattern (OneFlow port)."""

    def forward(self, input):
        return self._apply_recipe(input)
class OneFlowBackend(AbstractBackend):
    """einops backend implementation for OneFlow tensors."""

    framework_name = "oneflow"

    def __init__(self):
        # imported lazily so einops can enumerate backends without oneflow
        import oneflow as flow

        self.flow = flow

    def is_appropriate_type(self, tensor):
        return isinstance(tensor, self.flow.Tensor)

    def from_numpy(self, x):
        variable = self.flow.from_numpy(x)
        if self.is_float_type(variable):
            # attach grad only to floating types
            variable.requires_grad = True
        return variable

    def to_numpy(self, x):
        return x.detach().cpu().numpy()

    def arange(self, start, stop):
        return self.flow.arange(start, stop, dtype=self.flow.int64)

    def reduce(self, x, operation, reduced_axes):
        # reduce innermost axes first so earlier axis indices stay valid
        for axis in sorted(reduced_axes, reverse=True):
            if operation == "min":
                x, _ = x.min(dim=axis)
            elif operation == "max":
                x, _ = x.max(dim=axis)
            elif operation in ["sum", "mean", "prod"]:
                x = getattr(x, operation)(dim=axis)
            else:
                raise NotImplementedError("Unknown reduction ", operation)
        return x

    def transpose(self, x, axes):
        return x.permute(axes)

    def stack_on_zeroth_dimension(self, tensors: list):
        return self.flow.stack(tensors)

    def add_axes(self, x, n_axes, pos2len):
        # insert singleton axes, then expand them to the requested lengths
        repeats = [-1] * n_axes
        for axis_position, axis_length in pos2len.items():
            x = self.add_axis(x, axis_position)
            repeats[axis_position] = axis_length
        return x.expand(*repeats)

    def tile(self, x, repeats):
        return x.repeat(repeats)

    def add_axis(self, x, new_position):
        return self.flow.unsqueeze(x, new_position)

    def is_float_type(self, x):
        return x.dtype in [self.flow.float16, self.flow.float32, self.flow.float64]

    def einsum(self, pattern, *x):
        return self.flow.einsum(pattern, *x)
# From https://github.com/lucidrains/einops-exts/tree/main/einops_exts
class EinopsToAndFrom(nn.Module):
    """Run `fn` on a rearranged view of the input: rearrange from_einops ->
    to_einops, apply fn, then rearrange back (restoring the original axis
    sizes recorded from the input shape)."""

    def __init__(self, from_einops, to_einops, fn):
        super().__init__()
        self.from_einops = from_einops
        self.to_einops = to_einops
        self.fn = fn

    def forward(self, x, **kwargs):
        shape = x.shape
        # map each axis name in from_einops to its concrete size
        reconstitute_kwargs = dict(tuple(zip(self.from_einops.split(" "), shape)))
        x = rearrange(x, f"{self.from_einops} -> {self.to_einops}")
        x = self.fn(x, **kwargs)
        x = rearrange(x, f"{self.to_einops} -> {self.from_einops}", **reconstitute_kwargs)
        return x
# checking shape
# @nils-werner
# https://github.com/arogozhnikov/einops/issues/168#issuecomment-1042933838
def check_shape(tensor, pattern, **kwargs):
    # Identity rearrange: validates that tensor matches `pattern` (and any
    # axis-size kwargs), raising on mismatch; returns the tensor unchanged.
    return rearrange(tensor, f"{pattern} -> {pattern}", **kwargs)
# do same einops operations on a list of tensors
def _many(fn):
    """Lift an einops operation to a sequence of tensors, applied lazily."""

    @wraps(fn)
    def inner(tensors, pattern, **kwargs):
        # yields fn applied to each tensor with the same pattern/kwargs
        for t in tensors:
            yield fn(t, pattern, **kwargs)

    return inner
# do einops with unflattening of anonymously named dimensions
# (...flattened) -> ...flattened
def _with_anon_dims(fn):
    """Wrap an einops op so patterns may contain anonymous dimension lists.

    A token like ``...foo`` in the pattern is expanded into ``foo0 foo1 ...``
    with one axis per entry of the keyword argument ``foo`` (a list/tuple of
    sizes); that keyword is replaced by the generated per-axis kwargs before
    delegating to `fn`.
    """

    @wraps(fn)
    def inner(tensor, pattern, **kwargs):
        regex = r"(\.\.\.[a-zA-Z]+)"
        matches = re.findall(regex, pattern)

        def get_anon_dim_name(t):
            return t.lstrip("...")

        dim_prefixes = tuple(map(get_anon_dim_name, set(matches)))
        update_kwargs_dict = dict()
        for prefix in dim_prefixes:
            assert prefix in kwargs, f'dimension list "{prefix}" was not passed in'
            dim_list = kwargs[prefix]
            assert isinstance(
                dim_list, (list, tuple)
            ), f'dimension list "{prefix}" needs to be a tuple of list of dimensions'
            dim_names = list(map(lambda ind: f"{prefix}{ind}", range(len(dim_list))))
            update_kwargs_dict[prefix] = dict(zip(dim_names, dim_list))

        def sub_with_anonymous_dims(t):
            # replace "...foo" with the generated axis names "foo0 foo1 ..."
            dim_name_prefix = get_anon_dim_name(t.groups()[0])
            return " ".join(update_kwargs_dict[dim_name_prefix].keys())

        pattern_new = re.sub(regex, sub_with_anonymous_dims, pattern)
        for prefix, update_dict in update_kwargs_dict.items():
            del kwargs[prefix]
            kwargs.update(update_dict)
        return fn(tensor, pattern_new, **kwargs)

    return inner
# generate all helper functions
# list-of-tensors variants of the einops primitives
rearrange_many = _many(rearrange)
repeat_many = _many(repeat)
reduce_many = _many(reduce)
# variants accepting anonymous "...name" dimension lists
rearrange_with_anon_dims = _with_anon_dims(rearrange)
repeat_with_anon_dims = _with_anon_dims(repeat)
reduce_with_anon_dims = _with_anon_dims(reduce)
| 5,044 | 28.852071 | 90 | py |
libai | libai-main/projects/DALLE2/dalle2/model_weights/download_utils.py | import logging
import os
from libai.utils.file_utils import download_file
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Download URLs for each pretrained component; the "swinir" entry is a
# release-directory URL to which the caller appends the file name.
url_map = {
    "prior": "https://huggingface.co/nousr/conditioned-prior/resolve/main/vit-l-14/prior_aes_finetune.pth",  # noqa
    "decoder": "https://huggingface.co/laion/DALLE2-PyTorch/resolve/main/decoder/1.5B_laion2B/latest.pth",  # noqa
    "clip": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",  # noqa
    "bpe_vocab": "https://oneflow-static.oss-cn-beijing.aliyuncs.com/libai/clip/bpe_simple_vocab_16e6.txt.gz",  # noqa
    "swinir": "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/",
}
def _download_if_not_exist(path, name):
    """Ensure the weight file for component *name* exists at *path*.

    If the file is already present it is reused (and logged); otherwise the
    parent directory is created and the file is downloaded from ``url_map``.
    The "swinir" entry in ``url_map`` is a directory URL, so the requested
    file name is appended to it.
    """
    if os.path.exists(path):
        logger.info(f"using {name}'s weight at {path}")
        return
    # single makedirs shared by both branches (the original duplicated it)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    url = url_map[name]
    if name == "swinir":
        url += os.path.basename(path)
    download_file(path, url)
def download_dalle2_weights(cfg):
    """Fetch every pretrained weight the DALL-E 2 config refers to.

    Downloads the BPE vocabulary unconditionally (if missing), then each
    model weight only when its configured path does not already exist.
    """
    bpe_path = "./dalle2/data/bpe_simple_vocab_16e6.txt.gz"
    if not os.path.exists(bpe_path):
        os.makedirs("./dalle2/data", exist_ok=True)
        download_file(bpe_path, url_map["bpe_vocab"])
    # (local target path from the config, url_map key) pairs.
    for target_path, key in (
        (cfg.swinir.swinir_path, "swinir"),
        (cfg.model.prior_weight_path, "prior"),
        (cfg.model.decoder_weight_path, "decoder"),
        (cfg.model.prior.clip.name, "clip"),
    ):
        _download_if_not_exist(target_path, key)
libai | libai-main/projects/DALLE2/configs/dalle2_config.py | from libai.config import LazyCall
from configs.common.train import train
from dalle2.models import DiffusionPrior, DiffusionPriorNetwork, Unet, Decoder, DALLE2
from dalle2._clip import OpenAIClipAdapter
from omegaconf import DictConfig
# CLIP backbone shared by the prior; its weight path (``name``) is filled in
# at runtime before instantiation.
clip = LazyCall(OpenAIClipAdapter)(name="")
# SwinIR super-resolution weight location; None until configured at runtime.
swinir = DictConfig({"swinir_path": None})
# Diffusion prior: maps CLIP text embeddings to CLIP image embeddings.
prior = LazyCall(DiffusionPrior)(
    net=LazyCall(DiffusionPriorNetwork)(
        dim=768,
        depth=24,
        num_timesteps=1000,
        max_text_len=77,
        num_time_embeds=1,
        num_image_embeds=1,
        num_text_embeds=1,
        dim_head=64,
        heads=32,
        ff_mult=4,
        attn_dropout=0.05,
        ff_dropout=0.05,
        normformer=True,
    ),
    clip=clip,
    image_embed_dim=768,
    timesteps=1000,
    cond_drop_prob=0.1,
    loss_type="l2",
    condition_on_text_encodings=True,
)
# Decoder U-Net, conditioned on text encodings as well as image embeddings.
unet1 = LazyCall(Unet)(
    dim=320,
    image_embed_dim=768,
    text_embed_dim=768,
    cond_dim=512,
    channels=3,
    dim_mults=(1, 2, 3, 4),
    num_resnet_blocks=4,
    attn_heads=8,
    attn_dim_head=64,
    sparse_attn=True,
    memory_efficient=True,
    cond_on_text_encodings=True,
    self_attn=[False, True, True, True],
)
# Diffusion decoder: renders 64x64 RGB images from CLIP image embeddings.
decoder = LazyCall(Decoder)(
    unet=(unet1,),
    image_sizes=[
        64,
    ],
    clip=None,
    channels=3,
    timesteps=1000,
    loss_type="l2",
    beta_schedule=["cosine"],
    learned_variance=True,
)
# Full DALL-E 2 pipeline; weight paths are populated at runtime
# (see dalle2/model_weights/download_utils.py).
model = LazyCall(DALLE2)(
    prior=prior,
    decoder=decoder,
    prior_weight_path="",
    decoder_weight_path="",
)
| 1,532 | 21.217391 | 86 | py |
libai | libai-main/projects/DALLE2/swinir/utils.py | # -----------------------------------------------------------------------------------
# from
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
# -----------------------------------------------------------------------------------
import collections.abc
import math
import warnings
from itertools import repeat
import oneflow as flow
import oneflow.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from Pytorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with flow.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
    # type: (flow.Tensor, float, float, float, float) -> flow.Tensor
    r"""Fill ``tensor`` in-place with samples from a truncated normal.

    Values are drawn from :math:`\mathcal{N}(\text{mean}, \text{std}^2)` and
    constrained to lie within :math:`[a, b]`; anything outside the bounds is
    redrawn/clamped. Works best when :math:`a \leq \text{mean} \leq b`.

    NOTE: as in the Pytorch ``trunc_normal_``, the bounds [a, b] are applied
    while sampling with mean/std already in effect, so a and b should be
    chosen to match the range of mean and std.

    Args:
        tensor: an n-dimensional tensor to fill in-place
        mean: mean of the normal distribution
        std: standard deviation of the normal distribution
        a: minimum cutoff value
        b: maximum cutoff value

    Returns:
        The same tensor, filled in-place.
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
# -----------------------------------------------------------------------------------
# from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/helpers.py
# -----------------------------------------------------------------------------------
# From Pytorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
return x
return tuple(repeat(x, n))
return parse
# Ready-made converters: broadcast a scalar to a fixed-length tuple; real
# sequences (non-string iterables) pass through unchanged.
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
# -----------------------------------------------------------------------------------
# from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
# -----------------------------------------------------------------------------------
def drop_path(x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True):
    """Drop paths (Stochastic Depth) per sample, for residual-branch outputs.

    Same mechanism as the DropConnect used in EfficientNet-style networks,
    but named "drop path" here since DropConnect refers to a different kind
    of dropout (see https://github.com/tensortorch/tpu/issues/494#issuecomment-532968956).
    A no-op when ``drop_prob`` is 0 or when not training; otherwise each
    sample's entire activation is zeroed with probability ``drop_prob`` and,
    if ``scale_by_keep``, the survivors are rescaled by 1/keep_prob.
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast across all remaining dims
    # (works for tensors of any rank, not just 2D ConvNet activations).
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = x.new_empty(mask_shape).bernoulli_(keep_prob)
    if scale_by_keep and keep_prob > 0.0:
        mask.div_(keep_prob)
    return x * mask
class DropPath(nn.Module):
    """Stochastic-depth module: drops whole residual paths per sample.

    Thin ``nn.Module`` wrapper around :func:`drop_path` that reads the
    module's own ``training`` flag.
    """

    def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True):
        super().__init__()
        self.drop_prob = drop_prob
        self.scale_by_keep = scale_by_keep

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)

    def extra_repr(self):
        return f"drop_prob={round(self.drop_prob,3):0.3f}"
| 5,374 | 39.413534 | 99 | py |
libai | libai-main/projects/DALLE2/swinir/models.py | # -----------------------------------------------------------------------------------
# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
# Originally Written by Ze Liu, Modified by Jingyun Liang.
# -----------------------------------------------------------------------------------
# code from https://github.com/JingyunLiang/SwinIR/blob/main/models/network_swinir.py
import math
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
from oneflow.utils import checkpoint
from .utils import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> act -> drop -> Linear -> drop.

    Hidden and output widths default to ``in_features`` when not given.
    """

    def __init__(
        self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0
    ):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        # One dropout module applied after each linear layer.
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    batch, height, width, channels = x.shape
    # Expose the per-window axes, bring the two window axes together,
    # then collapse (batch, grid_h, grid_w) into a single window index.
    grid = x.view(
        batch, height // window_size, window_size, width // window_size, window_size, channels
    )
    windows = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return windows.view(-1, window_size, window_size, channels)
def window_reverse(windows, window_size, H, W):
    """Inverse of :func:`window_partition`: reassemble windows into a map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    # Pure integer arithmetic; the original used true division plus int(),
    # which relies on float round-off staying benign for large H*W.
    B = windows.shape[0] // (H * W // window_size // window_size)
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x
class WindowAttention(nn.Module):
    r"""Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional):
            If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """
    def __init__(
        self,
        dim,
        window_size,
        num_heads,
        qkv_bias=True,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            flow.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
        )  # 2*Wh-1 * 2*Ww-1, nH
        # get pair-wise relative position index for each token inside the window
        coords_h = flow.arange(self.window_size[0])
        coords_w = flow.arange(self.window_size[1])
        coords = flow.stack(flow.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = flow.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        # Encode the (dh, dw) offset pair as a single flat table index.
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # Buffer (not a parameter): fixed lookup indices into the bias table.
        self.register_buffer("relative_position_index", relative_position_index)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        trunc_normal_(self.relative_position_bias_table, std=0.02)
        self.softmax = nn.Softmax(dim=-1)
    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        # One projection produces q, k and v: (3, B_, nH, N, head_dim).
        qkv = (
            self.qkv(x)
            .reshape(B_, N, 3, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
        q = q * self.scale
        attn = q @ k.transpose(-2, -1)
        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)
        ].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
        )  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(
            2, 0, 1
        ).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)
        if mask is not None:
            # Broadcast the per-window shift mask over batch and heads.
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}"
    def flops(self, N):
        """Estimated FLOPs for one window with token length N."""
        # calculate flops for 1 window with token length of N
        flops = 0
        # qkv = self.qkv(x)
        flops += N * self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        # x = (attn @ v)
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # x = self.proj(x)
        flops += N * self.dim * self.dim
        return flops
class SwinTransformerBlock(nn.Module):
    r"""Swin Transformer Block.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resulotion.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(
        self,
        dim,
        input_resolution,
        num_heads,
        window_size=7,
        shift_size=0,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim,
            window_size=to_2tuple(self.window_size),
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop
        )
        # Precompute the shifted-window attention mask for the configured
        # resolution; other resolutions are computed on the fly in forward.
        if self.shift_size > 0:
            attn_mask = self.calculate_mask(self.input_resolution)
        else:
            attn_mask = None
        self.register_buffer("attn_mask", attn_mask)
    def calculate_mask(self, x_size):
        """Build the (nW, N, N) additive attention mask for SW-MSA at x_size."""
        # calculate attention mask for SW-MSA
        H, W = x_size
        img_mask = flow.zeros((1, H, W, 1))  # 1 H W 1
        # Label the nine shift regions so tokens that originated in different
        # regions (after the cyclic roll) cannot attend to each other.
        h_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        w_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1
        mask_windows = window_partition(
            img_mask, self.window_size
        )  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        # Nonzero difference => different region => large negative bias.
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(
            attn_mask == 0, float(0.0)
        )
        return attn_mask
    def forward(self, x, x_size):
        H, W = x_size
        B, L, C = x.shape
        # assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = flow.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x
        # partition windows
        x_windows = window_partition(
            shifted_x, self.window_size
        )  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(
            -1, self.window_size * self.window_size, C
        )  # nW*B, window_size*window_size, C
        # W-MSA/SW-MSA (to be compatible for testing on images
        # whose shapes are the multiple of window size
        if self.input_resolution == x_size:
            attn_windows = self.attn(
                x_windows, mask=self.attn_mask
            )  # nW*B, window_size*window_size, C
        else:
            # Resolution differs from construction time: rebuild the mask.
            attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = flow.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)
        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
    def extra_repr(self) -> str:
        return (
            f"dim={self.dim}, input_resolution={self.input_resolution}, "
            f"num_heads={self.num_heads}, window_size={self.window_size},"
            f"shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
        )
    def flops(self):
        """Estimated FLOPs for one block at the configured input resolution."""
        flops = 0
        H, W = self.input_resolution
        # norm1
        flops += self.dim * H * W
        # W-MSA/SW-MSA
        nW = H * W / self.window_size / self.window_size
        flops += nW * self.attn.flops(self.window_size * self.window_size)
        # mlp
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        # norm2
        flops += self.dim * H * W
        return flops
class PatchMerging(nn.Module):
    r"""Patch Merging Layer.

    Downsamples a (B, H*W, C) token map by 2x in each spatial direction:
    each 2x2 neighborhood is concatenated channel-wise (4C) and projected
    down to 2C.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """x: B, H*W, C"""
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
        grid = x.view(B, H, W, C)
        # The four interleaved sub-grids of every 2x2 neighborhood,
        # each of shape (B, H/2, W/2, C).
        corners = [
            grid[:, 0::2, 0::2, :],
            grid[:, 1::2, 0::2, :],
            grid[:, 0::2, 1::2, :],
            grid[:, 1::2, 1::2, :],
        ]
        merged = flow.cat(corners, -1).view(B, -1, 4 * C)  # B H/2*W/2 4*C
        return self.reduction(self.norm(merged))

    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"

    def flops(self):
        H, W = self.input_resolution
        # LayerNorm over all tokens plus the 4C -> 2C projection.
        return H * W * self.dim + (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
class BasicLayer(nn.Module):
    """A basic Swin Transformer layer for one stage.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional):
            Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """
    def __init__(
        self,
        dim,
        input_resolution,
        depth,
        num_heads,
        window_size,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        norm_layer=nn.LayerNorm,
        downsample=None,
        use_checkpoint=False,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # build blocks
        # Even-indexed blocks use regular windows (shift 0); odd-indexed
        # blocks use shifted windows (shift = window_size // 2).
        self.blocks = nn.ModuleList(
            [
                SwinTransformerBlock(
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    window_size=window_size,
                    shift_size=0 if (i % 2 == 0) else window_size // 2,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop,
                    attn_drop=attn_drop,
                    drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                    norm_layer=norm_layer,
                )
                for i in range(depth)
            ]
        )
        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None
    def forward(self, x, x_size):
        for blk in self.blocks:
            if self.use_checkpoint:
                # Trade compute for memory: recompute activations in backward.
                x = checkpoint.checkpoint(blk, x, x_size)
            else:
                x = blk(x, x_size)
        if self.downsample is not None:
            x = self.downsample(x)
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
    def flops(self):
        """Sum of per-block FLOPs plus the optional downsample layer."""
        flops = 0
        for blk in self.blocks:
            flops += blk.flops()
        if self.downsample is not None:
            flops += self.downsample.flops()
        return flops
class RSTB(nn.Module):
    """Residual Swin Transformer Block (RSTB).
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional):
            If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional):
            Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional):
            Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool):
            Whether to use checkpointing to save memory. Default: False.
        img_size: Input image size.
        patch_size: Patch size.
        resi_connection: The convolutional block before residual connection.
    """
    def __init__(
        self,
        dim,
        input_resolution,
        depth,
        num_heads,
        window_size,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        norm_layer=nn.LayerNorm,
        downsample=None,
        use_checkpoint=False,
        img_size=224,
        patch_size=4,
        resi_connection="1conv",
    ):
        super(RSTB, self).__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.residual_group = BasicLayer(
            dim=dim,
            input_resolution=input_resolution,
            depth=depth,
            num_heads=num_heads,
            window_size=window_size,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            drop=drop,
            attn_drop=attn_drop,
            drop_path=drop_path,
            norm_layer=norm_layer,
            downsample=downsample,
            use_checkpoint=use_checkpoint,
        )
        if resi_connection == "1conv":
            self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
        elif resi_connection == "3conv":
            # to save parameters and memory
            self.conv = nn.Sequential(
                nn.Conv2d(dim, dim // 4, 3, 1, 1),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(dim // 4, dim, 3, 1, 1),
            )
        # Token<->image converters used to run the conv on a 2D feature map.
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None
        )
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None
        )
    def forward(self, x, x_size):
        # Swin blocks -> unembed to image -> conv -> re-embed, plus residual.
        return (
            self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size)))
            + x
        )
    def flops(self):
        """Residual group + 3x3 conv + (un)embedding FLOPs."""
        flops = 0
        flops += self.residual_group.flops()
        H, W = self.input_resolution
        flops += H * W * self.dim * self.dim * 9
        flops += self.patch_embed.flops()
        flops += self.patch_unembed.flops()
        return flops
class PatchEmbed(nn.Module):
    r"""Image to Patch Embedding.

    Flattens a (B, C, Ph, Pw) feature map into a (B, Ph*Pw, C) token
    sequence and optionally applies a normalization layer.

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = grid
        self.num_patches = grid[0] * grid[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        self.norm = norm_layer(embed_dim) if norm_layer is not None else None

    def forward(self, x):
        # (B, C, Ph, Pw) -> (B, Ph*Pw, C)
        tokens = x.flatten(2).transpose(1, 2)
        return tokens if self.norm is None else self.norm(tokens)

    def flops(self):
        # Only the optional norm contributes; flattening is free.
        H, W = self.img_size
        return H * W * self.embed_dim if self.norm is not None else 0
class PatchUnEmbed(nn.Module):
    r"""Image to Patch Unembedding.

    Reshapes a (B, H*W, C) token sequence back into a (B, C, H, W) map.

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = grid
        self.num_patches = grid[0] * grid[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim

    def forward(self, x, x_size):
        batch = x.shape[0]
        # (B, H*W, C) -> (B, C, H, W)
        return x.transpose(1, 2).view(batch, self.embed_dim, x_size[0], x_size[1])

    def flops(self):
        # Pure reshape: no arithmetic.
        return 0
class Upsample(nn.Sequential):
    """Pixel-shuffle upsampler.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """

    def __init__(self, scale, num_feat):
        layers = []
        if (scale & (scale - 1)) == 0:
            # Power of two: stack log2(scale) x2 stages.
            for _ in range(int(math.log(scale, 2))):
                layers.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
                layers.append(nn.PixelShuffle(2))
        elif scale == 3:
            layers.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
            layers.append(nn.PixelShuffle(3))
        else:
            raise ValueError(f"scale {scale} is not supported. Supported scales: 2^n and 3.")
        super().__init__(*layers)
class UpsampleOneStep(nn.Sequential):
    """UpsampleOneStep module
    (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
    Used in lightweight SR to save parameters.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """

    def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
        # Plain attributes kept for flops(); set before the Sequential init,
        # exactly as the original did.
        self.num_feat = num_feat
        self.input_resolution = input_resolution
        modules = [
            nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1),
            nn.PixelShuffle(scale),
        ]
        super().__init__(*modules)

    def flops(self):
        # One 3x3 conv over H*W positions with num_feat input channels.
        H, W = self.input_resolution
        return H * W * self.num_feat * 3 * 9
class SwinIR(nn.Module):
r"""SwinIR
A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`,
based on Swin Transformer.
Args:
img_size (int | tuple(int)): Input image size. Default 64
patch_size (int | tuple(int)): Patch size. Default: 1
in_chans (int): Number of input image channels. Default: 3
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float):
Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool):
If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool):
Whether to use checkpointing to save memory. Default: False
upscale: Upscale factor.
2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
img_range: Image range. 1. or 255.
upsampler: The reconstruction reconstruction module.
'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
"""
def __init__(
self,
img_size=64,
patch_size=1,
in_chans=3,
embed_dim=96,
depths=[6, 6, 6, 6],
num_heads=[6, 6, 6, 6],
window_size=7,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
ape=False,
patch_norm=True,
use_checkpoint=False,
upscale=2,
img_range=1.0,
upsampler="",
resi_connection="1conv",
**kwargs,
):
super(SwinIR, self).__init__()
num_in_ch = in_chans
num_out_ch = in_chans
num_feat = 64
self.img_range = img_range
if in_chans == 3:
rgb_mean = (0.4488, 0.4371, 0.4040)
self.mean = flow.Tensor(rgb_mean).view(1, 3, 1, 1)
else:
self.mean = flow.zeros(1, 1, 1, 1)
self.upscale = upscale
self.upsampler = upsampler
self.window_size = window_size
# 1, shallow feature extraction
self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
# 2, deep feature extraction
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = embed_dim
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=embed_dim,
embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None,
)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# merge non-overlapping patches into image
self.patch_unembed = PatchUnEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=embed_dim,
embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None,
)
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(flow.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=0.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [
x.item() for x in flow.linspace(0, drop_path_rate, sum(depths))
] # stochastic depth decay rule
# build Residual Swin Transformer blocks (RSTB)
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = RSTB(
dim=embed_dim,
input_resolution=(patches_resolution[0], patches_resolution[1]),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[
sum(depths[:i_layer]) : sum(depths[: i_layer + 1])
], # no impact on SR results
norm_layer=norm_layer,
downsample=None,
use_checkpoint=use_checkpoint,
img_size=img_size,
patch_size=patch_size,
resi_connection=resi_connection,
)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
# build the last conv layer in deep feature extraction
if resi_connection == "1conv":
self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
elif resi_connection == "3conv":
# to save parameters and memory
self.conv_after_body = nn.Sequential(
nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1),
)
# 3, high quality image reconstruction
if self.upsampler == "pixelshuffle":
# for classical SR
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)
)
self.upsample = Upsample(upscale, num_feat)
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
elif self.upsampler == "pixelshuffledirect":
# for lightweight SR (to save parameters)
self.upsample = UpsampleOneStep(
upscale, embed_dim, num_out_ch, (patches_resolution[0], patches_resolution[1])
)
elif self.upsampler == "nearest+conv":
# for real-world SR (less artifacts)
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)
)
self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
if self.upscale == 4:
self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
# for image denoising and JPEG compression artifact reduction
self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {"absolute_pos_embed"}
def no_weight_decay_keywords(self):
return {"relative_position_bias_table"}
def check_image_size(self, x):
_, _, h, w = x.size()
mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), "reflect")
return x
def forward_features(self, x):
x_size = (x.shape[2], x.shape[3])
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x, x_size)
x = self.norm(x) # B L C
x = self.patch_unembed(x, x_size)
return x
def forward(self, x):
H, W = x.shape[2:]
x = self.check_image_size(x)
self.mean = self.mean.type_as(x).to(x.device)
x = (x - self.mean) * self.img_range
if self.upsampler == "pixelshuffle":
# for classical SR
x = self.conv_first(x)
x = self.conv_after_body(self.forward_features(x)) + x
x = self.conv_before_upsample(x)
x = self.conv_last(self.upsample(x))
elif self.upsampler == "pixelshuffledirect":
# for lightweight SR
x = self.conv_first(x)
x = self.conv_after_body(self.forward_features(x)) + x
x = self.upsample(x)
elif self.upsampler == "nearest+conv":
# for real-world SR
x = self.conv_first(x)
x = self.conv_after_body(self.forward_features(x)) + x
x = self.conv_before_upsample(x)
x = self.lrelu(
self.conv_up1(flow.nn.functional.interpolate(x, scale_factor=2, mode="nearest"))
)
if self.upscale == 4:
x = self.lrelu(
self.conv_up2(flow.nn.functional.interpolate(x, scale_factor=2, mode="nearest"))
)
x = self.conv_last(self.lrelu(self.conv_hr(x)))
else:
# for image denoising and JPEG compression artifact reduction
x_first = self.conv_first(x)
res = self.conv_after_body(self.forward_features(x_first)) + x_first
x = x + self.conv_last(res)
x = x / self.img_range + self.mean
return x[:, :, : H * self.upscale, : W * self.upscale]
def flops(self):
flops = 0
H, W = self.patches_resolution
flops += H * W * 3 * self.embed_dim * 9
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += H * W * 3 * self.embed_dim * self.embed_dim
flops += self.upsample.flops()
return flops
# Smoke test: build a lightweight SwinIR and run a single dummy batch through it.
if __name__ == "__main__":
    # NOTE(review): `upscale` is 4 here but the model below is built with upscale=2,
    # so the printed flops/output shape may not match the x4 setting — confirm intent.
    upscale = 4
    window_size = 8
    # Pick an input resolution that is a multiple of window_size after downscaling.
    height = (1024 // upscale // window_size + 1) * window_size
    width = (720 // upscale // window_size + 1) * window_size
    model = SwinIR(
        upscale=2,
        img_size=(height, width),
        window_size=window_size,
        img_range=1.0,
        depths=[6, 6, 6, 6],
        embed_dim=60,
        num_heads=[6, 6, 6, 6],
        mlp_ratio=2,
        upsampler="pixelshuffledirect",
    )
    print(model)
    print(height, width, model.flops() / 1e9)
    x = flow.randn((1, 3, height, width))
    x = model(x)
    print(x.shape)
| 37,112 | 34.823359 | 100 | py |
libai | libai-main/projects/DALLE2/swinir/__init__.py | from .models import SwinIR
from .upsample import load_model, upsample4x, upsample16x
| 85 | 27.666667 | 57 | py |
libai | libai-main/projects/DALLE2/swinir/upsample.py | import os
import oneflow as flow
import requests
from .models import SwinIR as net
def load_torch_weight(model, model_path):
    """Load a PyTorch SwinIR checkpoint into `model`, converting every tensor to OneFlow."""
    import torch

    checkpoint = torch.load(model_path, map_location="cpu")
    # Official SwinIR releases store the EMA weights under "params_ema".
    if "params_ema" in checkpoint.keys():
        checkpoint = checkpoint["params_ema"]
    state_dict = {k: flow.tensor(v.numpy()) for k, v in checkpoint.items()}
    model.load_state_dict(state_dict, strict=True)
    return model
def load_model(model_path=None):
    """Build the real-world x4 SwinIR model and load its pretrained weights.

    If ``model_path`` does not exist locally, the matching official checkpoint
    is first downloaded from the SwinIR GitHub releases.

    Args:
        model_path (str): Local path of the ``.pth`` checkpoint file.

    Returns:
        The SwinIR model with the pretrained weights loaded.
    """
    # set up model
    if os.path.exists(model_path):
        print(f"loading model from {model_path}")
    else:
        os.makedirs(os.path.dirname(model_path), exist_ok=True)
        url = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/{}".format(
            os.path.basename(model_path)
        )
        r = requests.get(url, allow_redirects=True)
        print(f"downloading model {model_path}")
        # Fix: use a context manager so the file handle is closed even if the
        # write fails (the original `open(...).write(...)` leaked the handle).
        with open(model_path, "wb") as f:
            f.write(r.content)

    model = net(
        upscale=4,
        in_chans=3,
        img_size=64,
        window_size=8,
        img_range=1.0,
        depths=[6, 6, 6, 6, 6, 6, 6, 6, 6],
        embed_dim=240,
        num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
        mlp_ratio=2,
        upsampler="nearest+conv",
        resi_connection="3conv",
    )
    model = load_torch_weight(model, model_path)
    return model
def upsample4x(img_lq, model):
    """upsample img from h*w to (4h) * (4w)"""
    device = flow.device("cuda" if flow.cuda.is_available() else "cpu")
    model.eval()
    model = model.to(device)
    img_lq = img_lq.to(device)

    window_size = 8
    scale = 4
    with flow.no_grad():
        _, _, h_old, w_old = img_lq.size()
        # Mirror-extend H and W up to the next multiple of window_size.
        h_target = (h_old // window_size + 1) * window_size
        w_target = (w_old // window_size + 1) * window_size
        img_lq = flow.cat([img_lq, flow.flip(img_lq, [2])], 2)[:, :, :h_target, :]
        img_lq = flow.cat([img_lq, flow.flip(img_lq, [3])], 3)[:, :, :, :w_target]
        out = model(img_lq)
        # Crop back to the upscaled original size.
        out = out[..., : h_old * scale, : w_old * scale]
    return out.clamp_(0, 1)
def upsample16x(imgs, model):
    """Upsample 16x by applying the 4x model twice."""
    once = upsample4x(imgs, model)
    return upsample4x(once, model)
| 2,486 | 28.607143 | 88 | py |
libai | libai-main/projects/BLOOM/configs/bloom_inference.py | from omegaconf import DictConfig
from libai.config import LazyCall
from projects.BLOOM.modeling.bloom_model import BloomModel
# Default BLOOM configuration (a tiny 2-layer model) plus text-generation and
# tokenizer settings consumed by the inference pipeline.
cfg = dict(
    # model
    vocab_size=250880,
    hidden_size=64,
    hidden_layers=2,
    n_head=8,
    padding_idx=3,
    layer_norm_epsilon=1e-5,
    initializer_range=0.02,
    apply_residual_connection_post_layernorm=False,
    hidden_dropout=0.0,
    attention_dropout=0.0,
    pretraining_tp=1,
    slow_but_exact=False,
    amp_enabled=False,
    # Inference
    is_encoder_decoder=False,
    max_length=512,
    min_length=0,
    do_sample=False,
    early_stopping=False,
    num_beams=1,
    num_beam_groups=1,
    diversity_penalty=0.0,
    temperature=1.0,
    top_k=50,
    top_p=1.0,
    typical_p=1.0,
    repetition_penalty=1.0,
    length_penalty=1.0,
    no_repeat_ngram_size=0,
    encoder_no_repeat_ngram_size=0,
    num_return_sequences=1,
    chunk_size_feed_forward=0,
    output_scores=False,
    forced_bos_token_id=None,
    forced_eos_token_id=None,
    remove_invalid_values=False,
    exponential_decay_length_penalty=None,
    use_cache=True,
    # Tokenizer
    pad_token_id=3,
    eos_token_id=2,
    bos_token_id=1,
    sep_token_id=None,
    decoder_start_token_id=None,
)
cfg = DictConfig(cfg)
# Lazily-constructed model; instantiated by LazyCall when the config is loaded.
# NOTE(review): the name "glm_model" looks copy/pasted from the GLM project —
# presumably should be "bloom_model"; other configs may import this name, so
# confirm before renaming.
glm_model = LazyCall(BloomModel)(cfg=cfg)
| 1,303 | 21.877193 | 58 | py |
libai | libai-main/projects/BLOOM/utils/model_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from libai.models.utils import ModelLoaderHuggerFace, ModelLoaderLiBai
class BlooMLoaderHuggerFace(ModelLoaderHuggerFace):
    """Load HuggingFace BLOOM checkpoints into the LiBai BLOOM model."""

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)

        # base_model_prefix_1 is BLOOM's prefix in Transformers;
        # base_model_prefix_2 is BLOOM's prefix in LiBai.
        self.base_model_prefix_1 = "transformer"
        self.base_model_prefix_2 = "transformer"

    def _convert_state_dict(self, flow_state_dict, cfg):
        """Convert state_dict's keys to match model.

        Args:
            flow_state_dict (OrderedDict): model state dict.
            cfg (dict): model's default config dict in LiBai.

        Returns:
            OrderedDict: flow state dict.
        """
        converted = flow_state_dict.copy()
        original_keys = list(converted.keys())

        # Only prepend "transformer." when at least one checkpoint key already
        # carries the HuggingFace prefix.
        needs_prefix = any(k.startswith(self.base_model_prefix_1) for k in converted)
        prefix = "transformer." if needs_prefix else ""

        for key in original_keys:
            converted[prefix + key] = converted.pop(key)

        return converted

    def _load_config_from_json(self, config_file):
        """load config from `config.json`, and update default config.

        Args:
            config_file (str): Path of config file.
        """
        with open(config_file, mode="r", encoding="utf-8") as f:
            cfg_dict = json.load(f)

        # Map HuggingFace config names onto LiBai's keys.
        self._update_cfg("hidden_layers", cfg_dict["n_layer"])
        self._update_cfg("hidden_size", cfg_dict["n_embed"])
        self._update_cfg("n_head", cfg_dict["num_attention_heads"])

        # Remaining config.json keys, then explicit kwargs, win in that order.
        for key, value in cfg_dict.items():
            self._update_cfg(key, value)
        for key, value in self.kwargs.items():
            self._update_cfg(key, value)

        self._update_cfg_log()
class BlooMLoaderLibai(ModelLoaderLiBai):
    # Loader for checkpoints already saved in LiBai's own format;
    # "transformer" is the BLOOM submodule prefix inside the LiBai model.
    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)
        self.base_model_prefix_2 = "transformer"
| 2,934 | 34.792683 | 92 | py |
libai | libai-main/projects/BLOOM/modeling/activation.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
def bloom_gelu_forward(x):
    """
    Tanh approximation of GELU used by BLOOM (simple, jitable, inference-friendly).
    Adapted from Megatron-DeepSpeed.

    Args:
        x (`torch.tensor`, *required*):
            input hidden states
    """
    inner = 0.79788456 * x * (1 + 0.044715 * x * x)
    return x * 0.5 * (1.0 + flow.tanh(inner))
def bloom_gelu_back(g, x):
    """
    Gradient of the tanh GELU approximation; the gradient of the exact GELU is:
    0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)

    Args:
        g (`torch.tensor`, *required*):
            gradient output tensor
        x (`torch.tensor`, *required*):
            input tensor (a 1-tuple, as produced by ctx.saved_tensors)
    """
    x = x[0]
    t = flow.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
    sech_sq = 1 - t * t  # derivative of tanh
    ff = 0.5 * x * (sech_sq * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + t)
    return ff * g
class GeLUFunction(flow.autograd.Function):
    # Custom autograd Function pairing the tanh-GELU forward with its
    # hand-written backward (used in training mode by BloomGelu).
    @staticmethod
    def forward(ctx, input):
        # Stash the input tensor; it is needed to evaluate the gradient.
        ctx.save_for_backward(input)
        return bloom_gelu_forward(input)

    @staticmethod
    def backward(ctx, grad_output):
        # saved_tensors is a tuple; bloom_gelu_back unwraps it via x[0].
        input = ctx.saved_tensors
        tmp = bloom_gelu_back(grad_output, input)
        return tmp
class BloomGelu(nn.Module):
    """
    GELU wrapper: in training mode it routes through the custom autograd
    function (exact hand-written gradient); in eval mode it calls the plain
    forward so the model stays scriptable. Partly adapted from
    Megatron-DeepSpeed; see https://github.com/pytorch/pytorch/issues/22329
    for why autograd functions are not torchscriptable.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Autograd path only while training; plain function keeps eval jitable.
        return GeLUFunction.apply(x) if self.training else bloom_gelu_forward(x)
| 2,621 | 30.590361 | 97 | py |
libai | libai-main/projects/BLOOM/modeling/transformers.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oneflow import nn
from libai.layers import LayerNorm
from libai.utils import distributed as dist
from projects.BLOOM.modeling.attention import BloomAttention
from projects.BLOOM.modeling.mlp import BloomMLP
class BloomBlock(nn.Module):
    """One BLOOM transformer layer: pre-LN self-attention (with ALiBi bias)
    followed by a pre-LN MLP, each with a dropout-add residual connection.

    When ``apply_residual_connection_post_layernorm`` is True the residual is
    taken *after* the layernorm instead of from the raw hidden states.
    """

    def __init__(
        self,
        hidden_size,
        n_head,
        layer_norm_epsilon,
        hidden_dropout,
        attention_dropout,
        pretraining_tp,
        slow_but_exact,
        init_method,
        output_layer_init_method,
        apply_residual_connection_post_layernorm,
        layer_idx=0,  # pipeline stage / placement index for this layer
    ):
        super().__init__()
        hidden_size = hidden_size
        self.input_layernorm = LayerNorm(hidden_size, eps=layer_norm_epsilon, layer_idx=layer_idx)
        self.num_heads = n_head
        self.self_attention = BloomAttention(
            hidden_size=hidden_size,
            n_head=n_head,
            hidden_dropout=hidden_dropout,
            attention_dropout=attention_dropout,
            pretraining_tp=pretraining_tp,
            slow_but_exact=slow_but_exact,
            init_method=init_method,
            output_layer_init_method=output_layer_init_method,
            layer_idx=layer_idx,
        )
        self.post_attention_layernorm = LayerNorm(
            hidden_size, eps=layer_norm_epsilon, layer_idx=layer_idx
        )
        self.mlp = BloomMLP(
            hidden_size,
            pretraining_tp,
            slow_but_exact,
            hidden_dropout,
            init_method,
            output_layer_init_method,
            layer_idx,
        )
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.layer_idx = layer_idx

    def forward(
        self,
        hidden_states,
        alibi,
        attention_mask,
        layer_past=None,
        head_mask=None,
        use_cache: bool = False,
        output_attentions: bool = False,
    ):
        """Run one layer.

        Returns a tuple whose first element is the output hidden states;
        with ``use_cache`` the second element is the (key, value) present.
        """
        # Change placement for pipeline parallelsim
        hidden_states = hidden_states.to_global(placement=dist.get_layer_placement(self.layer_idx))
        alibi = alibi.to_global(placement=dist.get_layer_placement(self.layer_idx))

        # hidden_states shape: (batch_size, seq_length, hidden_size)
        if attention_mask is not None:
            attention_mask = attention_mask.to_global(
                placement=dist.get_layer_placement(self.layer_idx)
            )
        layernorm_output = self.input_layernorm(hidden_states)

        # Choose the residual source for the attention sub-block.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states

        # Self attention.
        attn_outputs = self.self_attention(
            layernorm_output,
            residual,
            layer_past=layer_past,
            attention_mask=attention_mask,
            alibi=alibi,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )

        attention_output = attn_outputs[0]

        outputs = attn_outputs[1:]

        layernorm_output = self.post_attention_layernorm(attention_output)

        # Choose the residual source for the MLP sub-block.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = attention_output

        # MLP.
        output = self.mlp(layernorm_output, residual)

        if use_cache:
            outputs = (output,) + outputs
        else:
            outputs = (output,) + outputs[1:]

        return outputs  # hidden_states, present, attentions
| 4,242 | 30.664179 | 99 | py |
libai | libai-main/projects/BLOOM/modeling/mlp.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
import oneflow.nn.functional as F
from oneflow import nn
from libai.layers import Linear
from projects.BLOOM.modeling.activation import BloomGelu
from projects.BLOOM.modeling.attention import dropout_add
class BloomMLP(nn.Module):
    """BLOOM feed-forward block: h -> 4h -> GELU -> h, with dropout-add residual."""

    def __init__(
        self,
        hidden_size,
        pretraining_tp,
        slow_but_exact,
        hidden_dropout,
        init_method=None,
        output_layer_init_method=None,
        layer_idx=0,
    ):
        super().__init__()
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        self.pretraining_tp = pretraining_tp
        self.slow_but_exact = slow_but_exact
        self.hidden_dropout = hidden_dropout

        # Column-parallel expansion, row-parallel projection back down.
        self.dense_h_to_4h = Linear(
            hidden_size,
            4 * hidden_size,
            parallel="col",
            init_method=init_method,
            layer_idx=layer_idx,
        )
        self.gelu_impl = BloomGelu()
        self.dense_4h_to_h = Linear(
            4 * hidden_size,
            hidden_size,
            parallel="row",
            init_method=output_layer_init_method,
            layer_idx=layer_idx,
        )

    def forward(self, hidden_states, residual):
        hidden_states = self.gelu_impl(self.dense_h_to_4h(hidden_states))

        if self.pretraining_tp > 1 and self.slow_but_exact:
            # Replay the pretraining-time tensor-parallel matmul slice by slice
            # so the result matches the original sharded computation exactly.
            projected = flow.zeros_like(residual)
            shard = self.dense_4h_to_h.weight.shape[-1] / self.pretraining_tp
            for rank in range(self.pretraining_tp):
                lo, hi = int(rank * shard), int((rank + 1) * shard)
                projected = projected + F.linear(
                    hidden_states[:, :, lo:hi],
                    self.dense_4h_to_h.weight[:, lo:hi],
                )
        else:
            projected = self.dense_4h_to_h(hidden_states)

        return dropout_add(projected, residual, self.hidden_dropout, self.training)
| 2,716 | 33.833333 | 95 | py |
libai | libai-main/projects/BLOOM/modeling/bloom_model.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.config import configurable
from libai.inference.generator.generation_utils import Generator
from libai.layers import Embedding, LayerNorm, LMLogits
from libai.models.utils import init_method_normal, scaled_init_method_normal
from libai.utils import distributed as dist
from projects.BLOOM.modeling.mask import _expand_mask, _make_causal_mask, build_alibi_tensor
from projects.BLOOM.modeling.transformers import BloomBlock
class BloomModel(nn.Module):
    """BLOOM transformer backbone: word embeddings (+ embedding layernorm),
    a stack of ALiBi-biased BloomBlocks, and a final layernorm.
    """

    @configurable
    def __init__(
        self,
        vocab_size,
        hidden_size,
        hidden_layers,
        n_head,
        padding_idx,
        pretraining_tp=1,
        slow_but_exact=False,
        initializer_range=0.02,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0,
        attention_dropout=0,
        amp_enabled=False,
        layer_norm_epsilon=1e-12,
        cfg=None,
    ):
        super().__init__()
        self.cfg = cfg
        self.embed_dim = hidden_size
        self.num_heads = n_head
        self.hidden_layers = hidden_layers
        init_method = init_method_normal(initializer_range)
        # Output projections use a depth-scaled init, as in Megatron.
        scaled_init_method = scaled_init_method_normal(initializer_range, hidden_layers)

        self.word_embeddings = Embedding(
            vocab_size,
            self.embed_dim,
            padding_idx=padding_idx,
            init_method=init_method,
            amp_enabled=amp_enabled,
            layer_idx=0,
        )
        self.word_embeddings_layernorm = LayerNorm(
            self.embed_dim, eps=layer_norm_epsilon, layer_idx=0
        )
        self.h = flow.nn.ModuleList(
            [
                BloomBlock(
                    hidden_size=hidden_size,
                    n_head=n_head,
                    layer_norm_epsilon=layer_norm_epsilon,
                    hidden_dropout=hidden_dropout,
                    attention_dropout=attention_dropout,
                    pretraining_tp=pretraining_tp,
                    slow_but_exact=slow_but_exact,
                    init_method=init_method,
                    output_layer_init_method=scaled_init_method,
                    apply_residual_connection_post_layernorm=apply_residual_connection_post_layernorm,  # noqa
                    layer_idx=i,
                )
                for i in range(hidden_layers)
            ]
        )
        # Final Layer Norm
        self.ln_f = LayerNorm(self.embed_dim, eps=layer_norm_epsilon, layer_idx=hidden_layers - 1)

    @classmethod
    def from_config(cls, cfg):
        return {
            "vocab_size": cfg.vocab_size,
            "hidden_size": cfg.hidden_size,
            "hidden_layers": cfg.hidden_layers,
            "n_head": cfg.n_head,
            "padding_idx": cfg.padding_idx,
            "pretraining_tp": cfg.pretraining_tp,
            "slow_but_exact": cfg.slow_but_exact,
            "apply_residual_connection_post_layernorm": cfg.apply_residual_connection_post_layernorm,  # noqa
            "hidden_dropout": cfg.hidden_dropout,
            "attention_dropout": cfg.attention_dropout,
            "amp_enabled": cfg.amp_enabled,
            "layer_norm_epsilon": cfg.layer_norm_epsilon,
            "cfg": cfg,
        }

    def _prepare_attn_mask(
        self,
        attention_mask,
        input_shape,
        past_key_values_length,
    ):
        """Combine the padding mask with a causal mask; returns a boolean mask
        of shape [batch_size, 1, tgt_length, src_length] (True = masked)."""
        combined_attention_mask = None
        _, src_length = input_shape

        if src_length > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape, past_key_values_length=past_key_values_length
            )

        # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length]
        expanded_attn_mask = _expand_mask(attention_mask, tgt_length=src_length)
        combined_attention_mask = (
            expanded_attn_mask
            if combined_attention_mask is None
            else expanded_attn_mask | combined_attention_mask
        )

        return combined_attention_mask

    def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
        """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
        if head_mask.dim() == 1:
            head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
        elif head_mask.dim() == 2:
            head_mask = (
                head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
            )  # We can specify head_mask for each layer
        assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
        # NOTE(review): plain nn.Module has no `dtype` attribute here — this line
        # presumably relies on a property defined elsewhere; confirm before
        # passing a head_mask.
        head_mask = head_mask.to(dtype=self.dtype)  # switch to float if need + fp16 compatibility
        return head_mask

    def get_head_mask(self, head_mask, num_hidden_layers, is_attention_chunked=False):
        """
        Prepare the head mask if needed.

        Args:
            head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`,
            *optional*):
                The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for
                discard).
            num_hidden_layers (`int`):
                The number of hidden layers in the model.
            is_attention_chunked: (`bool`, *optional*, defaults to `False`):
                Whether or not the attentions scores are computed by chunks or not.

        Returns:
            `torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x
            seq_length]` or list with `[None]` for each layer.
        """
        if head_mask is not None:
            head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
            if is_attention_chunked is True:
                head_mask = head_mask.unsqueeze(-1)
        else:
            head_mask = [None] * num_hidden_layers

        return head_mask

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        past_key_values=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
    ):
        # Move all inputs to the first pipeline stage.
        input_ids = (
            input_ids.to_global(placement=dist.get_layer_placement(0))
            if input_ids is not None
            else input_ids
        )
        attention_mask = (
            attention_mask.to_global(placement=dist.get_layer_placement(0))
            if attention_mask is not None
            else attention_mask
        )
        head_mask = (
            head_mask.to_global(placement=dist.get_layer_placement(0))
            if head_mask is not None
            else head_mask
        )
        inputs_embeds = (
            inputs_embeds.to_global(placement=dist.get_layer_placement(0))
            if inputs_embeds is not None
            else inputs_embeds
        )

        # Fix: derive batch_size/seq_length from whichever input was provided.
        # Previously these were only set for input_ids, so calling with
        # inputs_embeds alone raised a NameError below.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if past_key_values is None:
            past_key_values = tuple([None] * len(self.h))

        head_mask = self.get_head_mask(head_mask, self.hidden_layers)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        hidden_states = self.word_embeddings_layernorm(inputs_embeds)
        presents = () if use_cache else None

        # Account for cached keys/values when sizing the attention mask.
        seq_length_with_past = seq_length
        past_key_values_length = 0
        if past_key_values[0] is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length
        if attention_mask is None:
            attention_mask = flow.ones(
                (batch_size, seq_length_with_past),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                placement=dist.get_layer_placement(0),
            )

        # ALiBi positional bias, shared by all layers.
        alibi = build_alibi_tensor(attention_mask, self.num_heads, hidden_states.dtype)

        causal_mask = self._prepare_attn_mask(
            attention_mask,
            input_shape=(batch_size, seq_length),
            past_key_values_length=past_key_values_length,
        )

        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            outputs = block(
                hidden_states,
                layer_past=layer_past,
                attention_mask=causal_mask,
                head_mask=head_mask[i],
                use_cache=use_cache,
                output_attentions=output_attentions,
                alibi=alibi,
            )
            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

        hidden_states = self.ln_f(hidden_states)

        return {"last_hidden_state": hidden_states, "past_key_values": presents}
class BloomForCausalLM(nn.Module, Generator):
    """BLOOM with a language-modeling head (tied to the input embeddings) and
    the generation utilities from :class:`Generator`.
    """

    @configurable
    def __init__(
        self,
        vocab_size,
        hidden_size,
        hidden_layers,
        n_head,
        padding_idx,
        pretraining_tp=1,
        slow_but_exact=False,
        initializer_range=0.02,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0,
        attention_dropout=0,
        amp_enabled=False,
        layer_norm_epsilon=1e-12,
        cfg=None,
    ):
        super().__init__()
        self.cfg = cfg
        self.transformer = BloomModel(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            hidden_layers=hidden_layers,
            n_head=n_head,
            padding_idx=padding_idx,
            pretraining_tp=pretraining_tp,
            slow_but_exact=slow_but_exact,
            initializer_range=initializer_range,
            apply_residual_connection_post_layernorm=apply_residual_connection_post_layernorm,
            hidden_dropout=hidden_dropout,
            attention_dropout=attention_dropout,
            amp_enabled=amp_enabled,
            layer_norm_epsilon=layer_norm_epsilon,
            cfg=cfg,
        )
        # Logits are computed against the (tied) word-embedding matrix.
        self.lm_head = LMLogits(vocab_size, bias=False)

    @classmethod
    def from_config(cls, cfg):
        return {
            "vocab_size": cfg.vocab_size,
            "hidden_size": cfg.hidden_size,
            "hidden_layers": cfg.hidden_layers,
            "n_head": cfg.n_head,
            "padding_idx": cfg.padding_idx,
            "pretraining_tp": cfg.pretraining_tp,
            "slow_but_exact": cfg.slow_but_exact,
            "apply_residual_connection_post_layernorm": cfg.apply_residual_connection_post_layernorm,  # noqa
            "hidden_dropout": cfg.hidden_dropout,
            "attention_dropout": cfg.attention_dropout,
            "amp_enabled": cfg.amp_enabled,
            "layer_norm_epsilon": cfg.layer_norm_epsilon,
            "cfg": cfg,
        }

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        past_key_values=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
    ):
        """Run the backbone and project hidden states to vocabulary logits."""
        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        hidden_states = transformer_outputs["last_hidden_state"]
        lm_logits = self.lm_head(hidden_states, self.transformer.word_embeddings.weight)
        return {
            "logits": lm_logits,
            "past_key_values": transformer_outputs["past_key_values"],
            "hidden_states": transformer_outputs["last_hidden_state"],
            # "attentions": transformer_outputs.attentions,
        }

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        **kwargs,
    ) -> dict:
        # only last token for input_ids if past is not None
        if past_key_values:
            input_ids = input_ids[:, -1].unsqueeze(-1)

            # A standard-format cache has batch on dim 0; fold heads back in.
            if past_key_values[0][0].shape[0] == input_ids.shape[0]:
                past_key_values = self._convert_to_bloom_cache(past_key_values)

        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    def _reorder_cache(self, past, beam_idx):
        """Reorder the cache along the batch dimension for beam search."""
        standardized_past = self._convert_to_standard_cache(past, batch_size=len(beam_idx))

        device_to_beam_idx = {
            past_state.device: beam_idx.to(past_state.device)
            for layer_past in past
            for past_state in layer_past
        }
        reordered_past = tuple(
            (
                layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]),
                layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device]),
            )
            for layer_past in standardized_past
        )
        return self._convert_to_bloom_cache(reordered_past)

    # Fix: these two converters take no `self` but were plain methods, so
    # calling them as `self._convert_to_*_cache(...)` bound the instance as
    # `past_key_value` and raised a TypeError. They are static utilities.
    @staticmethod
    def _convert_to_standard_cache(past_key_value, batch_size):
        """
        Standardizes the format of the cache so as to match most implementations,
        i.e. to tuple(tuple([batch_size, num_heads, ...]))
        """
        batch_size_times_num_heads, head_dim, seq_length = past_key_value[0][0].shape
        num_heads = batch_size_times_num_heads // batch_size
        return tuple(
            (
                layer_past[0].view(batch_size, num_heads, head_dim, seq_length),
                layer_past[1].view(batch_size, num_heads, seq_length, head_dim),
            )
            for layer_past in past_key_value
        )

    @staticmethod
    def _convert_to_bloom_cache(past_key_value):
        """
        Converts the cache to the format expected by Bloom,
        i.e. to tuple(tuple([batch_size * num_heads, ...]))
        """
        batch_size, num_heads, head_dim, seq_length = past_key_value[0][0].shape
        batch_size_times_num_heads = batch_size * num_heads
        return tuple(
            (
                layer_past[0].view(batch_size_times_num_heads, head_dim, seq_length),
                layer_past[1].view(batch_size_times_num_heads, seq_length, head_dim),
            )
            for layer_past in past_key_value
        )
| 15,297 | 35.080189 | 110 | py |
libai | libai-main/projects/BLOOM/modeling/attention.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import oneflow as flow
from oneflow import nn
from oneflow.nn import functional as F
from libai.layers import Linear
def dropout_add(x, residual, prob, training):
    """
    Apply dropout to `x`, then add the residual.

    Args:
        x (`torch.tensor`, *required*):
            input tensor
        residual (`torch.tensor`, *required*):
            residual tensor
        prob (`float`, *required*):
            dropout probability
        training (`bool`, *required*):
            training mode
    """
    dropped = F.dropout(x, p=prob, training=training)
    return residual + dropped
class BloomAttention(nn.Module):
    """Multi-head self-attention block of the Bloom model.

    Q/K/V come from one fused column-parallel linear; attention scores get an
    additive ALiBi bias (the ``alibi`` argument) through ``flow.baddbmm``
    instead of positional embeddings.  Incremental decoding is supported via
    ``layer_past``/``use_cache``.  The residual connection and hidden dropout
    are fused into this module through ``dropout_add``.
    """

    def __init__(
        self,
        hidden_size,
        n_head,
        hidden_dropout,
        attention_dropout,
        pretraining_tp,
        slow_but_exact,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        layer_idx=0,
    ):
        super().__init__()
        # `pretraining_tp`/`slow_but_exact` reproduce the exact numerics of the
        # tensor-parallel pretraining run (see the sliced-linear path in forward()).
        self.pretraining_tp = pretraining_tp
        self.slow_but_exact = slow_but_exact
        self.hidden_size = hidden_size
        self.num_heads = n_head
        self.head_dim = self.hidden_size // self.num_heads
        self.split_size = self.hidden_size
        self.hidden_dropout = hidden_dropout
        # Output projection falls back to the input-layer init when not given.
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        if self.head_dim * self.num_heads != self.hidden_size:
            raise ValueError(
                f"`hidden_size` must be divisible by num_heads "
                f"(got `hidden_size`: {self.hidden_size} and `num_heads`:"
                f" {self.num_heads})."
            )
        # Layer-wise attention scaling
        self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
        self.beta = 1.0
        # Fused QKV projection: hidden -> 3 * hidden, column-parallel.
        self.query_key_value = Linear(
            self.hidden_size,
            3 * self.hidden_size,
            bias=True,
            parallel="col",
            init_method=init_method,
            layer_idx=layer_idx,
        )
        # Row-parallel output projection pairing with the column-parallel QKV.
        self.dense = Linear(
            self.hidden_size,
            self.hidden_size,
            parallel="row",
            init_method=output_layer_init_method,
            layer_idx=layer_idx,
        )
        self.attention_dropout = nn.Dropout(attention_dropout)

    def _split_heads(self, fused_qkv):
        """
        Split the last dimension into (num_heads, head_dim) without making any copies, results share
        same memory storage as `fused_qkv`

        Args:
            fused_qkv (oneflow tensor, *required*):
                [batch_size, seq_length, num_heads * 3 * head_dim]

        Returns:
            query: [batch_size, seq_length, num_heads, head_dim]
            key: [batch_size, seq_length, num_heads, head_dim]
            value: [batch_size, seq_length, num_heads, head_dim]
        """
        batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
        # View as [..., num_heads, 3, head_dim] so q/k/v can be indexed on dim -2.
        fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
        return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]

    def _merge_heads(self, x):
        """
        Merge heads together over the last dimension.

        Args:
            x (oneflow tensor, *required*): [batch_size * num_heads, seq_length, head_dim]

        Returns:
            oneflow tensor: [batch_size, seq_length, num_heads * head_dim]
        """
        # What we want to achieve is:
        # batch_size * num_heads, seq_len, head_dim -> batch_size, seq_len, num_heads * head_dim
        batch_size_and_num_heads, seq_length, _ = x.shape
        batch_size = batch_size_and_num_heads // self.num_heads
        # First view to decompose the batch size
        # batch_size * num_heads, seq_len, head_dim -> batch_size, num_heads, seq_len, head_dim
        x = x.view(batch_size, self.num_heads, seq_length, self.head_dim)
        # batch_size, num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads, head_dim
        x = x.permute(0, 2, 1, 3)
        # batch_size, seq_len, num_heads, head_dim -> batch_size, seq_len, num_heads * head_dim
        return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim)

    def forward(
        self,
        hidden_states,
        residual,
        alibi,
        attention_mask,
        layer_past=None,
        head_mask=None,
        use_cache=False,
        output_attentions=False,
    ):
        """Run attention and return ``(output, present[, attention_probs])``.

        ``residual`` is added to the projected output (with dropout) before
        returning.  ``alibi`` is the additive positional bias consumed by
        ``baddbmm``.  Positions where ``attention_mask`` is True are filled
        with the dtype minimum before the softmax (i.e. masked out).
        ``layer_past`` is a ``(past_key, past_value)`` pair; note that the key
        cache is stored transposed as [b*h, head_dim, kv_len] (concat on dim 2)
        while the value cache is [b*h, kv_len, head_dim] (concat on dim 1).
        """
        fused_qkv = self.query_key_value(hidden_states)  # [batch_size, seq_length, 3 x hidden_size]
        # 3 x [batch_size, seq_length, num_heads, head_dim]
        (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv)
        batch_size, q_length, _, _ = query_layer.shape
        query_layer = query_layer.transpose(1, 2).reshape(
            batch_size * self.num_heads, q_length, self.head_dim
        )
        # Keys are laid out pre-transposed ([b*h, head_dim, q_len]) so that
        # baddbmm below computes Q @ K^T without an extra transpose.
        key_layer = key_layer.permute(0, 2, 3, 1).reshape(
            batch_size * self.num_heads, self.head_dim, q_length
        )
        value_layer = value_layer.transpose(1, 2).reshape(
            batch_size * self.num_heads, q_length, self.head_dim
        )
        if layer_past is not None:
            past_key, past_value = layer_past
            key_layer = flow.cat((past_key, key_layer), dim=2)
            value_layer = flow.cat((past_value, value_layer), dim=1)
        _, _, kv_length = key_layer.shape
        if use_cache is True:
            present = (key_layer, value_layer)
        else:
            present = None
        # scores = beta * alibi + alpha * (Q @ K^T); alpha is 1/sqrt(head_dim).
        matmul_result = flow.baddbmm(
            alibi,
            batch1=query_layer,
            batch2=key_layer,
            beta=self.beta,
            alpha=self.inv_norm_factor,
        )
        attention_scores = matmul_result.view(batch_size, self.num_heads, q_length, kv_length)
        input_dtype = attention_scores.dtype
        attn_weights = flow.masked_fill(
            attention_scores, attention_mask, flow.finfo(attention_scores.dtype).min
        )
        # Cast back to the incoming dtype in case softmax promoted it.
        attention_probs = F.softmax(attn_weights, dim=-1).to(input_dtype)
        attention_probs = self.attention_dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        attention_probs_reshaped = attention_probs.view(
            batch_size * self.num_heads, q_length, kv_length
        )
        context_layer = flow.bmm(attention_probs_reshaped, value_layer)
        context_layer = self._merge_heads(context_layer)
        if self.pretraining_tp > 1 and self.slow_but_exact:
            # Replay the tensor-parallel sharded matmul slice by slice so the
            # result bit-matches the pretraining configuration.
            slices = self.hidden_size / self.pretraining_tp
            output_tensor = flow.zeros_like(context_layer)
            for i in range(self.pretraining_tp):
                output_tensor = output_tensor + F.linear(
                    context_layer[:, :, int(i * slices) : int((i + 1) * slices)],
                    self.dense.weight[:, int(i * slices) : int((i + 1) * slices)],
                )
        else:
            output_tensor = self.dense(context_layer)
        # Fused residual + dropout (defined earlier in this file).
        output_tensor = dropout_add(output_tensor, residual, self.hidden_dropout, self.training)
        outputs = (output_tensor, present)
        if output_attentions:
            outputs += (attention_probs,)
        return outputs
| 7,869 | 33.669604 | 100 | py |
libai | libai-main/projects/BLOOM/modeling/mask.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import oneflow as flow
from libai.utils import distributed as dist
def _make_causal_mask(input_ids_shape, past_key_values_length):
    """
    Make causal mask used for self-attention.

    Returns a bool mask of shape
    ``[batch_size, 1, target_length, target_length + past_key_values_length]``
    where True marks positions a query must NOT attend to (strictly-future
    tokens); all cached past positions are attendable (set to False).
    The mask is created as a global tensor (broadcast sbp) on the placement of
    pipeline stage 0.
    """
    batch_size, target_length = input_ids_shape
    mask = flow.ones(
        (target_length, target_length + past_key_values_length),
        dtype=flow.bool,
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=dist.get_layer_placement(0),
    )
    # ONNX doesn't support `torch.Tensor.triu` properly, thus we use this workaround
    seq_ids = flow.arange(
        target_length,
        dtype=flow.long,
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=dist.get_layer_placement(0),
    )
    # row i (query) may attend to column j (key) iff j <= i, i.e. mask where j > i.
    mask[:, past_key_values_length:] = seq_ids[:, None] < seq_ids[None, :]
    if past_key_values_length > 0:
        # All previously cached positions are visible to every query.
        mask[:, :past_key_values_length] = False
    expanded_mask = mask[None, None, :, :].expand(
        batch_size, 1, target_length, target_length + past_key_values_length
    )
    return expanded_mask
def _expand_mask(mask, tgt_length):
    """Broadcast a padding mask of shape `[batch_size, src_length]` to the
    attention shape `[batch_size, 1, tgt_length, src_length]`, inverting it so
    that True marks positions to be masked out.
    """
    batch_size, src_length = mask.shape
    if tgt_length is None:
        tgt_length = src_length
    inverted_mask = ~(mask[:, None, None, :].to(flow.bool))
    return inverted_mask.expand(batch_size, 1, tgt_length, src_length)
def build_alibi_tensor(attention_mask, num_heads, dtype):
    """Build the ALiBi positional bias tensor for Bloom attention.

    Each head gets a geometric slope (computed from the closest power of two
    below ``num_heads``, with interleaved extra slopes when ``num_heads`` is
    not a power of two), multiplied by the token position within the
    non-padded sequence (derived from ``attention_mask.cumsum``, so padding
    does not advance the position counter).

    Returns a tensor of shape ``[batch_size * num_heads, 1, seq_length]``
    cast to ``dtype``, suitable as the ``input`` of ``flow.baddbmm``.
    """
    batch_size, seq_length = attention_mask.shape
    closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
    # Base of the geometric slope progression: 2^(-8/closest_power_of_2).
    base = flow.tensor(
        2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))),
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=attention_mask.placement,
    )
    powers = flow.arange(
        1,
        1 + closest_power_of_2,
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=attention_mask.placement,
    )
    slopes = flow.pow(base, powers)
    if closest_power_of_2 != num_heads:
        # num_heads is not a power of two: fill the remaining heads with
        # slopes interpolated from the next power-of-two progression
        # (odd powers of the doubled base).
        extra_base = flow.tensor(
            2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))),
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=attention_mask.placement,
        )
        num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
        extra_powers = flow.arange(
            1,
            1 + 2 * num_remaining_heads,
            2,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=attention_mask.placement,
        )
        slopes = flow.cat([slopes, flow.pow(extra_base, extra_powers)], dim=0)
    # 0-based position of each real (non-padding) token; padding stays at 0.
    arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
    alibi = slopes[..., None] * arange_tensor
    return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype)
| 3,662 | 35.63 | 86 | py |
libai | libai-main/projects/T5/models/embedding.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
import oneflow.nn as nn
from oneflow.nn import init
import libai.utils.distributed as dist
from libai.layers.embedding import VocabEmbedding
class T5Embedding(flow.nn.Module):
    """Token embedding followed by dropout for T5.

    T5 carries no absolute positional embedding here; positions are handled by
    relative attention bias inside the transformer layers.
    """

    def __init__(
        self,
        hidden_size,
        vocab_size,
        embedding_dropout_prob,
        init_method=flow.nn.init.xavier_normal_,
        amp_enabled=False,
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        # Vocab-parallel word embedding table.
        self.word_embeddings = VocabEmbedding(
            num_embeddings=vocab_size,
            embedding_dim=hidden_size,
            init_method=init_method,
            amp_enabled=amp_enabled,
        )
        self.embedding_dropout = flow.nn.Dropout(embedding_dropout_prob)

    def forward(self, input_ids):
        """Look up token embeddings and apply dropout."""
        return self.embedding_dropout(self.word_embeddings(input_ids))
class Embedding(nn.Module):
    """Construct the trainable embedding module, which does not support parallelization.
    This can be used for positional embedding and token type embedding.

    Arguments:
        num_embeddings: size of vocabulary.
        embedding_dim: dimension of embeddings.
        padding_idx: pad index. Defaults to None.
        init_method: method to initialize weights. Defaults to ``flow.nn.init.xavier_normal_``.
        amp_enabled: fp16 option for embedding weight. Defaults to False.
    """

    def __init__(
        self,
        num_embeddings,
        embedding_dim,
        padding_idx=None,
        init_method=init.xavier_normal_,
        amp_enabled=False,
        layer_idx=0,
    ):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        # Normalize padding_idx to a non-negative index (same contract as
        # flow/torch nn.Embedding): negative values count from the end.
        if padding_idx is not None:
            if padding_idx > 0:
                assert (
                    padding_idx < self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
            elif padding_idx < 0:
                assert (
                    padding_idx >= -self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.init_method = init_method
        self.amp_enabled = amp_enabled
        assert num_embeddings > 0
        # Weight is replicated (broadcast sbp) on the placement of `layer_idx`.
        self.weight = nn.Parameter(
            flow.empty(
                (num_embeddings, embedding_dim),
                dtype=flow.float32,
                placement=dist.get_layer_placement(layer_idx),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            )
        )
        self.init_method(self.weight)
        # FIXME(lxy): Fill padding_idx is not supported in nd_sbp right now.
        # self._fill_padding_idx_with_zero()

    def forward(self, input_ids):
        """Gather rows of the embedding table indexed by ``input_ids``."""
        # amp_white_identity marks the weight as safe for fp16 autocast.
        weight = flow._C.amp_white_identity(self.weight) if self.amp_enabled else self.weight
        # embeddings with sbp sign: [B, B]
        #   [B, B] x [S(0), B] --> [S(0), B]
        #     ↑         ↑              ↑
        #   embed    pos_ids       pos_embed
        input_embeds = flow._C.gather(weight, input_ids, axis=0)
        return input_embeds

    def _fill_padding_idx_with_zero(self) -> None:
        # Zero out the padding row so it contributes nothing; currently unused
        # (see FIXME above) because nd_sbp does not support this assignment.
        if self.padding_idx is not None:
            with flow.no_grad():
                self.weight[self.padding_idx] = flow.zeros(
                    self.embedding_dim,
                    placement=dist.get_layer_placement(0),
                    sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                )

    def extra_repr(self) -> str:
        # Shown by repr(module); mirrors flow.nn.Embedding's format.
        s = "num_embeddings={num_embeddings}, embedding_dim={embedding_dim}"
        if self.padding_idx is not None:
            s += ", padding_idx={padding_idx}"
        return s.format(**self.__dict__)
| 4,527 | 34.936508 | 95 | py |
libai | libai-main/projects/T5/models/logits.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.layers import Linear
from libai.utils import distributed as dist
class LMLogits(nn.Module):
    """Final projection producing vocabulary logits.

    For ``model_type == "t5"`` the projection ties weights with the word
    embedding matrix (passed to ``forward``), optionally adding a learned
    bias.  For ``model_type == "mt5"`` a dedicated linear layer is used.
    """

    def __init__(self, vocab_size, hidden_size=None, bias=False, model_type="t5", layer_idx=-1):
        super().__init__()
        self.model_type = model_type
        if model_type == "t5":
            if bias:
                # Bias is split along the vocab dimension for model parallelism.
                self.bias = nn.Parameter(
                    flow.zeros(
                        (vocab_size,),
                        dtype=flow.float32,
                        placement=dist.get_layer_placement(layer_idx),
                        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)]),
                    )
                )
            else:
                self.bias = None
        elif model_type == "mt5":
            self.linear = Linear(hidden_size, vocab_size, bias=False, layer_idx=layer_idx)

    def forward(self, input, word_embeddings=None):
        """Project hidden states to vocab logits.

        ``word_embeddings`` is required (and used) only in "t5" mode, where
        the output projection is the transposed embedding matrix.
        """
        if self.model_type != "t5":
            return self.linear(input)
        weight = word_embeddings.to_global(placement=input.placement)
        hidden = input.to_global(grad_sbp=input.sbp)
        logits = flow._C.matmul(hidden, weight, transpose_b=True)
        if self.bias is not None:
            logits = logits + self.bias
        return logits
| 1,958 | 35.962264 | 96 | py |
libai | libai-main/projects/T5/models/t5_model.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
import oneflow.nn as nn
from libai.config import configurable
from libai.layers import Linear, LMLogits
from libai.models.t5_model import T5Loss
from libai.models.utils import init_method_normal, scaled_init_method_normal
from libai.utils import distributed as dist
from projects.MT5.utils.mt5_loader import T5LoaderHuggerFace
from projects.T5.models.embedding import T5Embedding
from projects.T5.models.layer_norm import LayerNorm
from projects.T5.models.transformer_layer import TransformerLayer
from projects.T5.utils.mask import ExtendedMask
class T5Model(flow.nn.Module):
    """T5/mT5 encoder-decoder transformer for OneFlow/libai.

    Layer indices 0 .. hidden_layers-1 belong to the encoder and
    hidden_layers .. 2*hidden_layers-1 to the decoder, which drives pipeline
    stage placement.  Only the first layer of each stack carries the relative
    attention bias (``has_relative_attention_bias``); the resulting bias
    tensors are threaded through the remaining layers.  The instance keeps
    incremental-decoding state (``encoder_states``, ``past_key_values``,
    ``past_length``) managed via :meth:`set_cache`.
    """

    @configurable
    def __init__(
        self,
        vocab_size,
        hidden_size,
        hidden_layers,
        num_attention_heads,
        head_size,
        intermediate_size,
        embedding_dropout_prob,
        hidden_dropout_prob,
        attention_probs_dropout_prob,
        relative_attention_num_buckets,
        initializer_range=0.02,
        layernorm_eps=1e-12,
        amp_enabled=False,
        model_type="t5",
    ) -> None:
        super().__init__()
        self.model_type = model_type
        init_method = init_method_normal(initializer_range)
        # Output projections use the depth-scaled init (Megatron convention).
        scaled_init_method = scaled_init_method_normal(initializer_range, hidden_layers)
        # Shared token embedding for encoder and decoder inputs.
        self.embedding = T5Embedding(
            hidden_size=hidden_size,
            vocab_size=vocab_size,
            embedding_dropout_prob=embedding_dropout_prob,
            init_method=init_method,
            amp_enabled=amp_enabled,
        )
        self.extended_attn_mask = ExtendedMask()
        encoder_layers = flow.nn.ModuleList(
            [
                TransformerLayer(
                    hidden_size=hidden_size,
                    ffn_hidden_size=intermediate_size,
                    num_attention_heads=num_attention_heads,
                    head_size=head_size,
                    relative_attention_num_buckets=relative_attention_num_buckets,
                    is_decoder=False,
                    attention_dropout_prob=attention_probs_dropout_prob,
                    output_dropout_prob=hidden_dropout_prob,
                    layernorm_epsilon=layernorm_eps,
                    init_method=init_method,
                    output_layer_init_method=scaled_init_method,
                    layer_idx=i,
                    model_type=model_type,
                    # Only the first encoder layer owns the relative bias table.
                    has_relative_attention_bias=bool(i == 0),
                )
                for i in range(hidden_layers)
            ]
        )
        encoder_final_layernorm = LayerNorm(
            (hidden_size,),
            eps=layernorm_eps,
            layer_idx=hidden_layers - 1,
        )
        self.encoder = flow.nn.Sequential()
        self.encoder.add_module("layers", encoder_layers)
        self.encoder.add_module("final_layernorm", encoder_final_layernorm)
        decoder_layers = flow.nn.ModuleList(
            [
                TransformerLayer(
                    hidden_size=hidden_size,
                    ffn_hidden_size=intermediate_size,
                    num_attention_heads=num_attention_heads,
                    head_size=head_size,
                    relative_attention_num_buckets=relative_attention_num_buckets,
                    is_decoder=True,
                    attention_dropout_prob=attention_probs_dropout_prob,
                    output_dropout_prob=hidden_dropout_prob,
                    layernorm_epsilon=layernorm_eps,
                    init_method=init_method,
                    output_layer_init_method=scaled_init_method,
                    layer_idx=i,
                    model_type=model_type,
                    # First decoder layer (global index == hidden_layers) owns the bias.
                    has_relative_attention_bias=bool(i - hidden_layers == 0),
                )
                for i in range(hidden_layers, 2 * hidden_layers)
            ]
        )
        decoder_final_layernorm = LayerNorm(
            (hidden_size,),
            eps=layernorm_eps,
            layer_idx=2 * hidden_layers - 1,
        )
        self.decoder = flow.nn.Sequential()
        self.decoder.add_module("layers", decoder_layers)
        self.decoder.add_module("final_layernorm", decoder_final_layernorm)
        # Incremental-decoding cache (one entry per decoder layer).
        self.past_key_values = [None] * len(self.decoder.layers)
        self.encoder_states = None
        self.past_length = 0
        # mT5 uses an untied linear head; T5 ties logits with the embedding.
        if model_type == "mt5":
            self.lm_head = Linear(
                hidden_size, vocab_size, bias=False, layer_idx=2 * hidden_layers - 1
            )
        else:
            self.lm_head = LMLogits(vocab_size, bias=False)

    @classmethod
    def from_config(cls, cfg):
        """Map a LazyConfig node to the constructor kwargs (see @configurable)."""
        return {
            "vocab_size": cfg.vocab_size,
            "hidden_size": cfg.hidden_size,
            "hidden_layers": cfg.hidden_layers,
            "num_attention_heads": cfg.num_attention_heads,
            "head_size": cfg.head_size,
            "intermediate_size": cfg.intermediate_size,
            "embedding_dropout_prob": cfg.embedding_dropout_prob,
            "hidden_dropout_prob": cfg.hidden_dropout_prob,
            "attention_probs_dropout_prob": cfg.attention_probs_dropout_prob,
            "relative_attention_num_buckets": cfg.relative_attention_num_buckets,
            "initializer_range": cfg.initializer_range,
            "layernorm_eps": cfg.layernorm_eps,
            "amp_enabled": cfg.amp_enabled,
            "model_type": cfg.model_type,
        }

    def forward(
        self,
        encoder_input_ids,
        decoder_input_ids,
        encoder_attn_mask,
        decoder_attn_mask,
        encoder_decoder_attn_mask,
        use_cache=False,
    ):
        """Run encoder + decoder and return vocab logits.

        With ``use_cache=True`` the encoder output and decoder key/value
        states are cached on the instance, so subsequent calls (incremental
        decoding) skip the encoder and only extend the decoder.
        """
        # Move all inputs to the first pipeline stage's placement.
        encoder_input_ids = encoder_input_ids.to_global(placement=dist.get_layer_placement(0))
        decoder_input_ids = decoder_input_ids.to_global(placement=dist.get_layer_placement(0))
        encoder_attn_mask = encoder_attn_mask.to_global(placement=dist.get_layer_placement(0))
        decoder_attn_mask = decoder_attn_mask.to_global(placement=dist.get_layer_placement(0))
        encoder_decoder_attn_mask = encoder_decoder_attn_mask.to_global(
            placement=dist.get_layer_placement(0)
        )
        if use_cache and self.encoder_states is not None:
            # Incremental decoding: reuse the cached encoder output.
            encoder_states = self.encoder_states
        else:
            position_bias = None
            encoder_decoder_position_bias = None
            # Fresh run: drop any stale cache before encoding.
            self.set_cache(encoder_states=None, past_key_values=None)
            encoder_attn_mask = self.extended_attn_mask(encoder_attn_mask)
            enc_embedding_output = self.embedding(encoder_input_ids)
            enc_hidden_states = enc_embedding_output
            # position_bias is produced by layer 0 and reused by later layers.
            for layer in self.encoder.layers:
                enc_hidden_states, position_bias = layer(
                    enc_hidden_states,
                    encoder_attn_mask,
                    position_bias=position_bias,
                )
            encoder_states = self.encoder.final_layernorm(enc_hidden_states)
        decoder_attn_mask = self.extended_attn_mask(
            decoder_attn_mask, decoder_input_ids, is_decoder=True
        )
        encoder_decoder_attn_mask = self.extended_attn_mask(encoder_decoder_attn_mask)
        dec_embedding_output = self.embedding(decoder_input_ids)
        dec_hidden_states = dec_embedding_output
        if use_cache:
            presents = []
        position_bias = None
        encoder_decoder_position_bias = None
        for layer, past_key_value in zip(self.decoder.layers, self.past_key_values):
            dec_hidden_states, position_bias, encoder_decoder_position_bias = layer(
                dec_hidden_states,
                decoder_attn_mask,
                encoder_states,
                encoder_decoder_attn_mask,
                past_key_value=past_key_value,
                position_bias=position_bias,
                encoder_decoder_position_bias=encoder_decoder_position_bias,
                use_cache=use_cache,
            )
            if use_cache:
                # When caching, the layer output is (hidden_states, present).
                dec_hidden_states, present = dec_hidden_states
                presents.append(present)
        if use_cache:
            self.set_cache(encoder_states, past_key_values=presents)
        decoder_states = self.decoder.final_layernorm(dec_hidden_states)
        if self.model_type == "mt5":
            logits = self.lm_head(decoder_states)
        else:
            # T5 ties the output projection with the word embedding matrix.
            logits = self.lm_head(decoder_states, self.embedding.word_embeddings.weight)
        return logits

    def set_cache(self, encoder_states, past_key_values):
        """Install (or clear, when both args are None) the decoding cache."""
        self.encoder_states = encoder_states
        # past_key_values[layer] = (key, value, ...); key shape dim 2 is kv length.
        self.past_length = 0 if past_key_values is None else past_key_values[0][0].shape[2]
        if past_key_values is None:
            past_key_values = [None] * len(self.decoder.layers)
        assert len(past_key_values) == len(self.decoder.layers), (
            f"past_key_values's length {len(past_key_values)} doesn't match "
            f"decoder num_layers' length {self.decoder.layers}"
        )
        self.past_key_values = past_key_values
class T5ForPreTraining(flow.nn.Module):
    """Pre-training wrapper around :class:`T5Model` adding the LM loss.

    If ``cfg.pretrained_model_path`` is set, weights are loaded from a
    HuggingFace checkpoint via ``T5LoaderHuggerFace``; otherwise the model is
    randomly initialized.
    """

    def __init__(self, cfg) -> None:
        super().__init__()
        if cfg.pretrained_model_path is not None:
            loader = T5LoaderHuggerFace(T5Model, cfg, cfg.pretrained_model_path)
            self.t5_model = loader.load()
        else:
            self.t5_model = T5Model(cfg)
        self.loss_func = T5Loss()

    def set_cache(self, encoder_states, past_key_values):
        """Forward cache management to the wrapped model (used for decoding)."""
        self.t5_model.set_cache(encoder_states, past_key_values)

    def forward(
        self,
        encoder_input_ids,
        decoder_input_ids,
        encoder_attn_mask,
        decoder_attn_mask,
        encoder_decoder_attn_mask,
        lm_labels=None,
        loss_mask=None,
        use_cache=False,
    ):
        """Return the LM loss dict when ``lm_labels`` is given, otherwise
        ``{"prediction_scores": logits}`` for inference."""
        logits = self.t5_model(
            encoder_input_ids,
            decoder_input_ids,
            encoder_attn_mask,
            decoder_attn_mask,
            encoder_decoder_attn_mask,
            use_cache=use_cache,
        )
        if lm_labels is not None:
            lm_loss = self.loss_func(logits, lm_labels, loss_mask)
            return lm_loss
        else:
            return {
                "prediction_scores": logits,
            }

    @staticmethod
    def set_pipeline_stage_id(model):
        """Assign pipeline-parallel stage ids to the model's submodules.

        Embedding and mask modules go to stage 0, each TransformerLayer to the
        stage of its ``layer_idx``, and the loss to the last stage.  Two code
        paths cover the old (OneFlow 0.8 ``module_block.config``) and new
        (``GraphModule``) graph APIs.
        """
        dist_utils = dist.get_dist_util()
        # Set pipeline parallelism stage_id
        if hasattr(model.t5_model.encoder.final_layernorm, "config"):
            # Old API in OneFlow 0.8
            for module_block in model.modules():
                if isinstance(module_block.origin, T5Embedding):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.origin, ExtendedMask):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.origin, TransformerLayer):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
                elif isinstance(module_block.origin, T5Loss):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
                    )
            # Final layernorms sit on the stage of their last owning layer.
            model.t5_model.encoder.final_layernorm.config.set_stage(
                dist_utils.get_layer_stage_id(model.t5_model.encoder.final_layernorm.layer_idx),
                dist.get_layer_placement(model.t5_model.encoder.final_layernorm.layer_idx),
            )
            model.t5_model.decoder.final_layernorm.config.set_stage(
                dist_utils.get_layer_stage_id(model.t5_model.decoder.final_layernorm.layer_idx),
                dist.get_layer_placement(model.t5_model.decoder.final_layernorm.layer_idx),
            )
        else:
            for module_block in model.modules():
                if isinstance(module_block.to(nn.Module), T5Embedding):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.to(nn.Module), ExtendedMask):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.to(nn.Module), TransformerLayer):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
                elif isinstance(module_block.to(nn.Module), T5Loss):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
                    )
            model.t5_model.encoder.final_layernorm.to(flow.nn.graph.GraphModule).set_stage(
                dist_utils.get_layer_stage_id(model.t5_model.encoder.final_layernorm.layer_idx),
                dist.get_layer_placement(model.t5_model.encoder.final_layernorm.layer_idx),
            )
            model.t5_model.decoder.final_layernorm.to(flow.nn.graph.GraphModule).set_stage(
                dist_utils.get_layer_stage_id(model.t5_model.decoder.final_layernorm.layer_idx),
                dist.get_layer_placement(model.t5_model.decoder.final_layernorm.layer_idx),
            )
| 14,346 | 40.585507 | 96 | py |
libai | libai-main/projects/T5/models/mlp.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oneflow import nn
from libai.layers import Linear, build_activation
class T5MLP(nn.Module):
    """T5 feed-forward block: hidden -> ffn (column-parallel) -> ReLU ->
    hidden (row-parallel) -> dropout.  No bias terms, matching T5.
    """

    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        output_dropout_prob=0.0,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        *,
        layer_idx=0,
    ):
        super().__init__()
        self.output_dropout_prob = output_dropout_prob
        # Default the output projection init to the input-layer init.
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        self.dense_h_to_4h = Linear(
            hidden_size,
            ffn_hidden_size,
            bias=False,
            parallel="col",
            skip_bias_add=False,
            init_method=init_method,
            layer_idx=layer_idx,
        )
        self.activation_func = build_activation("relu")
        self.dense_4h_to_h = Linear(
            ffn_hidden_size,
            hidden_size,
            bias=False,
            parallel="row",
            skip_bias_add=False,
            init_method=output_layer_init_method,
            layer_idx=layer_idx,
        )
        self.dropout = nn.Dropout(self.output_dropout_prob)

    def forward(self, hidden_states):
        """Apply the up-projection, ReLU, down-projection and dropout."""
        up = self.dense_h_to_4h(hidden_states)
        down = self.dense_4h_to_h(self.activation_func(up))
        return self.dropout(down)
class MT5MLP(nn.Module):
    """mT5 gated feed-forward block (gated-GELU variant).

    Two parallel up-projections: ``wi_0`` is passed through tanh-GELU and
    gates the linear branch ``wi_1``; their product is projected back by
    ``wo``.  All projections are bias-free.
    """

    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        output_dropout_prob=0.0,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        *,
        layer_idx=0,
    ):
        super().__init__()
        self.output_dropout_prob = output_dropout_prob
        # Default the output projection init to the input-layer init.
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        self.wi_0 = Linear(
            hidden_size,
            ffn_hidden_size,
            bias=False,
            parallel="col",
            skip_bias_add=False,
            init_method=init_method,
            layer_idx=layer_idx,
        )
        self.wi_1 = Linear(
            hidden_size,
            ffn_hidden_size,
            bias=False,
            parallel="col",
            skip_bias_add=False,
            init_method=init_method,
            layer_idx=layer_idx,
        )
        self.activation_func = build_activation("gelu_tanh")
        self.wo = Linear(
            ffn_hidden_size,
            hidden_size,
            bias=False,
            parallel="row",
            skip_bias_add=False,
            init_method=output_layer_init_method,
            layer_idx=layer_idx,
        )
        self.dropout = nn.Dropout(self.output_dropout_prob)

    def forward(self, hidden_states):
        """Gated-GELU: GELU(wi_0(x)) * wi_1(x), projected by wo, then dropout."""
        gate = self.activation_func(self.wi_0(hidden_states))
        value = self.wi_1(hidden_states)
        projected = self.wo(gate * value)
        return self.dropout(projected)
| 3,696 | 27.658915 | 74 | py |
libai | libai-main/projects/T5/models/transformer_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow.nn as nn
from libai.layers.droppath import DropPath
from libai.utils import distributed as dist
from projects.T5.models.attention import MultiheadAttention
from projects.T5.models.layer_norm import LayerNorm
from projects.T5.models.mlp import MT5MLP, T5MLP
class TransformerLayer(nn.Module):
    """A single transformer layer.

    Transformer layer takes input with size [bsz, seq_length, hidden size] and returns an
    output of the same size.
    The input and output has same sbp sign, (S(0), B).

    Uses pre-layernorm residual blocks and T5-style relative position bias:
    the bias tensors produced by the first layer of the stack are passed in
    and returned so later layers can reuse them.

    Arguments:
        hidden_size: size of hidden state.
        ffn_hidden_size: size of feed forword neural network.
        num_attention_heads: number of attention heads.
        is_decoder: used to specify whether this is transformer encoder layer or transformer
            decoder layer. Default: ``False``.
        attention_dropout_prob: dropout probability of attention weights.
        output_dropout_prob: dropout probability of output.
        layernorm_epsilon: epsilon used in layernorm layer. Default: `1e-5`.
        init_method: method to initialize the input layer weights.
        output_layer_init_method: method to initialize the output layer weights.
            If None, use `init_method`.
        layer_idx: the layer index, which determines the placement.
    """

    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        head_size,
        relative_attention_num_buckets,
        is_decoder=False,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        drop_path_prob=0.0,
        layernorm_epsilon=1e-5,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        *,
        layer_idx=0,
        model_type="t5",
        has_relative_attention_bias=False
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.ffn_hidden_size = ffn_hidden_size
        self.num_attention_heads = num_attention_heads
        self.head_size = head_size
        self.attention_dropout_prob = attention_dropout_prob
        self.output_dropout_prob = output_dropout_prob
        self.layernorm_epsilon = layernorm_epsilon
        self.layer_idx = layer_idx
        self.is_decoder = is_decoder
        self.init_method = init_method
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        self.output_layer_init_method = output_layer_init_method
        # Stochastic depth; identity when drop_path_prob == 0.
        self.drop_path = DropPath(drop_path_prob) if drop_path_prob > 0.0 else nn.Identity()
        self.input_layernorm = LayerNorm(
            self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
        )
        self.self_attention = self.build_attention(
            is_cross_attention=False,
            relative_attention_num_buckets=relative_attention_num_buckets,
            has_relative_attention_bias=has_relative_attention_bias,
            is_decoder=self.is_decoder,
        )
        self.post_attention_layernorm = LayerNorm(
            self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
        )
        if self.is_decoder:
            self.cross_attention = self.build_attention(
                is_cross_attention=True,
                relative_attention_num_buckets=relative_attention_num_buckets,
                is_decoder=self.is_decoder,
            )
            self.post_cross_attention_layernorm = LayerNorm(
                self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
            )
        # mT5 uses the gated-GELU MLP, T5 the plain ReLU MLP.
        if model_type == "mt5":
            self.mlp = MT5MLP(
                self.hidden_size,
                self.ffn_hidden_size,
                self.output_dropout_prob,
                self.init_method,
                output_layer_init_method=self.output_layer_init_method,
                layer_idx=self.layer_idx,
            )
        elif model_type == "t5":
            self.mlp = T5MLP(
                self.hidden_size,
                self.ffn_hidden_size,
                self.output_dropout_prob,
                self.init_method,
                output_layer_init_method=self.output_layer_init_method,
                layer_idx=self.layer_idx,
            )

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        use_cache=False,
        position_bias=None,
        encoder_decoder_position_bias=None,
    ):
        """
        Args:
            hidden_states: shape is (batch_size, seq_length, hidden_size),
                sbp signature is (S(0), B).
            attention_mask: the combination of key padding mask and casual mask of hidden states
                with shape (batch_size, 1, seq_length, seq_length) and the sbp
                signature is (S(0), B),
            encoder_states: encoder output with shape (batch_size, seq_length, hidden_size)
                and the sbp signature is (S(0), B), which will be used in cross attention.
            encoder_attention_mask: key padding mask of encoder states with shape
                (batch_size, 1, seq_length, seq_length) and the sbp signature is (S(0), B).
            past_key_value: tuple of key and value, each shape is
                (seq_length, bsz, num_heads, head_size), For decoder layer,
                the past_key_value contains the states both from self attention
                and cross attention.
            use_cache: it will be set to `True` when the model is in the inference phase and
                used for incremental decoding.

        Returns:
            ``(hidden_states, position_bias)`` for encoder layers, plus
            ``encoder_decoder_position_bias`` for decoder layers; with
            ``use_cache`` the first element is ``(hidden_states, presents)``.
        """
        # Change placement for pipeline parallelism
        hidden_states = hidden_states.to_global(placement=dist.get_layer_placement(self.layer_idx))

        # hidden_states shape: (batch_size, seq_length, hidden_size)
        if attention_mask is not None:
            attention_mask = attention_mask.to_global(
                placement=dist.get_layer_placement(self.layer_idx)
            )

        # Split the cached states: decoder caches 2 self-attn + 2 cross-attn entries.
        if past_key_value is not None:
            if self.is_decoder:
                assert len(past_key_value) == 4
                self_attn_past_key_value = past_key_value[:2]
                cross_attn_past_key_value = past_key_value[2:]
            else:
                self_attn_past_key_value = past_key_value
                cross_attn_past_key_value = None
        else:
            self_attn_past_key_value, cross_attn_past_key_value = None, None

        # Pre-LN self-attention with residual connection.
        layernorm_output = self.input_layernorm(hidden_states)
        attention_output, position_bias = self.self_attention(
            layernorm_output,
            attention_mask=attention_mask,
            past_key_value=self_attn_past_key_value,
            position_bias=position_bias,
            use_cache=use_cache,
        )
        # NOTE(review): when use_cache is True, attention_output is still the
        # (tensor, presents) tuple here, so drop_path is applied to a tuple.
        # Harmless for the nn.Identity default, but looks suspect for a real
        # DropPath (the cross-attention branch below unpacks first) — confirm.
        attention_output = self.drop_path(attention_output)
        if use_cache:
            attention_output, presents = attention_output
        else:
            presents = None
        hidden_states = hidden_states + attention_output
        layernorm_output = self.post_attention_layernorm(hidden_states)

        if self.is_decoder:
            # kv length of the self-attn cache, used to size the cross-attn query.
            if presents is not None:
                query_length = presents[0].shape[2]
            else:
                query_length = None
            attention_output, encoder_decoder_position_bias = self.cross_attention(
                layernorm_output,
                encoder_states,
                attention_mask=encoder_attention_mask,
                past_key_value=cross_attn_past_key_value,
                position_bias=encoder_decoder_position_bias,
                use_cache=use_cache,
                query_length=query_length,
            )
            if use_cache:
                attention_output, decoder_presents = attention_output
                # Concatenate self-attn and cross-attn cache entries.
                presents = presents + decoder_presents
            attention_output = self.drop_path(attention_output)
            hidden_states = hidden_states + attention_output
            layernorm_output = self.post_cross_attention_layernorm(hidden_states)

        # Pre-LN feed-forward with residual connection.
        mlp_output = self.mlp(layernorm_output)
        mlp_output = self.drop_path(mlp_output)
        output = hidden_states + mlp_output

        if use_cache:
            output = (output, presents)
        output = (output,) + (position_bias,)
        if self.is_decoder:
            output = output + (encoder_decoder_position_bias,)
        return output

    def build_attention(
        self,
        is_cross_attention=False,
        relative_attention_num_buckets=None,
        has_relative_attention_bias=False,
        is_decoder=False,
    ):
        """Construct a MultiheadAttention configured from this layer's settings."""
        return MultiheadAttention(
            self.hidden_size,
            self.num_attention_heads,
            head_size=self.head_size,
            relative_attention_num_buckets=relative_attention_num_buckets,
            is_cross_attention=is_cross_attention,
            attention_dropout_prob=self.attention_dropout_prob,
            output_dropout_prob=self.output_dropout_prob,
            init_method=self.init_method,
            output_layer_init_method=self.output_layer_init_method,
            layer_idx=self.layer_idx,
            has_relative_attention_bias=has_relative_attention_bias,
            is_decoder=is_decoder,
        )
| 9,934 | 38.268775 | 99 | py |
libai | libai-main/projects/T5/models/layer_norm.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from libai.utils import distributed as dist
class LayerNorm(flow.nn.Module):
    """RMS-style layer normalization (scale only, no mean subtraction, no bias).

    Despite the name, ``forward`` delegates to ``flow._C.rms_norm``, i.e. the
    T5 variant of layer norm. The learnable scale ``weight`` is created as a
    global tensor placed on the pipeline stage given by ``layer_idx``.
    """

    def __init__(self, normalized_shape, eps=1e-6, layer_idx=0):
        super().__init__()
        self.layer_idx = layer_idx
        # Scale parameter initialized to ones, broadcast over both mesh dims.
        scale = flow.ones(
            normalized_shape,
            dtype=flow.float32,
            placement=dist.get_layer_placement(layer_idx),
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        )
        self.weight = flow.nn.Parameter(scale)
        self.l2norm_epsilon = eps

    def forward(self, hidden_states):
        return flow._C.rms_norm(
            hidden_states, self.weight, self.weight.shape, self.l2norm_epsilon
        )
| 1,324 | 34.810811 | 99 | py |
libai | libai-main/projects/T5/models/attention.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Tuple
import oneflow as flow
from oneflow import nn
from libai.layers.linear import Linear
from libai.utils import distributed as dist
from projects.T5.models.embedding import Embedding
class MultiheadAttention(nn.Module):
    """Multi-head attention layer, support self attention and cross attention.

    Args:
        hidden_size: size of hidden state.
        num_attention_heads: number of attention heads.
        head_size: size of each attention head.
        relative_attention_num_buckets: number of buckets for the T5 relative
            position bias.
        is_cross_attention: used to specify whether it is self attention or cross attention.
            Defaults to False.
        attention_dropout_prob: dropout probability of attention weights.
            Defaults to 0.0.
        output_dropout_prob: dropout probability of output. Defaults to 0.0.
        init_method: method to initialize the input layer weights.
            Defaults to ``init.xavier_normal_``.
        output_layer_init_method: method to initialize the output layer weights.
            If None, use ``init_method``.
        layer_idx: a layer_idx sign which determines the placements.
            It will be used in pipeline parallelism. Defaults to 0.
        has_relative_attention_bias: whether this layer owns the relative
            position embedding (in T5 only the first layer of each stack does).
        is_decoder: whether this attention belongs to the decoder stack.
    """

    def __init__(
        self,
        hidden_size,
        num_attention_heads,
        head_size,
        relative_attention_num_buckets,
        is_cross_attention=False,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        *,
        layer_idx=0,
        has_relative_attention_bias=False,
        is_decoder=False,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.has_relative_attention_bias = has_relative_attention_bias
        self.is_decoder = is_decoder
        self.attention_dropout_prob = attention_dropout_prob
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        self.num_heads = num_attention_heads
        self.head_size = head_size
        self.dropout = nn.Dropout(p=attention_dropout_prob)
        # NOTE(review): norm_factor is stored but never used in forward —
        # T5-style attention omits the 1/sqrt(d) scaling; confirm intentional.
        self.norm_factor = 1.0 / math.sqrt(float(self.head_size))
        self.is_cross_attention = is_cross_attention
        self.output_dropout = nn.Dropout(p=output_dropout_prob)
        if self.is_cross_attention:
            # Cross attention: query is projected from the decoder stream,
            # key/value from the encoder output, hence two projections.
            self.query = Linear(
                self.hidden_size,
                self.num_heads * self.head_size,
                bias=False,
                parallel="col",
                init_method=init_method,
                layer_idx=layer_idx,
            )
            self.key_value = Linear(
                self.hidden_size,
                self.num_heads * self.head_size * 2,
                bias=False,
                parallel="col",
                init_method=init_method,
                layer_idx=layer_idx,
            )
        else:
            # Self attention: one fused projection produces q, k and v.
            self.query_key_value = Linear(
                self.hidden_size,
                self.num_heads * self.head_size * 3,
                bias=False,
                parallel="col",
                init_method=init_method,
                layer_idx=layer_idx,
            )
        self.dense = Linear(
            self.num_heads * self.head_size,
            self.hidden_size,
            bias=False,
            parallel="row",
            init_method=output_layer_init_method,
            skip_bias_add=False,
            layer_idx=layer_idx,
        )
        if self.has_relative_attention_bias:
            self.relative_attention_bias = Embedding(
                self.relative_attention_num_buckets, self.num_heads, layer_idx=layer_idx
            )

    def forward(
        self,
        hidden_states: flow.Tensor,
        encoder_states: flow.Tensor = None,
        attention_mask: flow.Tensor = None,
        past_key_value: Tuple[flow.Tensor, flow.Tensor] = None,
        use_cache: bool = False,
        position_bias=None,
        query_length=None,
    ):
        """
        Args:
            hidden_states (flow.Tensor): shape is [bsz, tgt_len, hidden_size].
            encoder_states (flow.Tensor, optional): shape is [bsz, src_len, hidden_size].
                Defaults to None.
            attention_mask (flow.Tensor, optional): shape is [bsz, 1, tgt_len, src_len].
                It should be the combination of padding mask and casual mask.
                It is the padding mask of source input when used with self-attention in encoder.
                And it is the combination of padding mask of target input and casual mask when
                used with self-attention in decoder. It is the padding mask of source input when
                used with cross-attention in decoder.
                Defaults to None.
            past_key_value (Tuple[flow.Tensor, flow.Tensor], optional): tuple of key and value,
                each shape is [bsz, num_heads, src_len, head_size]. Defaults to None.
            use_cache (bool, optional): it will be set to True, when the model is in the inference
                phase and used for incremental decoding. Defaults to False.
            position_bias (flow.Tensor, optional): precomputed relative position bias; when
                None it is created here (zeros unless this layer owns the bias embedding).
            query_length (int, optional): length of the query when decoding incrementally
                with cross attention.
        """
        # hidden_states, encoder_states: [S(0), B]
        # attention_mask: [S(0), B]
        if encoder_states is not None:
            encoder_states = encoder_states.to_global(placement=hidden_states.placement)
        if attention_mask is not None:
            attention_mask = attention_mask.to_global(placement=hidden_states.placement)
        bsz, real_seq_length = hidden_states.size()[:2]
        if past_key_value is not None:
            # Fixed: the detail part of this message was previously a dangling
            # f-string statement and never reached the assert.
            assert len(past_key_value) == 2, (
                "past_key_value should have 2 past states: keys and values. "
                f"Got {len(past_key_value)} past states."
            )
            real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
        key_length = real_seq_length if encoder_states is None else encoder_states.shape[1]
        if self.is_cross_attention:
            # if it is cross attention, key and value should be calculated only once, and the
            # result can be reused.
            query = self.query(hidden_states)
            query = query.view(bsz, -1, self.num_heads, self.head_size)
            query = query.permute(0, 2, 1, 3)
            if past_key_value is not None:
                key, value = past_key_value
            elif encoder_states is not None:
                key_value = self.key_value(encoder_states)
                key_value = key_value.view(bsz, -1, self.num_heads, 2 * self.head_size)
                key_value = key_value.permute(0, 2, 1, 3)
                key, value = flow.chunk(key_value, chunks=2, dim=-1)
            else:
                raise ValueError(
                    "past_key_value and encoder_states cannot be None at the same time."
                )
        else:
            # if it is self attention, query, key, and value are all obtained from hidden_states.
            # when in the inference phase of an incremental decoder,
            # hidden_states is the last-added state,
            # the full key and value could be obtained by concatenating with past_key_value.
            query_key_value = self.query_key_value(hidden_states)
            query_key_value = query_key_value.view(bsz, -1, self.num_heads, 3 * self.head_size)
            query_key_value = query_key_value.permute(
                0, 2, 1, 3
            )  # [bsz, num_heads, src_len, 3 * head_size]
            query, key, value = flow.chunk(query_key_value, chunks=3, dim=-1)
            if past_key_value is not None:
                past_key, past_value = past_key_value
                key = flow.cat((past_key.type_as(key), key), dim=2)
                value = flow.cat((past_value.type_as(value), value), dim=2)
        # query, key, value: [S(0), S(1)], shape: [bsz, num_heads, seq_length, head_size]
        if use_cache:
            past_key_value = (key, value)
        # [bsz, num_heads, tgt_len, src_len] with [S(0), S(1)]
        attention_scores = flow.matmul(query, key, transpose_b=True)
        if position_bias is None:
            if not self.has_relative_attention_bias:
                position_bias = flow.zeros(
                    (1, self.num_heads, real_seq_length, key_length),
                    sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                    placement=attention_scores.placement,
                )
            else:
                position_bias = self.compute_bias(
                    real_seq_length, key_length, placement=attention_mask.placement
                )
            if past_key_value is not None:
                # Only the bias rows for the newly added query positions are needed.
                position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
            # NOTE(review): -1000 here vs -10000.0 in the masking branch below —
            # confirm the asymmetry is intentional.
            position_bias = position_bias + (1 - attention_mask) * -1000
            position_bias = position_bias.to_global(placement=attention_scores.placement)
        attention_scores = attention_scores + position_bias
        # [S(0), S(1)] x [S(0), B] = [S(0), S(1)]
        if attention_mask is not None:
            attention_scores = flow.mul(attention_scores, attention_mask)
            attention_scores = attention_scores - 10000.0 * (1 - attention_mask)
            # TODO(xingyu.liao): graph will occur `where_scalar` errors
            # when using `masked_fill`
            # attention_scores = attention_scores.masked_fill(1 - attention_mask, -10000.0)
            attention_weights = flow.softmax(attention_scores, dim=-1)
            # [bsz, num_heads, tgt_len, src_len]
            attention_weights = self.dropout(attention_weights)
        else:
            attention_weights = flow.softmax(attention_scores, dim=-1)
            # [bsz, num_heads, tgt_len, src_len]
            attention_weights = self.dropout(attention_weights)
        # Context shape: [bsz, num_heads, tgt_len, head_size] with [S(0), S(1)]
        context = flow.matmul(attention_weights, value)
        # Change shape: [bsz, num_heads, tgt_len, head_size] -> [bsz, tgt_len, num_heads, head_size]
        context = context.transpose(1, 2)
        # Concat multi-head results from
        # [bsz, tgt_len, num_heads, head_size] -> [bsz, tgt_len, num_heads * head_size]
        # SBP sign: [S(0), S(2)]
        # [S(0), S(2)] x [B, S(0)] = [S(0), P] -> [S(0), B]
        output = self.dense(context.flatten(2))
        output = self.output_dropout(output)
        if use_cache:
            output = (output, past_key_value)
        output = (output,) + (position_bias,)
        return output

    def extra_repr(self) -> str:
        return "hidden_size={}, num_heads={}, is_cross_attention={}".format(
            self.hidden_size,
            self.num_heads,
            self.is_cross_attention,
        )

    def _relative_position_bucket(
        self, relative_position, bidirectional=True, num_buckets=32, max_distance=128
    ):
        """Map signed relative positions to bucket indices (T5 scheme):
        half the buckets for exact small offsets, the rest log-spaced up to
        ``max_distance``."""
        # relative_position: (seq_len, seq_len)
        relative_buckets = 0
        if bidirectional:
            num_buckets //= 2
            relative_buckets = (
                relative_buckets + (relative_position > 0).to(flow.long) * num_buckets
            )
            relative_position = flow.abs(relative_position)
        else:
            relative_position = (
                -1
                * flow.min(
                    relative_position,
                    flow.zeros(
                        relative_position.size(),
                        sbp=relative_position.sbp,
                        placement=relative_position.placement,
                    ),
                ).to(flow.long)
            )
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        # Larger offsets fall into logarithmically spaced buckets, clamped to
        # the last bucket index (num_buckets - 1).
        relative_position_if_large = max_exact + (
            flow.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(flow.long)
        relative_position_if_large = flow.min(
            relative_position_if_large,
            flow.zeros(
                relative_position_if_large.size(),
                dtype=relative_position_if_large.dtype,
                sbp=relative_position_if_large.sbp,
                placement=relative_position_if_large.placement,
            ).fill_(num_buckets - 1),
        )
        relative_buckets = relative_buckets + flow.where(
            is_small, relative_position, relative_position_if_large
        )
        return relative_buckets

    def compute_bias(self, query_length, key_length, placement=None):
        """Compute binned relative position bias"""
        context_position = flow.arange(
            query_length,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=placement,
        )
        memory_position = flow.arange(
            key_length,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=placement,
        )
        relative_position = (
            memory_position[None, :] - context_position[:, None]
        )  # shape (query_length, key_length)
        relative_position_bucket = self._relative_position_bucket(
            relative_position,
            bidirectional=(not self.is_decoder),
            num_buckets=self.relative_attention_num_buckets,
        )  # shape (query_length, key_length)
        values = self.relative_attention_bias(
            relative_position_bucket
        )  # shape (query_length, key_length, num_heads)
        values = values.permute([2, 0, 1]).unsqueeze(
            0
        )  # shape (1, num_heads, query_length, key_length)
        return values
| 14,317 | 39.792023 | 100 | py |
libai | libai-main/projects/T5/datasets/dataset.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
try:
import datasets
except: # noqa
warnings.warn("datasets library is needed")
import numpy as np
import oneflow as flow
from oneflow.utils.data import Dataset
from libai.data.structures import DistTensorData, Instance
def get_data(path):
    """Load and concatenate 10 dataset shards.

    The shard paths differ only in the final character of ``path``
    (``.../part_0`` ... ``.../part_9``). Fixes the original version, whose
    inner loop shadowed the outer loop variable ``i`` and rebuilt the shard
    prefix on every iteration.

    Returns:
        list: all rows from the 10 shards, in shard order.
    """
    base = path[:-1]  # strip the trailing shard digit once
    total_data = []
    for shard_idx in range(10):
        shard = datasets.load_from_disk(base + str(shard_idx))
        total_data.extend(shard)
    return total_data
def compute_input_and_target_lengths(inputs_length, noise_density, mean_noise_span_length):
    """Find the raw token length whose span-corrupted encoding fills ``inputs_length``.

    Port of `random_spans_helper <https://github.com/google-research/
    text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/
    t5/data/preprocessors.py#L2466>`__ .

    Each noise span in the inputs is replaced by one sentinel token, each
    non-noise span in the targets likewise, and both sequences get an EOS
    appended — so a raw sequence of ``tokens_length`` tokens encodes to
    ``tokens_length - num_noise + num_spans + 1`` input tokens and
    ``num_noise + num_spans + 1`` target tokens.

    Args:
        inputs_length: an integer - desired length of the tokenized inputs sequence
        noise_density: a float
        mean_noise_span_length: a float
    Returns:
        tokens_length: length of original text in tokens
        targets_length: an integer - length in tokens of encoded targets sequence
    """

    def _encoded_lengths(raw_length):
        num_noise_tokens = int(round(raw_length * noise_density))
        num_noise_spans = int(round(num_noise_tokens / mean_noise_span_length))
        num_nonnoise_tokens = raw_length - num_noise_tokens
        # inputs contain all non-noise tokens, one sentinel per noise span,
        # and one EOS token; targets mirror that for the noise tokens.
        encoded_inputs = num_nonnoise_tokens + num_noise_spans + 1
        encoded_targets = num_noise_tokens + num_noise_spans + 1
        return encoded_inputs, encoded_targets

    # Grow the raw length as far as possible without the encoded inputs
    # overflowing `inputs_length` (i.e. avoid padding).
    tokens_length = inputs_length
    while _encoded_lengths(tokens_length + 1)[0] <= inputs_length:
        tokens_length += 1
    inputs_length, targets_length = _encoded_lengths(tokens_length)

    # minor hack to get the targets length to be equal to inputs length
    # which is more likely to have been set to a nice round number.
    if noise_density == 0.5 and targets_length > inputs_length:
        tokens_length -= 1
        targets_length -= 1
    return tokens_length, targets_length
class UnsuperviseT5Dataset(Dataset):
    """Unsupervised T5 pretraining dataset.

    Adapted from https://github.com/IDEA-CCNL/Fengshenbang-LM/blob/
    ec13aeb8689cfafaa6a7a9e9595d110edbe34123/fengshen/data/t5_dataloader/t5_datasets.py#L61.
    Each sample is a dict such as ``{"input_ids": ...}`` loaded via ``get_data``.
    """

    def __init__(self, data_path):
        self.data = get_data(data_path)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]
class collate_fn:
    """Span-corruption collator for T5/mT5 pretraining.

    Masks random spans of each example, replaces every masked span with a
    sentinel id counted down from ``vocab_size``, and packs encoder/decoder
    inputs, attention masks and labels into a LiBai ``Instance``.

    Fixes vs. the original: the ``input_ids`` length-check error message now
    reports ``max_seq_length`` (the value actually checked) instead of
    ``targets_length``, and ``shift_tokens_right`` is annotated with
    ``np.ndarray`` rather than the callable ``np.array``.
    """

    def __init__(
        self,
        vocab_size,
        max_seq_length,
        noise_density,
        mean_noise_span_length,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
    ):
        self.max_seq_length = max_seq_length
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.vocab_size = vocab_size
        self.noise_density = noise_density
        self.mean_noise_span_length = mean_noise_span_length
        # Raw sample length that, after corruption, yields exactly
        # `max_seq_length` encoder tokens and `targets_length` decoder tokens.
        self.expanded_inputs_length, self.targets_length = compute_input_and_target_lengths(
            inputs_length=self.max_seq_length,
            noise_density=self.noise_density,
            mean_noise_span_length=self.mean_noise_span_length,
        )

    def __call__(self, examples):
        # Stack each field across the batch.
        batch = {
            k: np.array([example[k] for example in examples]) for k in examples[0]
        }
        input_ids = np.array(batch["input_ids"])
        batch_size, expanded_input_length = input_ids.shape
        # One independent noise mask per example.
        mask_indices = np.asarray(
            [self.random_spans_noise_mask(expanded_input_length) for _ in range(batch_size)]
        )
        labels_mask = ~mask_indices
        input_ids_sentinel = self.create_sentinel_ids(mask_indices.astype(np.int8))
        labels_sentinel = self.create_sentinel_ids(labels_mask.astype(np.int8))
        batch["input_ids"] = self.filter_input_ids(input_ids, input_ids_sentinel)
        batch["labels"] = self.filter_input_ids(input_ids, labels_sentinel)
        if batch["input_ids"].shape[-1] != self.max_seq_length:
            raise ValueError(
                "`input_ids` are incorrectly preprocessed. `input_ids` length is "
                f"{batch['input_ids'].shape[-1]}, but should be {self.max_seq_length}."
            )
        if batch["labels"].shape[-1] != self.targets_length:
            raise ValueError(
                "`labels` are incorrectly preprocessed. `labels` length is "
                f"{batch['labels'].shape[-1]}, but should be {self.targets_length}."
            )
        batch["decoder_input_ids"] = self.shift_tokens_right(
            batch["labels"], self.pad_token_id, self.decoder_start_token_id
        )
        return Instance(
            encoder_input_ids=DistTensorData(flow.tensor(batch["input_ids"])),
            decoder_input_ids=DistTensorData(flow.tensor(batch["decoder_input_ids"])),
            encoder_attn_mask=DistTensorData(
                flow.ones(len(batch["input_ids"]), len(batch["input_ids"][0])).to(flow.bool)
            ),
            decoder_attn_mask=DistTensorData(
                flow.ones(len(batch["decoder_input_ids"]), len(batch["decoder_input_ids"][0])).to(
                    flow.bool
                )
            ),
            encoder_decoder_attn_mask=DistTensorData(
                flow.ones(
                    len(batch["input_ids"]),
                    len(batch["decoder_input_ids"][0]),
                    len(batch["input_ids"][0]),
                ).to(flow.bool)
            ),
            lm_labels=DistTensorData(flow.tensor(batch["labels"])),
            # NOTE(review): loss_mask reuses the label ids verbatim — confirm
            # downstream expects token ids here rather than a 0/1 mask.
            loss_mask=DistTensorData(flow.tensor(batch["labels"])),
        )

    def filter_input_ids(self, input_ids, sentinel_ids):
        """Collapse each masked span to its sentinel id, drop the remaining
        masked positions (marked negative), and append EOS."""
        batch_size = input_ids.shape[0]
        input_ids_full = np.where(sentinel_ids != 0, sentinel_ids, input_ids)
        input_ids = input_ids_full[input_ids_full >= 0].reshape((batch_size, -1))
        input_ids = np.concatenate(
            [input_ids, np.full((batch_size, 1), self.eos_token_id, dtype=np.int32)], axis=-1
        )
        return input_ids

    def create_sentinel_ids(self, mask_indices):
        """Turn a 0/1 span mask into sentinel ids: the first position of the
        n-th span gets id ``vocab_size - n``; the rest of the span gets -1."""
        start_indices = mask_indices - np.roll(mask_indices, 1, axis=-1) * mask_indices
        start_indices[:, 0] = mask_indices[:, 0]
        sentinel_ids = np.where(
            start_indices != 0, np.cumsum(start_indices, axis=-1), start_indices
        )
        sentinel_ids = np.where(sentinel_ids != 0, (self.vocab_size - sentinel_ids), 0)
        # Mark non-start masked positions with -1 so filter_input_ids drops them.
        sentinel_ids -= mask_indices - start_indices
        return sentinel_ids

    def random_spans_noise_mask(self, length):
        """Sample a boolean mask of `length` positions whose True runs cover
        ~noise_density of the tokens in ~mean_noise_span_length-sized spans."""
        orig_length = length
        num_noise_tokens = int(np.round(length * self.noise_density))
        # avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens.
        num_noise_tokens = min(max(num_noise_tokens, 1), length - 1)
        num_noise_spans = int(np.round(num_noise_tokens / self.mean_noise_span_length))
        # avoid degeneracy by ensuring positive number of noise spans
        num_noise_spans = max(num_noise_spans, 1)
        num_nonnoise_tokens = length - num_noise_tokens

        # pick the lengths of the noise spans and the non-noise spans
        def _random_segmentation(num_items, num_segments):
            mask_indices = np.arange(num_items - 1) < (num_segments - 1)
            np.random.shuffle(mask_indices)
            first_in_segment = np.pad(mask_indices, [[1, 0]])
            segment_id = np.cumsum(first_in_segment)
            # count length of sub segments assuming that list is sorted
            _, segment_length = np.unique(segment_id, return_counts=True)
            return segment_length

        noise_span_lengths = _random_segmentation(num_noise_tokens, num_noise_spans)
        nonnoise_span_lengths = _random_segmentation(num_nonnoise_tokens, num_noise_spans)
        interleaved_span_lengths = np.reshape(
            np.stack([nonnoise_span_lengths, noise_span_lengths], axis=1), [num_noise_spans * 2]
        )
        span_starts = np.cumsum(interleaved_span_lengths)[:-1]
        span_start_indicator = np.zeros((length,), dtype=np.int8)
        span_start_indicator[span_starts] = True
        span_num = np.cumsum(span_start_indicator)
        is_noise = np.equal(span_num % 2, 1)
        return is_noise[:orig_length]

    def shift_tokens_right(
        self, input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int
    ) -> np.ndarray:
        """
        Shift input ids one token to the right, prepending the decoder start
        token and replacing any -100 padding markers with ``pad_token_id``.
        """
        shifted_input_ids = np.zeros_like(input_ids)
        shifted_input_ids[:, 1:] = input_ids[:, :-1]
        shifted_input_ids[:, 0] = decoder_start_token_id
        shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
        return shifted_input_ids
| 10,558 | 40.735178 | 98 | py |
libai | libai-main/projects/T5/configs/optim.py | import oneflow as flow
from libai.optim import get_default_optimizer_params
from libai.config import LazyCall
# AdamW optimizer config, instantiated lazily by LiBai's LazyCall mechanism.
optim = LazyCall(flow.optim.AdamW)(
    params=LazyCall(get_default_optimizer_params)(
        # params.model is meant to be set to the model object,
        # before instantiating the optimizer.
        clip_grad_max_norm=1.0,  # gradient clipping threshold
        clip_grad_norm_type=2.0,  # clip by L2 norm
        weight_decay_norm=0.0,  # no weight decay on norm-layer params
        weight_decay_bias=0.0,  # no weight decay on bias params
    ),
    lr=1e-4,  # base learning rate (overridden per-experiment, see mt5_pretrain.py)
    weight_decay=0.01,
    betas=(0.9, 0.999),
    eps=1e-8,
    do_bias_correction=True,
)
| 547 | 25.095238 | 62 | py |
libai | libai-main/projects/T5/configs/t5_model_config.py | from omegaconf import DictConfig
# Default T5 model hyperparameters; experiments override individual fields
# (see mt5_pretrain.py, which sets model_type="mt5" and smaller sizes).
cfg = dict(
    vocab_size=30522,  # token vocabulary size
    hidden_size=768,  # transformer hidden dimension
    hidden_layers=6,  # number of transformer layers
    num_attention_heads=12,
    head_size=64,  # per-head dimension
    intermediate_size=1536,  # feed-forward inner dimension
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    relative_attention_num_buckets=32,  # buckets for T5 relative position bias
    embedding_dropout_prob=0.1,
    initializer_range=0.02,
    layernorm_eps=1e-5,
    amp_enabled=False,  # mixed-precision flag
    model_type="t5",  # "t5" or "mt5" (selects MLP/conversion variants)
)
# Wrap in an OmegaConf DictConfig so fields are attribute-accessible.
cfg = DictConfig(cfg)
| 444 | 19.227273 | 38 | py |
libai | libai-main/projects/T5/configs/mt5_pretrain.py | from libai import evaluation
from libai.data.build import build_nlp_train_loader
from omegaconf import OmegaConf
from libai.config import LazyCall
from libai.evaluation import PPLEvaluator, evaluator
from libai.scheduler import WarmupExponentialLR
from configs.common.train import train
from configs.common.models.graph import graph
from projects.T5.configs.optim import optim
from projects.T5.configs.t5_model_config import cfg
from projects.T5.datasets.dataset import UnsuperviseT5Dataset, collate_fn
from projects.T5.models.t5_model import T5ForPreTraining
# ---- paths and batch setup ----
train_data_path = "projects/T5/data/training_data/part_0"
pretrained_model_path = None
micro_batch_size = 64
optim["lr"] = 1e-4
# dataloader
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_loader)(
    dataset=[
        LazyCall(UnsuperviseT5Dataset)(
            data_path=train_data_path,
        )
    ],
    # Span-corruption collator; sentinel ids count down from vocab_size.
    collate_fn=collate_fn(
        vocab_size=12902,
        max_seq_length=512,
        noise_density=0.15,
        mean_noise_span_length=3,
        eos_token_id=12801,
        pad_token_id=0,
        decoder_start_token_id=12800,
    ),
)
model = LazyCall(T5ForPreTraining)(cfg=cfg)
# model config
model.cfg.vocab_size = 12902
model.cfg.hidden_size = 512
model.cfg.hidden_layers = 8
model.cfg.num_attention_heads = 6
model.cfg.head_size = 64
model.cfg.intermediate_size = 1024
model.cfg.hidden_dropout_prob = 0.0
model.cfg.attention_probs_dropout_prob = 0.0
model.cfg.embedding_dropout_prob = 0.0
model.cfg.layernorm_eps = 1e-6
model.cfg.model_type = "mt5"
model.cfg.pretrained_model_path = pretrained_model_path
# ---- training schedule, parallelism and evaluation ----
train.update(
    dict(
        output_dir="projects/T5/output/mt5_output",
        train_micro_batch_size=micro_batch_size,
        train_epoch=1,
        train_iter=24000,
        log_period=10,
        amp=dict(enabled=False),
        warmup_ratio=1 / 24,
        # checkpointer=dict(period=10, max_to_keep=20),
        dist=dict(
            data_parallel_size=2,
            tensor_parallel_size=2,
            pipeline_parallel_size=1,
            # encoder + decoder stacks, hence 2x hidden_layers
            pipeline_num_layers=2 * model.cfg.hidden_layers,
        ),
        scheduler=LazyCall(WarmupExponentialLR)(
            warmup_factor=0.001,
            gamma=1.0,
            warmup_method="linear",
            warmup_iter=0.0,
        ),
        evaluation=dict(
            evaluator=LazyCall(PPLEvaluator)(),
            enabled=True,
            eval_iter=1e5,
            eval_period=5000,
        ),
    )
)
# ZeRO stage-2 optimizer state sharding.
train.zero_optimization.enabled = True
train.zero_optimization.stage = 2
| 2,566 | 26.902174 | 73 | py |
libai | libai-main/projects/T5/utils/weight_convert.py | import argparse
import oneflow as flow
import torch
from libai.config import LazyConfig
def parse_args(argv=None):
    """Parse command-line options for the MT5 weight convertor.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            Passing a list explicitly makes the function unit-testable.

    Returns:
        argparse.Namespace with ``oneflow_state_dict_path``, ``config_path``
        and ``save_path``.
    """
    parser = argparse.ArgumentParser(description="MT5 Weight Convertor")
    parser.add_argument(
        "--oneflow_state_dict_path", type=str, help="The path of mt5's checkpoint in LiBai"
    )
    parser.add_argument(
        "--config_path",
        type=str,
        default="projects/T5/configs/mt5_pretrain.py",
        help="The path of the training config",
    )
    parser.add_argument("--save_path", type=str, default="projects/T5/pytorch_model.bin")
    return parser.parse_args(argv)
def fix_qkv_ordering(qkv, head_size, num_heads, hidden_size=None):
    """Reorder a fused q/k/v weight from head-major to qkv-major row grouping.

    The input rows are grouped as (num_heads, num_of_qkv, head_size); the
    output rows are grouped as (num_of_qkv, num_heads, head_size), keeping the
    trailing ``hidden_size`` columns untouched.
    """
    if hidden_size is None:
        hidden_size = head_size * num_heads
    num_of_qkv = qkv.shape[0] // (head_size * num_heads)
    reordered = (
        qkv.view(-1)
        .view(num_heads, num_of_qkv, head_size, hidden_size)
        .permute(1, 0, 2, 3)
        .contiguous()
        .view(num_of_qkv * head_size * num_heads, hidden_size)
    )
    return reordered
def convert_tensor(tensor):
    """Copy a OneFlow tensor into a detached float32 PyTorch tensor."""
    array = tensor.detach().to_numpy()
    return torch.tensor(array, dtype=torch.float32)
def convert_state_dict(oneflow_state_dict_path, libai_cfg, prefix="t5_model."):
    """Convert a LiBai (OneFlow) T5/mT5 checkpoint into a HuggingFace-style
    PyTorch state dict.

    Maps embedding, layernorm, lm_head, and per-layer attention/MLP weights to
    the HF T5 key layout, splitting LiBai's fused qkv/key_value projections
    into separate q/k/v (and k/v) tensors via ``fix_qkv_ordering``.

    Args:
        oneflow_state_dict_path: directory of the OneFlow checkpoint
            (loaded with ``flow.load``).
        libai_cfg: model config providing num_attention_heads, hidden_size,
            head_size and model_type ("t5" or "mt5", which selects the MLP
            key naming).
        prefix: key prefix of the LiBai model inside the checkpoint.

    Returns:
        dict mapping HF parameter names to torch.float32 tensors.
    """
    oneflow_state_dict = flow.load(oneflow_state_dict_path)
    torch_state_dict = {}

    # Get configs
    num_heads = libai_cfg.get("num_attention_heads")
    hidden_size = libai_cfg.get("hidden_size")
    head_size = libai_cfg.get("head_size", None)
    if head_size is None:
        head_size = int(hidden_size / num_heads)

    # Positions of the layer index / encoder-decoder tag / op name inside a
    # dot-split checkpoint key; shifted by one when a non-empty prefix is used.
    layer_idx = 3 if len(prefix) > 1 else 2
    enc_dec_idx = 1 if len(prefix) > 1 else 0
    op_idx = 4 if len(prefix) > 1 else 3

    # Convert T5's Embedding layers.
    # HF ties shared/encoder/decoder token embeddings to the same tensor.
    x = convert_tensor(oneflow_state_dict.pop(prefix + "embedding.word_embeddings.weight"))
    new_key = "shared.weight"
    torch_state_dict[new_key] = x
    new_key = "encoder.embed_tokens.weight"
    torch_state_dict[new_key] = x
    new_key = "decoder.embed_tokens.weight"
    torch_state_dict[new_key] = x

    # Convert T5's final_layer_norm
    new_key = "encoder.final_layer_norm.weight"
    torch_state_dict[new_key] = convert_tensor(
        oneflow_state_dict.pop(prefix + "encoder.final_layernorm.weight")
    )
    new_key = "decoder.final_layer_norm.weight"
    torch_state_dict[new_key] = convert_tensor(
        oneflow_state_dict.pop(prefix + "decoder.final_layernorm.weight")
    )
    old_keys = list(oneflow_state_dict.keys())

    # Convert T5's lm_head
    new_key = "lm_head.weight"
    if prefix + new_key in old_keys:
        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(prefix + new_key))

    for key in old_keys:
        keys = key.split(".")
        # NOTE(review): guard admits op_idx == len(keys), and keys[op_idx + 1]
        # below assumes at least op_idx + 2 parts — would IndexError on a
        # shorter key; presumably all remaining keys are long enough. Confirm.
        if op_idx > len(keys):
            continue
        layers = keys[layer_idx]
        enc_dec = keys[enc_dec_idx]
        op_name = keys[op_idx]
        if keys[op_idx + 1] == "relative_attention_bias":
            # Only the first block of each stack owns the relative bias in HF T5.
            new_key = enc_dec + ".block.0.layer.0.SelfAttention.relative_attention_bias.weight"
            torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))

        # Convert T5's Encoder layers.
        if enc_dec == "encoder":
            if op_name == "self_attention":
                if keys[op_idx + 1] == "query_key_value":
                    # Split the fused projection into separate q/k/v weights.
                    x = oneflow_state_dict.pop(key)
                    x = fix_qkv_ordering(x, head_size, num_heads, hidden_size)
                    q, k, v = flow.chunk(x, chunks=3, dim=0)
                    new_key = "encoder.block." + layers + ".layer.0.SelfAttention.q.weight"
                    torch_state_dict[new_key] = convert_tensor(q)
                    new_key = new_key.replace(".q", ".k")
                    torch_state_dict[new_key] = convert_tensor(k)
                    new_key = new_key.replace(".k", ".v")
                    torch_state_dict[new_key] = convert_tensor(v)
                if keys[op_idx + 1] == "dense":
                    new_key = "encoder.block." + layers + ".layer.0.SelfAttention.o.weight"
                    torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "input_layernorm":
                new_key = "encoder.block." + layers + ".layer.0.layer_norm.weight"
                torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "post_attention_layernorm":
                new_key = "encoder.block." + layers + ".layer.1.layer_norm.weight"
                torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "mlp":
                # mt5 uses a gated MLP (wi_0/wi_1); t5 uses a single wi.
                if libai_cfg.get("model_type") == "mt5":
                    if keys[op_idx + 1] == "wi_0":
                        new_key = "encoder.block." + layers + ".layer.1.DenseReluDense.wi_0.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                    if keys[op_idx + 1] == "wi_1":
                        new_key = "encoder.block." + layers + ".layer.1.DenseReluDense.wi_1.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                    if keys[op_idx + 1] == "wo":
                        new_key = "encoder.block." + layers + ".layer.1.DenseReluDense.wo.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                elif libai_cfg.get("model_type") == "t5":
                    if keys[op_idx + 1] == "dense_h_to_4h":
                        new_key = "encoder.block." + layers + ".layer.1.DenseReluDense.wi.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                    if keys[op_idx + 1] == "dense_4h_to_h":
                        new_key = "encoder.block." + layers + ".layer.1.DenseReluDense.wo.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))

        # Convert T5's decoder Layers.
        elif enc_dec == "decoder":
            if op_name == "self_attention":
                if keys[op_idx + 1] == "query_key_value":
                    x = oneflow_state_dict.pop(key)
                    x = fix_qkv_ordering(x, head_size, num_heads, hidden_size)
                    q, k, v = flow.chunk(x, chunks=3, dim=0)
                    new_key = "decoder.block." + layers + ".layer.0.SelfAttention.q.weight"
                    torch_state_dict[new_key] = convert_tensor(q)
                    new_key = new_key.replace(".q", ".k")
                    torch_state_dict[new_key] = convert_tensor(k)
                    new_key = new_key.replace(".k", ".v")
                    torch_state_dict[new_key] = convert_tensor(v)
                if keys[op_idx + 1] == "dense":
                    new_key = "decoder.block." + layers + ".layer.0.SelfAttention.o.weight"
                    torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "input_layernorm":
                new_key = "decoder.block." + layers + ".layer.0.layer_norm.weight"
                torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "post_attention_layernorm":
                new_key = "decoder.block." + layers + ".layer.1.layer_norm.weight"
                torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "post_cross_attention_layernorm":
                new_key = "decoder.block." + layers + ".layer.2.layer_norm.weight"
                torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "cross_attention":
                if keys[op_idx + 1] == "query":
                    x = oneflow_state_dict.pop(key)
                    x = fix_qkv_ordering(x, head_size, num_heads, hidden_size)
                    new_key = "decoder.block." + layers + ".layer.1.EncDecAttention.q.weight"
                    torch_state_dict[new_key] = convert_tensor(x)
                if keys[op_idx + 1] == "key_value":
                    # Split the fused key/value projection.
                    x = oneflow_state_dict.pop(key)
                    x = fix_qkv_ordering(x, head_size, num_heads, hidden_size)
                    k, v = flow.chunk(x, chunks=2, dim=0)
                    new_key = "decoder.block." + layers + ".layer.1.EncDecAttention.k.weight"
                    torch_state_dict[new_key] = convert_tensor(k)
                    new_key = new_key.replace(".k", ".v")
                    torch_state_dict[new_key] = convert_tensor(v)
                if keys[op_idx + 1] == "dense":
                    new_key = "decoder.block." + layers + ".layer.1.EncDecAttention.o.weight"
                    torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "mlp":
                if libai_cfg.get("model_type") == "mt5":
                    if keys[op_idx + 1] == "wi_0":
                        new_key = "decoder.block." + layers + ".layer.2.DenseReluDense.wi_0.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                    if keys[op_idx + 1] == "wi_1":
                        new_key = "decoder.block." + layers + ".layer.2.DenseReluDense.wi_1.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                    if keys[op_idx + 1] == "wo":
                        new_key = "decoder.block." + layers + ".layer.2.DenseReluDense.wo.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                elif libai_cfg.get("model_type") == "t5":
                    if keys[op_idx + 1] == "dense_h_to_4h":
                        new_key = "decoder.block." + layers + ".layer.2.DenseReluDense.wi.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                    if keys[op_idx + 1] == "dense_4h_to_h":
                        new_key = "decoder.block." + layers + ".layer.2.DenseReluDense.wo.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))

    return torch_state_dict
if __name__ == "__main__":
    # Convert a LiBai (OneFlow) t5/mt5 checkpoint into a PyTorch state dict
    # and save it with torch.save.
    args = parse_args()
    oneflow_state_dict_path = args.oneflow_state_dict_path
    config_path = args.config_path
    save_path = args.save_path
    # The training config supplies the model hyperparameters (heads, sizes,
    # model_type) needed to map checkpoint keys.
    training_config = LazyConfig.load(config_path)
    model_config = training_config.model.cfg
    torch_state_dict = convert_state_dict(oneflow_state_dict_path, model_config)
    torch.save(torch_state_dict, save_path)
| 10,611 | 47.678899 | 99 | py |
libai | libai-main/projects/T5/utils/mask.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from libai.utils import distributed as dist
class ExtendedMask(flow.nn.Module):
    """Expand a 2D or 3D attention mask to the 4D shape used by attention layers.

    A 3D mask ``[bsz, tgt_len, src_len]`` becomes ``[bsz, 1, tgt_len, src_len]``.
    A 2D mask ``[bsz, src_len]`` becomes ``[bsz, 1, 1, src_len]`` for encoders,
    or is combined with a causal mask for decoders.
    """

    def forward(self, x, input_tensor=None, is_decoder=False):
        """Return the broadcasted 4D mask for ``x``.

        Args:
            x: 2D or 3D attention mask.
            input_tensor: decoder input ids, required only when ``is_decoder``
                is True with a 2D mask (provides batch size / sequence length).
            is_decoder: whether to fold in a causal mask.

        Raises:
            ValueError: if ``x`` is neither 2D nor 3D.
        """
        if x.dim() == 3:
            # [bsz, tgt_len, src_len] -> [bsz, 1, tgt_len, src_len]
            extended_mask = x[:, None, :, :]
        elif x.dim() == 2:
            if is_decoder:
                extended_mask = self.create_extended_mask_for_decoder(x, input_tensor)
            else:
                # [bsz, src_len] -> [bsz, 1, 1, src_len]
                extended_mask = x[:, None, None, :]
        else:
            # Previously this fell through and raised UnboundLocalError on the
            # return statement; fail loudly with a clear message instead.
            raise ValueError(f"attention mask must be 2D or 3D, but got {x.dim()}D")
        return extended_mask

    def create_extended_mask_for_decoder(self, x, input_tensor):
        """Combine the padding mask ``x`` with a lower-triangular causal mask."""
        batch_size, seq_len = input_tensor.size()
        seq_ids = flow.arange(
            seq_len,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=x.placement,
        )
        # causal_mask[b, i, j] = 1 iff position j is visible from position i (j <= i).
        causal_mask = (
            seq_ids[None, None, :].repeat(batch_size, seq_len, 1) <= seq_ids[None, :, None]
        )
        causal_mask = causal_mask.to(x.dtype)
        causal_mask = causal_mask.to_global(sbp=x.sbp)
        if causal_mask.shape[1] < x.shape[1]:
            # The padding mask covers a prefix (e.g. cached past tokens) that is
            # always attendable; prepend ones for those positions.
            prefix_seq_len = x.shape[1] - causal_mask.shape[1]
            ones = flow.ones(
                (batch_size, seq_len, prefix_seq_len),
                dtype=causal_mask.dtype,
                sbp=causal_mask.sbp,
                placement=causal_mask.placement,
            )
            causal_mask = flow.cat(
                [
                    ones,
                    causal_mask,
                ],
                dim=-1,
            )
        # Intersect causal visibility with the per-token padding mask.
        extended_mask = causal_mask[:, None, :, :] * x[:, None, None, :]
        return extended_mask
| 2,225 | 33.78125 | 91 | py |
libai | libai-main/projects/MagicPrompt/gpt2.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from oneflow.nn import init
from libai.config import configurable
from libai.inference.generator.generation_utils import Generator
from libai.layers import Embedding, LayerNorm, LMLogits, VocabEmbedding
from libai.layers.attention import AttnMaskType
from libai.models.gpt_model import GPTLoss
from libai.models.utils import GPT2LoaderHuggerFace, init_method_normal, scaled_init_method_normal
from libai.utils import distributed as dist
from projects.MagicPrompt.layers.transformer_layer import TransformerLayer
class GPTModel(nn.Module, Generator):
    """GPT-2 language model. The output of the forward method is logits.
    Args:
        hidden_layers (int): The number of ``TransformerLayer`` in the gpt model.
        vocab_size (int): The size of vocabulary file.
        hidden_size (int): The size of hidden states.
        ffn_hidden_size (int):
            The size of intermediate layer in feed-forward network for each ``TransformerLayer``.
        num_attention_heads (int):
            The number of attention heads for each attention layer of ``TransformerLayer``.
        max_seq_length (int, optional):
            Max sequence length of input, defines the shape of Position Embeddings in GPTEmbedding.
            Defaults to 1024.
        embedding_dropout_prob (float, optional):
            The dropout ratio for the output of GPTEmbedding Layer. Defaults to 0.0.
        attention_dropout_prob (float, optional):
            The dropout ratio for the output of each attention layer in ``TransformerLayer``.
            Defaults to 0.0.
        output_dropout_prob (float, optional):
            The dropout ratio for the output for each TransformerLayer. Defaults to 0.0.
        layernorm_epsilon (float, optional):
            The epsilon of LayerNorm layer. Defaults to 1e-5.
        initializer_range (float, optional):
            Sigma of the normal distribution in the initialization method. Defaults to 0.02.
        use_scaled_init_for_output_weights (bool, optional): Defaults to ``True``.
        bias_gelu_fusion (bool, optional):
            Whether or not to fuse the computing of bias and gelu. Defaults to ``False``.
        bias_dropout_fusion (bool, optional):
            Whether or not to fuse the computing of dropout and bias. Defaults to ``False``.
        scale_mask_softmax_fusion (bool, optional):
            Whether to fuse the computing of mask and softmax in attention layers.
            Defaults to ``False``.
        apply_query_key_layer_scaling (bool, optional):
            Whether or not to use layer index related scaling in computing attention scores.
            If ``True``, the scaling factor equals to sqrt(d) * (layer_index + 1).
            Defaults to ``False``.
        apply_residual_post_layernorm (bool, optional):
            If set ``True``, use original BERT residual connection ordering otherwise use Megatron
            BERT residual connection which is more stable when scaling model size introduced in
            https://arxiv.org/pdf/1909.08053.pdf.
            Default: ``False``.
        amp_enabled (bool, optional):
            Whether or not to set fp16 for embedding weight in T5 model. Defaults to ``False``.
    """
    @configurable
    def __init__(
        self,
        hidden_layers,
        vocab_size,
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        max_seq_length=1024,
        embedding_dropout_prob=0.0,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        layernorm_epsilon=1e-5,
        initializer_range=0.02,
        use_scaled_init_for_output_weights=True,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        apply_residual_post_layernorm=False,
        amp_enabled=False,
        cfg=None,
    ):
        super().__init__()
        self.cfg = cfg
        init_method = init_method_normal(sigma=initializer_range)
        # Scale the output-layer init std down with depth (Megatron-style) when enabled.
        if use_scaled_init_for_output_weights:
            output_layer_init_method = scaled_init_method_normal(initializer_range, hidden_layers)
        else:
            output_layer_init_method = init_method
        self.embeddings = GPTEmbedding(
            vocab_size,
            hidden_size,
            max_seq_length,
            init_method=init_method,
            embedding_dropout_prob=embedding_dropout_prob,
            amp_enabled=amp_enabled,
        )
        self.transformer = Transformer(
            hidden_layers,
            hidden_size,
            ffn_hidden_size,
            num_attention_heads,
            attention_dropout_prob=attention_dropout_prob,
            output_dropout_prob=output_dropout_prob,
            layernorm_epsilon=layernorm_epsilon,
            init_method=init_method,
            output_layer_init_method=output_layer_init_method,
            bias_gelu_fusion=bias_gelu_fusion,
            bias_dropout_fusion=bias_dropout_fusion,
            scale_mask_softmax_fusion=scale_mask_softmax_fusion,
            apply_query_key_layer_scaling=apply_query_key_layer_scaling,
            apply_residual_post_layernorm=apply_residual_post_layernorm,
            set_cache=self.set_cache,
        )
        # Per-layer KV cache used for incremental decoding; one slot per layer.
        self.past_key_values = [None] * hidden_layers
        self.past_length = 0
        # Output projection shares the token embedding weight (see forward()).
        self.lm_head = LMLogits(vocab_size, bias=False)
    @classmethod
    def from_config(cls, cfg):
        """Map a config node onto the constructor's keyword arguments."""
        return {
            "hidden_layers": cfg.hidden_layers,
            "vocab_size": cfg.vocab_size,
            "hidden_size": cfg.hidden_size,
            "ffn_hidden_size": cfg.ffn_hidden_size,
            "num_attention_heads": cfg.num_attention_heads,
            "max_seq_length": cfg.max_seq_length,
            "embedding_dropout_prob": cfg.embedding_dropout_prob,
            "attention_dropout_prob": cfg.attention_dropout_prob,
            "output_dropout_prob": cfg.output_dropout_prob,
            "layernorm_epsilon": cfg.layernorm_epsilon,
            "initializer_range": cfg.initializer_range,
            "use_scaled_init_for_output_weights": cfg.use_scaled_init_for_output_weights,
            "bias_gelu_fusion": cfg.bias_gelu_fusion,
            "bias_dropout_fusion": cfg.bias_dropout_fusion,
            "scale_mask_softmax_fusion": cfg.scale_mask_softmax_fusion,
            "apply_query_key_layer_scaling": cfg.apply_query_key_layer_scaling,
            "apply_residual_post_layernorm": cfg.apply_residual_post_layernorm,
            "amp_enabled": cfg.amp_enabled,
            "cfg": cfg,
        }
    def forward(self, input_ids, use_cache=False):
        """
        Args:
            input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary.
            use_cache (bool, optional): reuse/update the KV cache for incremental decoding.
        Returns:
            dict: ``{"logits": flow.Tensor}``
        """
        input_ids = input_ids.to_global(placement=dist.get_layer_placement(0))
        # Number of already-cached positions; offsets the position embeddings.
        if use_cache and self.past_key_values[0] is not None:
            self.past_length = self.past_key_values[0][0].size(-2)
        else:
            self.past_length = 0
        input_embeds = self.embeddings(input_ids, self.past_length)
        transformer_output = self.transformer(
            input_embeds,
            attention_mask=None,
            past_key_values=self.past_key_values,
            use_cache=use_cache,
        )
        # Weight tying: project with the token embedding matrix.
        logits = self.lm_head(transformer_output, self.embeddings.token_embeddings.weight)
        return {"logits": logits}
    def set_cache(self, past_key_values):
        """Install (or reset, when None) the per-layer KV cache."""
        self.past_length = 0 if past_key_values is None else past_key_values[0][0].shape[2]
        if past_key_values is None:
            past_key_values = [None] * self.cfg.hidden_layers
        assert len(past_key_values) == self.cfg.hidden_layers, (
            f"past_key_values's length {len(past_key_values)} doesn't match "
            f"num_layers:' {self.cfg.hidden_layers}"
        )
        self.past_key_values = past_key_values
    def _reorder_cache(self, beam_idx):
        """Reorder every cached key/value along the batch dim for beam search."""
        past_key_values = self.past_key_values
        return tuple(
            tuple(
                past_state.index_select(0, beam_idx.to(past_state.device))
                for past_state in layer_past
            )
            for layer_past in past_key_values
        )
    def prepare_inputs_for_generation(
        self,
        input_ids,
        past=None,
        use_cache=None,
    ):
        """Generator hook: with a cache present, feed only the newest token."""
        if past is not None:
            input_ids = input_ids[:, -1:]
            self.past_key_values = past
        return {"input_ids": input_ids, "use_cache": use_cache}
class GPTEmbedding(nn.Module):
    """Token plus learned absolute position embeddings for the GPT model."""

    def __init__(
        self,
        vocab_size,
        hidden_size,
        max_seq_length,
        init_method=init.xavier_normal_,
        embedding_dropout_prob=0.0,
        amp_enabled=False,
    ):
        super().__init__()
        # Word-piece embedding table.
        self.token_embeddings = VocabEmbedding(
            vocab_size, hidden_size, init_method=init_method, amp_enabled=amp_enabled
        )
        # Learned absolute position embedding table.
        self.position_embeddings = Embedding(
            max_seq_length, hidden_size, init_method=init_method, amp_enabled=amp_enabled
        )
        self.dropout = nn.Dropout(embedding_dropout_prob)
        # Constant [1, max_seq_length] position-index buffer, broadcast across ranks.
        self.position_ids = flow.arange(
            max_seq_length,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        ).unsqueeze(0)

    def forward(self, input_ids, past_length=0):
        """Embed ``input_ids``; ``past_length`` offsets positions for incremental decoding."""
        _, seq_len = input_ids.size()
        # Slice out this step's positions and align sbp with the input ids.
        pos_ids = self.position_ids[:, past_length : past_length + seq_len]
        pos_ids = pos_ids.expand_as(input_ids).to_global(sbp=input_ids.sbp)
        embeds = self.token_embeddings(input_ids) + self.position_embeddings(pos_ids)
        return self.dropout(embeds)
class Transformer(nn.Module):
    """Stack of causal ``TransformerLayer``s followed by a final LayerNorm."""
    def __init__(
        self,
        hidden_layers,
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        layernorm_epsilon=1e-5,
        init_method=init.xavier_normal_,
        output_layer_init_method=None,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        apply_residual_post_layernorm=False,
        set_cache=None,
    ):
        super().__init__()
        self.hidden_layers = hidden_layers
        # Callback (supplied by GPTModel) to store the refreshed KV cache.
        self.set_cache = set_cache
        def build_layer(layer_number):
            # layer_idx controls pipeline-stage placement of the layer.
            return TransformerLayer(
                hidden_size,
                ffn_hidden_size,
                num_attention_heads,
                attention_dropout_prob=attention_dropout_prob,
                output_dropout_prob=output_dropout_prob,
                layernorm_epsilon=layernorm_epsilon,
                init_method=init_method,
                output_layer_init_method=output_layer_init_method,
                bias_gelu_fusion=bias_gelu_fusion,
                bias_dropout_fusion=bias_dropout_fusion,
                scale_mask_softmax_fusion=scale_mask_softmax_fusion,
                apply_query_key_layer_scaling=apply_query_key_layer_scaling,
                apply_residual_post_layernorm=apply_residual_post_layernorm,
                attn_mask_type=AttnMaskType.causal,
                layer_idx=layer_number,
            )
        self.layers = nn.ModuleList([build_layer(i) for i in range(self.hidden_layers)])
        # Final LayerNorm lives on the last pipeline stage (layer_idx=-1).
        self.layernorm_f = LayerNorm(hidden_size, eps=layernorm_epsilon, layer_idx=-1)
    def forward(self, hidden_states, attention_mask, past_key_values=None, use_cache=False):
        """Run all layers; when ``use_cache``, each layer also returns its new KV pair."""
        if use_cache:
            presents = []
        for layer, past_key_value in zip(self.layers, past_key_values):
            hidden_states = layer(
                hidden_states,
                attention_mask,
                past_key_value=past_key_value,
                use_cache=use_cache,
            )
            if use_cache:
                # With caching, the layer returns (hidden_states, present_kv).
                hidden_states, present = hidden_states
                presents.append(present)
        output = self.layernorm_f(hidden_states)
        if use_cache:
            # Hand the collected per-layer KV pairs back to the owning model.
            self.set_cache(presents)
        return output
class GPTForPreTraining(nn.Module):
    """
    GPT Model with classification head on top.
    """
    def __init__(self, cfg) -> None:
        super().__init__()
        # Optionally warm-start from a HuggingFace checkpoint; otherwise random init.
        if cfg.pretrained_model_path is not None:
            loader = GPT2LoaderHuggerFace(GPTModel, cfg, cfg.pretrained_model_path)
            self.GPT_model = loader.load()
        else:
            self.GPT_model = GPTModel(cfg)
        self.loss_func = GPTLoss()
    def forward(
        self,
        input_ids,
        labels=None,
    ):
        """
        Args:
            input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary.
            labels (flow.LongTensor, optional): Labels for computing language modeling loss.
                None for evaluating. Defaults to None.
        Returns:
            dict:
                A dict containing :code:`loss_value` or :code:`logits`
                depending on training or evaluation.
                :code:`{"masked_lm_loss": loss_value}` when training,
                :code:`{"prediction_scores": logits}` when evaluating.
        """
        logits = self.GPT_model(input_ids)["logits"]
        if labels is not None:
            lm_loss = self.loss_func(logits, labels)
            return lm_loss
        else:
            return {"prediction_scores": logits}
    @staticmethod
    def set_pipeline_stage_id(model: nn.Module):
        """Assign each submodule to its pipeline stage (supports old and new OneFlow APIs)."""
        dist_utils = dist.get_dist_util()
        if hasattr(model.GPT_model.transformer.layernorm_f, "config"):
            # Old API in OneFlow 0.8
            for module_block in model.modules():
                if isinstance(module_block.origin, (GPTEmbedding)):
                    # Embeddings sit on the first stage.
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.origin, TransformerLayer):
                    # Each transformer layer follows its layer_idx placement.
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
                elif isinstance(module_block.origin, (LMLogits, GPTLoss)):
                    # Head and loss sit on the last stage.
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
                    )
            model.GPT_model.transformer.layernorm_f.config.set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
        else:
            # Newer OneFlow graph API: modules are addressed via to(nn.Module) /
            # to(nn.graph.GraphModule) instead of .origin / .config.
            for module_block in model.modules():
                if isinstance(module_block.to(nn.Module), (GPTEmbedding)):
                    module_block.to(nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.to(nn.Module), TransformerLayer):
                    module_block.to(nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
                elif isinstance(module_block.to(nn.Module), (LMLogits, GPTLoss)):
                    module_block.to(nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
                    )
            model.GPT_model.transformer.layernorm_f.to(nn.graph.GraphModule).set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
    @staticmethod
    def set_activation_checkpoint(model):
        """Enable activation checkpointing on every TransformerLayer."""
        for module_block in model.modules():
            # Old API in OneFlow 0.8
            if hasattr(module_block, "origin"):
                if isinstance(module_block.origin, TransformerLayer):
                    module_block.config.activation_checkpointing = True
            else:
                if isinstance(module_block.to(nn.Module), TransformerLayer):
                    module_block.to(nn.graph.GraphModule).activation_checkpointing = True
| 17,127 | 38.740139 | 100 | py |
libai | libai-main/projects/MagicPrompt/pipeline.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libai.inference.basic import BasePipeline
from libai.utils import distributed as dist
class TextGenerationPipeline(BasePipeline):
    """Inference pipeline: tokenize prompts, run GPT-2 generation, decode the output."""
    def load_pretrain_weight(self, libai_cfg_model, model_path, mode="huggingface"):
        """load pretrained model.
        Args:
            libai_cfg_model (libai.models): Lazy config Model in Libai, you can import it
                by `from libai.config.configs.common.models.bert
                import pretrain_model as libai_cfg_model`
            model_path (str): The directory path of pretrained model,
            mode (str): one of "huggingface", "libai", or "random" (untrained weights).
        """
        if mode == "huggingface":
            from libai.models.utils import GPT2LoaderHuggerFace
            model_loader = GPT2LoaderHuggerFace(
                libai_cfg_model,
                libai_cfg_model.cfg,
                model_path,
            )
            model = model_loader.load()
            model.eval()
            return model
        elif mode == "libai":
            from libai.models.utils import GPT2LoaderLiBai
            model_loader = GPT2LoaderLiBai(
                libai_cfg_model,
                libai_cfg_model.cfg,
                model_path,
            )
            model = model_loader.load()
            model.eval()
            return model
        elif mode == "random":
            # Build a freshly initialized (untrained) model from the config.
            from libai.engine import DefaultTrainer
            return DefaultTrainer.build_model(self.cfg)
        else:
            raise NotImplementedError
    def _parse_parameters(self, **pipeline_parameters):
        """Route all caller kwargs to the forward step; preprocess/postprocess take none."""
        preprocess_params = {}
        forward_params = {**pipeline_parameters}
        postprocess_params = {}
        return preprocess_params, forward_params, postprocess_params
    def preprocess(self, inputs, **kwargs) -> dict:
        # tokenizer encoder
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = "[PAD]"
        # "of" -> return OneFlow tensors; pad so prompts batch together.
        inputs = self.tokenizer(inputs, return_tensors="of", padding=True)
        inputs = {
            "input_ids": inputs.input_ids,
        }
        return inputs
    def forward(self, inputs, **kwargs) -> dict:
        """Generate up to 50 tokens per prompt; extra kwargs go to ``generate``."""
        outputs = self.model.generate(inputs["input_ids"], max_length=50, **kwargs)
        return {"return_ids": outputs}
    def postprocess(self, model_output_dict, **kwargs) -> dict:
        """Decode each generated sequence back to text."""
        return_ids = model_output_dict["return_ids"]
        records = [
            {"generated_text": self.tokenizer.decode(return_ids[i], skip_special_tokens=True)}
            for i in range(return_ids.size(0))
        ]
        return records
if __name__ == "__main__":
    # Example usage: single-process generation with a LiBai-format checkpoint.
    pipeline = TextGenerationPipeline(
        "projects/MagicPrompt/configs/gpt2_inference.py",
        data_parallel=1,
        tensor_parallel=1,
        pipeline_parallel=1,
        # pipeline_stage_id=[0] * 6 + [1] * 6,
        # pipeline_num_layers=12,
        model_path="/path/to/oneflow-model",
        mode="libai",
    )
    text = ["a dog", "a cute pig", "a cute girl"]
    output = pipeline(inputs=text)
    # Only rank 0 prints to avoid duplicated output in distributed runs.
    if dist.is_main_process():
        print(output)
| 3,639 | 32.703704 | 94 | py |
libai | libai-main/projects/MagicPrompt/datasets/datasets.py | import os
def convert_txt2json(file_path, output_dir=None):
    """Convert a plain-text file (one sample per line) into loose-JSON format.

    Each non-empty input line becomes one JSON object per output line:
    ``{"text": "<line>"}``. See:
    https://libai.readthedocs.io/en/latest/tutorials/basics/Preprocessing_Dataset.html

    Args:
        file_path (str): path of the source ``.txt`` file.
        output_dir (str, optional): directory for the output file. Defaults to
            the source file's directory (the previous hard-coded absolute path
            was not portable).

    Returns:
        str: path of the written ``*_magicprompy.json`` file.
    """
    import json

    base = os.path.splitext(os.path.basename(file_path))[0]
    if output_dir is None:
        output_dir = os.path.dirname(file_path) or "."
    # Write the .json file directly; no .txt intermediate + os.rename needed.
    target_file = os.path.join(output_dir, base + "_magicprompy.json")
    with open(file_path, encoding="utf-8") as f:
        lines = f.readlines()
    print(len(lines))
    with open(target_file, "w", encoding="utf-8") as f:
        for line in lines:
            # json.dumps escapes quotes/backslashes correctly, unlike the old
            # manual string concatenation which produced invalid JSON for them.
            f.write(json.dumps({"text": line.strip()}, ensure_ascii=False) + "\n")
    return target_file
if __name__ == "__main__":
    # Replace with the real dataset paths before running.
    convert_txt2json("/path/to/test.txt")
    convert_txt2json("/path/to/train.txt")
| 848 | 31.653846 | 95 | py |
libai | libai-main/projects/MagicPrompt/layers/transformer_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow.nn as nn
from libai.layers import build_activation
from libai.layers.droppath import DropPath
from libai.layers.layer_norm import LayerNorm
from libai.layers.mlp import MLP
from libai.utils import distributed as dist
from projects.MagicPrompt.layers.attention_layer import AttnMaskType, MultiheadAttention
class TransformerLayer(nn.Module):
    """A single transformer layer.
    Transformer layer takes input with size [bsz, seq_length, hidden size] and returns an
    output of the same size.
    The input and output has same sbp sign, (S(0), B).
    Arguments:
        hidden_size: size of hidden state.
        ffn_hidden_size: size of feed forword neural network.
        num_attention_heads: number of attention heads.
        is_decoder: used to specify whether this is transformer encoder layer or transformer
            decoder layer. Default: ``False``.
        attention_dropout_prob: dropout probability of attention weights.
        output_dropout_prob: dropout probability of output.
        layernorm_epsilon: epsilon used in layernorm layer. Default: `1e-5`.
        init_method: method to initialize the input layer weights.
        output_layer_init_method: method to initialize the output layer weights.
            If None, use `init_method`.
        bias_gelu_fusion: whether fuse add bias and gelu. Default: ``False``.
        bias_dropout_fusion: whether fuse add bias and dropout. Default: ``False``.
        scale_mask_softmax_fusion: whether to fuse scale, mask and softmax. Default: ``False``.
        apply_query_key_layer_scaling: if `true`, scaling the attention score by layer index.
            Default: ``False``.
        apply_residual_post_layernorm: if ``true``, use original BERT residual
            connection ordering. Otherwise, use Megatron BERT residual connection which
            is more stable when scaling model size introduced in
            https://arxiv.org/pdf/1909.08053.pdf.
            Default: ``False``.
        layer_idx: the layer index, which determines the placement.
    """
    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        is_decoder=False,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        drop_path_prob=0.0,
        layernorm_epsilon=1e-5,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        apply_residual_post_layernorm=False,
        attn_mask_type=AttnMaskType.padding,
        *,
        layer_idx=0
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.ffn_hidden_size = ffn_hidden_size
        self.num_attention_heads = num_attention_heads
        self.attention_dropout_prob = attention_dropout_prob
        self.output_dropout_prob = output_dropout_prob
        self.layernorm_epsilon = layernorm_epsilon
        self.attn_mask_type = attn_mask_type
        self.layer_idx = layer_idx
        self.is_decoder = is_decoder
        self.bias_gelu_fusion = bias_gelu_fusion
        self.bias_dropout_fusion = bias_dropout_fusion
        self.scale_mask_softmax_fusion = scale_mask_softmax_fusion
        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
        self.apply_residual_post_layernorm = apply_residual_post_layernorm
        self.init_method = init_method
        # Output projections fall back to the input init method when unspecified.
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        self.output_layer_init_method = output_layer_init_method
        # Stochastic depth; identity when drop_path_prob == 0.
        self.drop_path = DropPath(drop_path_prob) if drop_path_prob > 0.0 else nn.Identity()
        self.input_layernorm = LayerNorm(
            self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
        )
        self.self_attention = self.build_attention(is_cross_attention=False)
        self.post_attention_layernorm = LayerNorm(
            self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
        )
        # Decoder layers additionally attend over the encoder output.
        if self.is_decoder:
            self.cross_attention = self.build_attention(is_cross_attention=True)
            self.post_cross_attention_layernorm = LayerNorm(
                self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
            )
        self.mlp = MLP(
            self.hidden_size,
            self.ffn_hidden_size,
            self.output_dropout_prob,
            self.init_method,
            output_layer_init_method=self.output_layer_init_method,
            bias_gelu_fusion=self.bias_gelu_fusion,
            bias_dropout_fusion=self.bias_dropout_fusion,
            layer_idx=self.layer_idx,
        )
        # use gelu_tanh activation (GPT-2 uses the tanh approximation of GELU);
        # when bias_gelu_fusion is on, the fused kernel already applies it.
        if not bias_gelu_fusion:
            self.mlp.activation_func = build_activation("gelu_tanh")
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        use_cache=False,
    ):
        """
        Args:
            hidden_states: shape is (batch_size, seq_length, hidden_size),
                sbp signature is (S(0), B).
            attention_mask: the combination of key padding mask and casual mask of hidden states
                with shape (batch_size, 1, seq_length, seq_length) and the sbp
                signature is (S(0), B),
            encoder_states: encoder output with shape (batch_size, seq_length, hidden_size)
                and the sbp signature is (S(0), B), which will be used in cross attention.
            encoder_attention_mask: key padding mask of encoder states with shape
                (batch_size, 1, seq_length, seq_length) and the sbp signature is (S(0), B).
            past_key_value: tuple of key and value, each shape is
                (seq_length, bsz, num_heads, head_size), For decoder layer,
                the past_key_value contains the states both from self attention
                and cross attention.
            use_cache: it will be set to `True` when the model is in the inference phase and
                used for incremental decoding.
        """
        # Move inputs onto this layer's pipeline stage.
        hidden_states = hidden_states.to_global(placement=dist.get_layer_placement(self.layer_idx))
        if attention_mask is not None:
            attention_mask = attention_mask.to_global(
                placement=dist.get_layer_placement(self.layer_idx)
            )
        # Split the cache: decoder layers carry (self_k, self_v, cross_k, cross_v).
        if past_key_value is not None:
            if self.is_decoder:
                assert len(past_key_value) == 4
                self_attn_past_key_value = past_key_value[:2]
                cross_attn_past_key_value = past_key_value[2:]
            else:
                self_attn_past_key_value = past_key_value
                cross_attn_past_key_value = None
        else:
            self_attn_past_key_value, cross_attn_past_key_value = None, None
        # --- self attention sub-block (pre-LN) ---
        layernorm_output = self.input_layernorm(hidden_states)
        attention_output = self.self_attention(
            layernorm_output,
            attention_mask=attention_mask,
            past_key_value=self_attn_past_key_value,
            use_cache=use_cache,
        )
        attention_output = self.drop_path(attention_output)
        if use_cache:
            attention_output, presents = attention_output
        # Residual source depends on the BERT-vs-Megatron connection ordering.
        if self.apply_residual_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states
        hidden_states = residual + attention_output
        layernorm_output = self.post_attention_layernorm(hidden_states)
        # --- cross attention sub-block (decoder only) ---
        if self.is_decoder:
            attention_output = self.cross_attention(
                layernorm_output,
                encoder_states,
                attention_mask=encoder_attention_mask,
                past_key_value=cross_attn_past_key_value,
                use_cache=use_cache,
            )
            if use_cache:
                attention_output, decoder_presents = attention_output
                presents += decoder_presents
            attention_output = self.drop_path(attention_output)
            if self.apply_residual_post_layernorm:
                residual = layernorm_output
            else:
                residual = hidden_states
            hidden_states = residual + attention_output
            layernorm_output = self.post_cross_attention_layernorm(hidden_states)
        # --- feed-forward sub-block ---
        mlp_output = self.mlp(layernorm_output)
        mlp_output = self.drop_path(mlp_output)
        if self.apply_residual_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states
        output = residual + mlp_output
        if use_cache:
            output = (output, presents)
        return output
    def build_attention(self, is_cross_attention=False):
        """Construct a self- or cross-attention module with this layer's settings."""
        return MultiheadAttention(
            self.hidden_size,
            self.num_attention_heads,
            is_cross_attention=is_cross_attention,
            attention_dropout_prob=self.attention_dropout_prob,
            output_dropout_prob=self.output_dropout_prob,
            init_method=self.init_method,
            output_layer_init_method=self.output_layer_init_method,
            bias_dropout_fusion=self.bias_dropout_fusion,
            scale_mask_softmax_fusion=self.scale_mask_softmax_fusion,
            apply_query_key_layer_scaling=self.apply_query_key_layer_scaling,
            attn_mask_type=self.attn_mask_type,
            layer_idx=self.layer_idx,
        )
| 10,211 | 39.685259 | 99 | py |
libai | libai-main/projects/MagicPrompt/layers/__init__.py | 0 | 0 | 0 | py | |
libai | libai-main/projects/MagicPrompt/layers/attention_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Tuple
import oneflow as flow
from oneflow import nn
from libai.layers.attention import AttnMaskType
from libai.layers.linear import Linear
from libai.utils import distributed as dist
class MultiheadAttention(nn.Module):
"""Multi-head attention layer, support self attention and cross attention.
Args:
hidden_size: size of hidden state.
num_attention_heads: number of attention heads.
is_cross_attention: used to specify whether it is self attention or cross attention.
Defaults to False.
attention_dropout_prob: dropout probability of attention weights.
Defaults to 0.0.
output_dropout_prob: dropout probability of output. Defaults to 0.0.
init_method: method to initialize the input layer weights.
Defaults to ``init.xavier_normal_``.
output_layer_init_method: method to initialize the output layer weights.
If None, use ``init_method``.
bias_dropout_fusion: whether to fuse add bias and dropout.
Defaults to False.
scale_mask_softmax_fusion: whether to fuse scale, mask and softmax.
Defaults to False.
apply_query_key_layer_scaling: if `True`, scaling the attention score by layer index.
Defaults to False.
layer_idx: a layer_idx sign which determines the placements.
It will be used in pipeline parallelism. Defaults to 0.
"""
    def __init__(
        self,
        hidden_size,
        num_attention_heads,
        is_cross_attention=False,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        attn_mask_type=AttnMaskType.padding,
        *,
        layer_idx=0
    ):
        super().__init__()
        self.hidden_size = hidden_size
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        assert (
            hidden_size % num_attention_heads == 0
        ), "hidden_size must be divisible by num_attention_heads."
        self.num_heads = num_attention_heads
        self.head_size = hidden_size // num_attention_heads
        self.attn_mask_type = attn_mask_type
        self.attention_dropout_prob = attention_dropout_prob
        self.dropout = nn.Dropout(p=attention_dropout_prob)
        # Standard 1/sqrt(d) attention scaling.
        self.norm_factor = 1.0 / math.sqrt(float(self.head_size))
        self.coeff = None
        if apply_query_key_layer_scaling:
            # Layer-index-dependent scaling (Megatron-style numerical stabilization).
            self.coeff = layer_idx + 1
            self.norm_factor /= self.coeff
        self.is_cross_attention = is_cross_attention
        self.scale_mask_softmax_fusion = scale_mask_softmax_fusion
        self.bias_dropout_fusion = bias_dropout_fusion
        if self.bias_dropout_fusion:
            # Fused path: dense skips its bias add; dropout is applied together with it.
            self.output_dropout_prob = output_dropout_prob
        else:
            self.output_dropout = nn.Dropout(p=output_dropout_prob)
        if self.is_cross_attention:
            # Cross attention: Q from decoder states, K/V from encoder states.
            self.query = Linear(
                self.hidden_size,
                self.hidden_size,
                parallel="col",
                init_method=init_method,
                layer_idx=layer_idx,
            )
            self.key_value = Linear(
                self.hidden_size,
                self.hidden_size * 2,
                parallel="col",
                init_method=init_method,
                layer_idx=layer_idx,
            )
        else:
            # Self attention: fused QKV projection.
            self.query_key_value = Linear(
                self.hidden_size,
                self.hidden_size * 3,
                parallel="col",
                init_method=init_method,
                layer_idx=layer_idx,
            )
        self.dense = Linear(
            self.hidden_size,
            self.hidden_size,
            parallel="row",
            init_method=output_layer_init_method,
            skip_bias_add=self.bias_dropout_fusion,
            layer_idx=layer_idx,
        )
        # Lower-triangular causal mask buffer. NOTE(review): max positions are
        # hard-coded to 1024 here — presumably tied to GPT-2's max_seq_length;
        # confirm sequences never exceed 1024 before reuse.
        self.bias = flow.tril(flow.ones((1024, 1024), dtype=flow.uint8)).view(1, 1, 1024, 1024)
        self.bias = self.bias.to_global(
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(layer_idx),
        )
    def forward(
        self,
        hidden_states: flow.Tensor,
        encoder_states: flow.Tensor = None,
        attention_mask: flow.Tensor = None,
        past_key_value: Tuple[flow.Tensor, flow.Tensor] = None,
        use_cache: bool = False,
    ):
        """
        Args:
            hidden_states (flow.Tensor): shape is [bsz, tgt_len, hidden_size].
            encoder_states (flow.Tensor, optional): shape is [bsz, src_len, hidden_size].
                Defaults to None.
            attention_mask (flow.Tensor, optional): shape is [bsz, 1, tgt_len, src_len].
                It should be the combination of padding mask and casual mask.
                It is the padding mask of source input when used with self-attention in encoder.
                And it is the combination of padding mask of target input and casual mask when
                used with self-attention in decoder. It is the padding mask of source input when
                used with cross-attention in decoder.
                Defaults to None.
            past_key_value (Tuple[flow.Tensor, flow.Tensor], optional): tuple of key and value,
                each shape is [bsz, num_heads, src_len, head_size]. Defaults to None.
            use_cache (bool, optional): it will be set to True, when the model is in the inference
                phase and used for incremental decoding. Defaults to False.

        Returns:
            flow.Tensor of shape [bsz, tgt_len, hidden_size], or, when ``use_cache`` is
            True, a tuple ``(output, (key, value))`` carrying the updated cache.
        """
        # Align auxiliary global tensors onto the same placement as the input so the
        # matmuls below do not trigger implicit cross-placement communication.
        if encoder_states is not None:
            encoder_states = encoder_states.to_global(placement=hidden_states.placement)
        if attention_mask is not None:
            attention_mask = attention_mask.to_global(placement=hidden_states.placement)
        bsz, tgt_len = hidden_states.size()[:2]
        if self.is_cross_attention:
            # Cross-attention: queries come from the decoder stream, key/value either
            # from the incremental-decoding cache or freshly projected encoder states.
            query = self.query(hidden_states)
            query = query.view(bsz, -1, self.num_heads, self.head_size)
            query = query.permute(0, 2, 1, 3)
            if past_key_value is not None:
                key, value = past_key_value
            elif encoder_states is not None:
                key_value = self.key_value(encoder_states)
                key_value = key_value.view(bsz, -1, self.num_heads, 2 * self.head_size)
                key_value = key_value.permute(0, 2, 1, 3)
                key, value = flow.chunk(key_value, chunks=2, dim=-1)
            else:
                raise ValueError(
                    "past_key_value and encoder_states cannot be None at the same time."
                )
        else:
            # Self-attention: a single fused projection yields q, k, v in one matmul.
            query_key_value = self.query_key_value(hidden_states)
            query_key_value = query_key_value.view(bsz, -1, self.num_heads, 3 * self.head_size)
            query_key_value = query_key_value.permute(
                0, 2, 1, 3
            )  # [bsz, num_heads, src_len, 3 * head_size]
            query, key, value = flow.chunk(query_key_value, chunks=3, dim=-1)
            if past_key_value is not None:
                # Incremental decoding: prepend cached keys/values along the length dim.
                past_key, past_value = past_key_value
                key = flow.cat((past_key.type_as(key), key), dim=2)
                value = flow.cat((past_value.type_as(value), value), dim=2)
        if use_cache:
            past_key_value = (key, value)
        # Scaled dot-product scores; `alpha` folds the 1/sqrt(head_size) (and the
        # optional layer-scaling) factor into the matmul.
        attention_scores = flow.matmul(query, key, transpose_b=True, alpha=self.norm_factor)
        if not self.is_cross_attention:
            # GPT-style causal masking via the precomputed lower-triangular `self.bias`
            # buffer (built for up to 1024 positions in __init__); the slice handles the
            # incremental-decoding case where query_length < key_length.
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].to(
                flow.bool
            )
            causal_mask = causal_mask.repeat(attention_scores.size(0), 1, 1, 1)
            causal_mask = causal_mask.to_global(placement=attention_scores.placement)
            # Most-negative representable value for the score dtype, so masked slots
            # vanish after softmax.
            fill_value = flow.finfo(attention_scores.dtype).min
            if causal_mask.shape[0] == 1:
                mask_value = flow.ones(
                    causal_mask.size(),
                    dtype=attention_scores.dtype,
                    sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                    placement=attention_scores.placement,
                ).fill_(fill_value)
            else:
                # Batch dim > 1: split the mask across the data-parallel axis.
                mask_value = flow.ones(
                    causal_mask.size(),
                    dtype=attention_scores.dtype,
                    sbp=dist.get_nd_sbp([flow.sbp.split(0), flow.sbp.broadcast]),
                    placement=attention_scores.placement,
                ).fill_(fill_value)
            attention_scores = flow.where(causal_mask, attention_scores, mask_value)
        if attention_mask is not None:
            if self.scale_mask_softmax_fusion:
                # NOTE(review): only the padding mask type is handled on this fused path;
                # if attn_mask_type were causal here, `attention_weights` would be
                # unbound — confirm callers never hit that combination.
                if self.attn_mask_type == AttnMaskType.padding:
                    attention_mask = (
                        attention_mask.expand_as(attention_scores) if use_cache else attention_mask
                    )
                    attention_weights = flow._C.fused_scale_mask_softmax_dropout(
                        attention_scores,
                        attention_mask,
                        fill_value=-10000.0,
                        scale=self.coeff,
                        p=self.attention_dropout_prob,
                    )[0]
            else:
                # Unfused path: scale, mask with a large negative bias, softmax, dropout.
                if self.coeff is not None:
                    attention_scores *= self.coeff
                attention_scores = flow.mul(attention_scores, attention_mask)
                attention_scores = attention_scores - 10000.0 * (1 - attention_mask)
                attention_weights = flow.softmax(attention_scores, dim=-1)
                attention_weights = self.dropout(attention_weights)
        else:
            if self.scale_mask_softmax_fusion and self.attn_mask_type == AttnMaskType.causal:
                # Fused kernel applies the triangular mask, scaling, softmax and
                # dropout in one pass.
                attention_weights = flow._C.fused_scale_tril_softmax_mask_scale(
                    attention_scores,
                    p=self.attention_dropout_prob,
                    diagonal=0,
                    tril_scale_value=self.coeff,
                    tril_fill_value=-10000.0,
                )[0]
            else:
                attention_weights = flow.softmax(attention_scores, dim=-1)
                attention_weights = self.dropout(attention_weights)
        # Weighted sum over values, then restore [bsz, tgt_len, hidden] layout for
        # the output projection.
        context = flow.matmul(attention_weights, value)
        context = context.transpose(1, 2)
        output = self.dense(context.flatten(2))
        if self.bias_dropout_fusion:
            # dense() was built with skip_bias_add, so it returns (out, bias) and the
            # bias-add + dropout are fused here.
            output, bias = output
            output = flow._C.fused_bias_add_dropout(
                output, bias, p=self.output_dropout_prob, axis=output.ndim - 1
            )
        else:
            output = self.output_dropout(output)
        if use_cache:
            output = (output, past_key_value)
        return output
def extra_repr(self) -> str:
return "hidden_size={}, num_heads={}, is_cross_attention={}".format(
self.hidden_size,
self.num_heads,
self.is_cross_attention,
)
| 11,880 | 40.982332 | 99 | py |
libai | libai-main/projects/MagicPrompt/configs/gpt2_dataset.py | from libai.config import LazyCall
from libai.data import build_nlp_train_val_test_loader
from configs.common.data.gpt_dataset import tokenization, dataloader
from libai.tokenizer import GPT2Tokenizer
from libai.data.datasets import GPT2Dataset
from libai.data.data_utils import get_indexed_dataset
# Path prefix of the pre-built mmap-indexed training corpus (the .bin/.idx pair).
data_prefix = "/data/home/magicprompt/train/en_train_mmap_text_sentence"
# Override the default tokenizer with a GPT-2 BPE tokenizer bound to this corpus.
tokenization.tokenizer = LazyCall(GPT2Tokenizer)(
    vocab_file="/data/home/magicprompt/vocab.json",
    merges_file="/data/home/magicprompt/merges.txt",
    do_lower_case=True,
    do_chinese_wwm=True,
)
tokenization.append_eod = False
# Lazily-constructed train/val/test dataloader over a single GPT2Dataset split
# 949/50/1 (train/val/test) with a fixed shuffling seed for reproducibility.
dataloader.train = LazyCall(build_nlp_train_val_test_loader)(
    dataset=[
        LazyCall(GPT2Dataset)(
            name="gpt-2",
            data_prefix=data_prefix,
            indexed_dataset=LazyCall(get_indexed_dataset)(
                data_prefix=data_prefix,
                data_impl="mmap",
                skip_warmup=False,
            ),
            max_seq_length=1024,
            seed=1234,
        ),
    ],
    train_val_test_num_samples=None,  # a hint for deferred assignment
    splits=[[949.0, 50.0, 1.0]],
    weights=[1.0],
    num_workers=4,
)
| 1,185 | 30.210526 | 72 | py |
libai | libai-main/projects/MagicPrompt/configs/gpt2_training.py | from libai.config import LazyCall
from libai.evaluation import PPLEvaluator
from projects.MagicPrompt.configs.gpt2_inference import pretrain_model as model
from projects.MagicPrompt.configs.gpt2_dataset import dataloader, tokenization
from configs.common.optim import optim
from libai.scheduler import WarmupExponentialLR
from configs.common.train import train
from configs.common.models.graph import graph
# Data assets: GPT-2 BPE vocab/merges plus the mmap-indexed training corpus.
vocab_file = "/data/home/magicprompt/vocab.json"
merge_files = "/data/home/magicprompt/merges.txt"
train_data_prefix = "/data/home/magicprompt/train/en_train_mmap_text_sentence"
tokenization.tokenizer.vocab_file = vocab_file
tokenization.tokenizer.merges_file = merge_files
dataloader.train.dataset[0].data_prefix = train_data_prefix
dataloader.train.dataset[0].indexed_dataset.data_prefix = train_data_prefix
# gpt2 model config
# GPT-2 "small" hyper-parameters (12 layers, 12 heads, hidden 768) trained from
# scratch (no pretrained checkpoint), with the kernel-fusion switches enabled.
model.cfg.pretrained_model_path = None
model.cfg.embedding_dropout_prob = 0.1
model.cfg.attention_dropout_prob = 0.1
model.cfg.output_dropout_prob = 0.1
model.cfg.num_attention_heads = 12
model.cfg.hidden_size = 768
model.cfg.ffn_hidden_size = 4 * 768
model.cfg.hidden_layers = 12
model.cfg.max_seq_length = 1024
model.cfg.initializer_range = 0.02
model.cfg.vocab_size = 50257
model.cfg.layernorm_epsilon = 1e-5
model.cfg.use_scaled_init_for_output_weights = True
model.cfg.bias_gelu_fusion = True
model.cfg.bias_dropout_fusion = True
model.cfg.scale_mask_softmax_fusion = False
model.cfg.apply_query_key_layer_scaling = True
model.cfg.apply_residual_post_layernorm = False
model.cfg.amp_enabled = True
train.input_placement_device = "cpu"
train.dist.pipeline_num_layers = model.cfg.hidden_layers
# Keep each dataset's sequence length in sync with the model's context window.
for ds in dataloader.train.dataset:
    ds.max_seq_length = model.cfg.max_seq_length
optim.lr = 5.0e-05
# Training schedule: single-device parallelism, AMP on, perplexity evaluation
# every 4000 iterations, constant LR (gamma=1.0 exponential decay, no warmup).
train.update(
    dict(
        output_dir="projects/MagicPrompt/oneflow_magicprompt",
        train_micro_batch_size=4,
        test_micro_batch_size=4,
        train_epoch=33,
        train_iter=10000,
        log_period=50,
        amp=dict(enabled=True),
        warmup_ratio=0,
        checkpointer=dict(period=8000, max_to_keep=20),
        dist=dict(
            data_parallel_size=1,
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
            # pipeline_num_layers=model.cfg.hidden_layers,
        ),
        scheduler=LazyCall(WarmupExponentialLR)(
            warmup_factor=0.0,
            gamma=1.0,
            warmup_method="linear",
            warmup_iter=0.0,
        ),
        evaluation=dict(
            enabled=True,
            evaluator=LazyCall(PPLEvaluator)(),
            eval_iter=250,
            eval_period=4000,
        ),
        rdma_enabled=False,
    )
)
libai | libai-main/projects/MagicPrompt/configs/gpt2_inference.py | from configs.common.models.gpt import cfg
from libai.config import LazyCall
from projects.mock_transformers import mock_tokenization
from projects.MagicPrompt.gpt2 import GPTModel, GPTForPreTraining
from configs.common.data.gpt_dataset import tokenization
from configs.common.train import train
cfg.update(
# Model
embedding_dropout_prob=0,
attention_dropout_prob=0,
output_dropout_prob=0,
bias_gelu_fusion=False,
bias_dropout_fusion=False,
scale_mask_softmax_fusion=False,
apply_query_key_layer_scaling=False,
apply_residual_post_layernorm=False,
amp_enabled=False,
num_attention_heads=12,
hidden_size=768,
ffn_hidden_size=4 * 768,
hidden_layers=12,
max_seq_length=1024,
initializer_range=0.02,
vocab_size=50304,
# Inference
is_encoder_decoder=False,
max_length=20,
min_length=0,
do_sample=False,
early_stopping=False,
num_beams=1,
num_beam_groups=1,
diversity_penalty=0.0,
temperature=1.0,
top_k=50,
top_p=1.0,
typical_p=1.0,
repetition_penalty=1.0,
length_penalty=1.0,
no_repeat_ngram_size=0,
encoder_no_repeat_ngram_size=0,
num_return_sequences=1,
chunk_size_feed_forward=0,
output_scores=False,
forced_bos_token_id=None,
forced_eos_token_id=None,
remove_invalid_values=False,
exponential_decay_length_penalty=None,
use_cache=True,
# Tokenizer
pad_token_id=0,
eos_token_id=50256,
bos_token_id=50256,
sep_token_id=None,
decoder_start_token_id=None,
# train
pretrained_model_path="/data/home/magicprompt",
)
model = LazyCall(GPTModel)(cfg=cfg)
pretrain_model = LazyCall(GPTForPreTraining)(cfg=cfg)
tokenization.tokenizer = LazyCall(mock_tokenization.GPT2Tokenizer)(
vocab_file="/data/home/magicprompt/vocab.json",
merges_file="/data/home/magicprompt/merges.txt",
)
| 1,879 | 26.246377 | 67 | py |
libai | libai-main/projects/Stable_Diffusion/modeling.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel
from diffusers.loaders import AttnProcsLayers
from diffusers.models.cross_attention import LoRACrossAttnProcessor
from oneflow import nn
from oneflow.nn import functional as F
from transformers import CLIPTextModel, CLIPTokenizer
from projects.mock_transformers import init_env # noqa
LoRACrossAttnProcessor.forward = LoRACrossAttnProcessor.__call__
class StableDiffusion(nn.Module):
def __init__(
self,
model_path,
train_vae=False,
train_text_encoder=False,
train_with_lora=False,
):
super().__init__()
self.model_path = model_path
self.tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer")
self.text_encoder = CLIPTextModel.from_pretrained(model_path, subfolder="text_encoder")
self.vae = AutoencoderKL.from_pretrained(model_path, subfolder="vae")
self.unet = UNet2DConditionModel.from_pretrained(model_path, subfolder="unet")
self.noise_scheduler = DDPMScheduler.from_pretrained(model_path, subfolder="scheduler")
for name in self.noise_scheduler.__dict__.keys():
if flow.is_tensor(getattr(self.noise_scheduler, name)):
setattr(
self.noise_scheduler,
name,
getattr(self.noise_scheduler, name).to_global(
sbp=flow.sbp.broadcast, placement=flow.env.all_device_placement("cuda")
),
)
if not train_with_lora:
if not train_vae:
self.vae.requires_grad_(False)
if not train_text_encoder:
self.text_encoder.requires_grad_(False)
else:
self.vae.requires_grad_(False)
self.text_encoder.requires_grad_(False)
self.unet.requires_grad_(False)
# Set correct lora layers
lora_attn_procs = {}
for name in self.unet.attn_processors.keys():
cross_attention_dim = (
None
if name.endswith("attn1.processor")
else self.unet.config.cross_attention_dim
)
if name.startswith("mid_block"):
hidden_size = self.unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(reversed(self.unet.config.block_out_channels))[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = self.unet.config.block_out_channels[block_id]
lora_attn_procs[name] = LoRACrossAttnProcessor(
hidden_size=hidden_size, cross_attention_dim=cross_attention_dim
)
self.unet.set_attn_processor(lora_attn_procs)
self.lora_layers = AttnProcsLayers(self.unet.attn_processors)
def forward(self, pixel_values, input_ids):
from oneflow.utils.global_view import global_mode
placement_sbp_dict = dict(
placement=flow.env.all_device_placement("cuda"),
sbp=flow.sbp.split(0),
)
with global_mode(True, **placement_sbp_dict):
latents = self.vae.encode(pixel_values).latent_dist.sample()
latents = latents * 0.18215
# Sample noise that we'll add to the latents
noise = flow.randn(
latents.shape, sbp=latents.sbp, placement=latents.placement, dtype=self.unet.dtype
).to(latents.device)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = flow.randint(
0,
self.noise_scheduler.config.num_train_timesteps,
(bsz,),
sbp=latents.sbp,
placement=latents.placement,
dtype=flow.long,
)
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = self.noise_scheduler.add_noise(latents, noise, timesteps)
noisy_latents = noisy_latents.to(dtype=self.unet.dtype)
# Get the text embedding for conditioning
encoder_hidden_states = self.text_encoder(input_ids)[0]
# Predict the noise residual
noise_pred = self.unet(noisy_latents, timesteps, encoder_hidden_states).sample
if self.noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif self.noise_scheduler.config.prediction_type == "v_prediction":
target = self.noise_scheduler.get_velocity(latents, noise, timesteps)
else:
raise ValueError(
f"Unknown prediction type {self.noise_scheduler.config.prediction_type}"
)
loss = F.mse_loss(noise_pred.float(), target.float(), reduction="mean")
return {"loss": loss}
@staticmethod
def set_activation_checkpoint(model):
from diffusers.models.unet_2d_blocks import (
DualTransformer2DModel,
ResnetBlock2D,
Transformer2DModel,
)
from transformers.models.clip.modeling_clip import CLIPEncoder
for module_block in model.modules():
prefix_name = module_block.to(nn.graph.GraphModule).name_prefix
# unset vae checkpointing
if prefix_name.startswith("model.vae"):
continue
# set clip checkpointing
elif isinstance(module_block.to(nn.Module), CLIPEncoder):
module_block.to(nn.graph.GraphModule).activation_checkpointing = True
# set unet checkpointing
elif isinstance(
module_block.to(nn.Module),
(ResnetBlock2D, DualTransformer2DModel, Transformer2DModel),
):
module_block.to(nn.graph.GraphModule).activation_checkpointing = True
| 6,819 | 41.360248 | 98 | py |
libai | libai-main/projects/Stable_Diffusion/dataset.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import numpy as np
import oneflow as flow
from flowvision import transforms
from oneflow.utils.data import Dataset
from PIL import Image
from libai.data.structures import DistTensorData, Instance
class DreamBoothDataset(Dataset):
"""
A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
It pre-processes the images and the tokenizes prompts.
"""
def __init__(
self,
instance_data_root,
instance_prompt,
tokenizer,
tokenizer_pretrained_folder=None,
class_data_root=None,
class_prompt=None,
size=512,
center_crop=False,
):
self.size = size
self.center_crop = center_crop
self.tokenizer = tokenizer
if tokenizer_pretrained_folder:
self.tokenizer = self.tokenizer.from_pretrained(
tokenizer_pretrained_folder[0], subfolder=tokenizer_pretrained_folder[1]
)
self.instance_data_root = Path(instance_data_root)
if not self.instance_data_root.exists():
raise ValueError("Instance images root doesn't exists.")
self.instance_images_path = list(Path(instance_data_root).iterdir())
self.num_instance_images = len(self.instance_images_path)
self.instance_prompt = instance_prompt
self._length = self.num_instance_images
if class_data_root is not None:
self.class_data_root = Path(class_data_root)
self.class_data_root.mkdir(parents=True, exist_ok=True)
self.class_images_path = list(self.class_data_root.iterdir())
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
self.class_prompt = class_prompt
else:
self.class_data_root = None
self.image_transforms = transforms.Compose(
[
transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __len__(self):
return self._length
def __getitem__(self, index):
if self.class_data_root and np.random.rand() > 0.5:
class_image = Image.open(self.class_images_path[index % self.num_class_images])
if not class_image.mode == "RGB":
class_image = class_image.convert("RGB")
instance_images = self.image_transforms(class_image)
input_ids = self.tokenizer(
self.class_prompt,
truncation=True,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="np",
).input_ids
else:
instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
if not instance_image.mode == "RGB":
instance_image = instance_image.convert("RGB")
instance_images = self.image_transforms(instance_image)
input_ids = self.tokenizer(
self.instance_prompt,
truncation=True,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="np",
).input_ids
return Instance(
pixel_values=DistTensorData(instance_images.to(dtype=flow.float32)),
input_ids=DistTensorData(flow.tensor(input_ids[0])),
)
class PromptDataset(Dataset):
    """A simple dataset to prepare the prompts to generate class images on multiple GPUs."""

    def __init__(self, prompt, num_samples):
        # One shared prompt, replicated across `num_samples` indices.
        self.prompt = prompt
        self.num_samples = num_samples

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        # Pair the shared prompt with this item's position in the dataset.
        return {"prompt": self.prompt, "index": index}
class TXTDataset(Dataset):
    """Image/caption folder dataset: ``*.jpg`` files paired with same-name ``*.txt`` captions.

    Scans a folder for jpg images; each item returns the transformed image plus
    the tokenized caption read from the sibling .txt file.
    """

    def __init__(
        self,
        foloder_name,  # (sic) folder path — parameter name kept for caller compatibility
        tokenizer,
        tokenizer_pretrained_folder=None,
        thres=0.2,  # NOTE(review): unused in this class — confirm whether filtering was intended
        size=512,
        center_crop=False,
    ):
        print(f"Loading folder data from {foloder_name}.")
        self.image_paths = []
        self.tokenizer = tokenizer
        # `tokenizer` may be passed as a class; instantiate from (root, subfolder).
        if tokenizer_pretrained_folder:
            self.tokenizer = self.tokenizer.from_pretrained(
                tokenizer_pretrained_folder[0], subfolder=tokenizer_pretrained_folder[1]
            )
        for each_file in os.listdir(foloder_name):
            if each_file.endswith(".jpg"):
                self.image_paths.append(os.path.join(foloder_name, each_file))
        # Resize -> (center|random) crop -> tensor -> normalize to [-1, 1].
        self.image_transforms = transforms.Compose(
            [
                transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
                transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5]),
            ]
        )
        print("Done loading data. Len of images:", len(self.image_paths))

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        img_path = str(self.image_paths[idx])
        instance_image = Image.open(img_path)
        if not instance_image.mode == "RGB":
            instance_image = instance_image.convert("RGB")
        instance_images = self.image_transforms(instance_image)
        # The caption lives next to the image with a .txt extension.
        caption_path = img_path.replace(".jpg", ".txt")
        with open(caption_path, "r") as f:
            caption = f.read()
        input_ids = self.tokenizer(
            caption,
            padding="max_length",
            truncation=True,
            max_length=self.tokenizer.model_max_length,
            return_tensors="np",
        ).input_ids
        # Wrap in LiBai's Instance/DistTensorData for global placement.
        return Instance(
            pixel_values=DistTensorData(instance_images.to(dtype=flow.float32)),
            input_ids=DistTensorData(flow.tensor(input_ids[0])),
        )
libai | libai-main/projects/Stable_Diffusion/generate_prior_image.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import hashlib
from pathlib import Path
import oneflow as flow
from diffusers import OneFlowStableDiffusionPipeline
from tqdm import tqdm
from projects.Stable_Diffusion.dataset import PromptDataset
def parse_args(input_args=None):
    """Define and parse the command-line options for prior-image generation.

    Args:
        input_args: Optional explicit list of argument strings; when ``None``
            the arguments come from ``sys.argv`` as usual.

    Returns:
        argparse.Namespace holding the parsed options.
    """
    p = argparse.ArgumentParser(description="Simple example of a training script.")
    # Required checkpoint/data options.
    p.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    p.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help=(
            "Revision of pretrained model identifier from huggingface.co/models. "
            "Trainable model components should be float32 precision."
        ),
    )
    p.add_argument(
        "--class_data_dir",
        type=str,
        default=None,
        required=True,
        help="A folder containing the training data of class images.",
    )
    p.add_argument(
        "--class_prompt",
        type=str,
        default=None,
        required=True,
        help="The prompt to specify images in the same class as provided instance images.",
    )
    # Sampling options.
    p.add_argument(
        "--num_class_images",
        type=int,
        default=100,
        required=False,
        help=(
            "Minimal class images for prior preservation loss. "
            "If there are not enough images already present in "
            "class_data_dir, additional images will be sampled with class_prompt."
        ),
    )
    p.add_argument(
        "--sample_batch_size",
        type=int,
        default=4,
        help="Batch size (per device) for sampling images.",
    )
    p.add_argument(
        "--prior_generation_precision",
        type=str,
        default="fp16",
        choices=["no", "fp32", "fp16", "bf16"],
        help=(
            "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16)."
            " Bf16 requires PyTorch >=1.10.and an Nvidia Ampere GPU. "
            " Default to fp16 if a GPU is available else fp32."
        ),
    )
    # argparse treats a ``None`` argument list exactly like "use sys.argv",
    # so one call covers both the explicit-list and default cases.
    return p.parse_args(input_args)
def main(args):
    """Generate class (prior-preservation) images until ``args.num_class_images`` exist.

    Counts the images already in ``args.class_data_dir`` and, if fewer than
    requested, samples the missing ones from a Stable Diffusion pipeline with
    ``args.class_prompt``. Files are named ``<index>-<sha1>.jpg`` so repeated
    runs do not collide.

    Args:
        args: Namespace produced by ``parse_args``.
    """
    class_images_dir = Path(args.class_data_dir)
    if not class_images_dir.exists():
        class_images_dir.mkdir(parents=True)
    cur_class_images = len(list(class_images_dir.iterdir()))

    if args.prior_generation_precision == "fp32":
        torch_dtype = flow.float32
    elif args.prior_generation_precision == "fp16":
        torch_dtype = flow.float16
    elif args.prior_generation_precision == "bf16":
        torch_dtype = flow.bfloat16
    else:
        # "no": fall back to the documented default — fp16 on GPU, fp32 on CPU.
        # Without this branch `torch_dtype` was unbound for
        # `--prior_generation_precision no` (a valid choice) and raised
        # NameError as soon as generation was needed.
        torch_dtype = flow.float16 if flow.cuda.is_available() else flow.float32

    if cur_class_images < args.num_class_images:
        pipeline = OneFlowStableDiffusionPipeline.from_pretrained(
            args.pretrained_model_name_or_path,
            use_auth_token=True,
            revision=args.revision,
            torch_dtype=torch_dtype,
        ).to("cuda")
        # Per-image progress bars would be noisy; tqdm below tracks batches instead.
        pipeline.set_progress_bar_config(disable=True)

        num_new_images = args.num_class_images - cur_class_images
        print(f"Number of class images to sample: {num_new_images}.")

        sample_dataset = PromptDataset(args.class_prompt, num_new_images)
        sample_dataloader = flow.utils.data.DataLoader(
            sample_dataset, batch_size=args.sample_batch_size
        )
        for example in tqdm(sample_dataloader, desc="Generating class images"):
            images = pipeline(example["prompt"]).images
            for i, image in enumerate(images):
                # Content hash in the filename avoids overwriting earlier samples.
                hash_image = hashlib.sha1(image.tobytes()).hexdigest()
                image_filename = (
                    class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
                )
                image.save(image_filename)
        # Free GPU memory held by the pipeline before returning.
        del pipeline
        if flow.cuda.is_available():
            flow.cuda.empty_cache()
    return
# Script entry point: parse CLI options, then top up the class-image folder.
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 4,744 | 31.951389 | 99 | py |
libai | libai-main/projects/Stable_Diffusion/train_net.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import random
import sys
import numpy as np
import oneflow as flow
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from onediff import OneFlowStableDiffusionPipeline # noqa
from libai.config import LazyConfig, default_argument_parser, try_get_key # noqa
from libai.engine import DefaultTrainer, default_setup, hooks # noqa
from libai.engine.trainer import HookBase # noqa
from libai.utils import distributed as dist # noqa
from libai.utils.checkpoint import Checkpointer # noqa
logger = logging.getLogger("libai." + __name__)
class SdCheckpointer(HookBase):
def __init__(
self,
model: flow.nn.Module,
save_path: str,
) -> None:
self._model = model
self._save_path = save_path
def after_train(self):
def model_to_local(model):
model.zero_grad(set_to_none=True)
model = model.to_global(
sbp=flow.sbp.broadcast, placement=flow.env.all_device_placement("cpu")
)
return model.to_local()
if hasattr(self._model, "lora_layers"):
unet = model_to_local(self._model.unet)
save_path = os.path.join(self._save_path, "model_sd_for_inference")
logger.info(f"saving stable diffusion model to {save_path}")
if dist.is_main_process():
unet.save_attn_procs(save_path)
else:
pipeline = OneFlowStableDiffusionPipeline.from_pretrained(
self._model.model_path,
tokenizer=self._model.tokenizer,
text_encoder=model_to_local(self._model.text_encoder),
vae=model_to_local(self._model.vae),
unet=model_to_local(self._model.unet),
)
save_path = os.path.join(self._save_path, "model_sd_for_inference")
logger.info(f"saving stable diffusion model to {save_path}")
if dist.is_main_process():
pipeline.save_pretrained(save_path)
class Trainer(DefaultTrainer):
    """DefaultTrainer specialization that swaps in Stable-Diffusion-aware hooks."""

    def build_hooks(self):
        """
        Build a list of default hooks, including timing, evaluation,
        checkpointing, lr scheduling, precise BN, writing events.

        Returns:
            list[HookBase]:
        """
        ret = [
            hooks.IterationTimer(),
            hooks.LRScheduler(),  # for beauty lr scheduler printer in `nn.Graph` mode
            SdCheckpointer(self.model, self.cfg.train.output_dir),
        ]

        # LoRA runs skip the periodic full-weight checkpointer; SdCheckpointer
        # above saves the adapter weights once at the end instead.
        if not try_get_key(self.cfg, "model.train_with_lora", default=False):
            ret.append(
                hooks.PeriodicCheckpointer(self.checkpointer, self.cfg.train.checkpointer.period),
            )

        if self.cfg.train.evaluation.enabled:
            assert self.cfg.train.evaluation.eval_iter > 0, "run_iter must be positive number"

            # NOTE(review): this closure is defined but never attached to an
            # EvalHook here, so periodic evaluation does not actually run —
            # confirm whether an `hooks.EvalHook` registration was dropped.
            def test_and_save_results():
                model = self.graph_eval if self.cfg.graph.enabled else self.model
                self._last_eval_results = self.test(self.cfg, self.test_loader, model)
                return self._last_eval_results

        if dist.is_main_process():
            # run writers in the end, so that evaluation metrics are written
            ret.append(hooks.PeriodicWriter(self.build_writers(), self.cfg.train.log_period))

        return ret
def main(args):
    """Load the lazy config, seed every RNG per-rank, then train or evaluate.

    Args:
        args: parsed CLI namespace from ``default_argument_parser`` (expects
            ``config_file``, ``opts``, ``fast_dev_run``, ``eval_only``,
            ``resume``).
    """
    cfg = LazyConfig.load(args.config_file)
    cfg = LazyConfig.apply_overrides(cfg, args.opts)
    default_setup(cfg, args)

    # Offset the seed by rank so each process draws distinct random streams.
    seed_for_rank = cfg.train.seed + flow.env.get_rank()
    flow.manual_seed(seed_for_rank)
    flow.cuda.manual_seed(seed_for_rank)
    np.random.seed(seed_for_rank)
    random.seed(seed_for_rank)

    # Smoke-test mode: shrink the run to a handful of iterations.
    if args.fast_dev_run:
        cfg.train.train_epoch = 0
        cfg.train.train_iter = 20
        cfg.train.evaluation.eval_period = 10
        cfg.train.log_period = 1

    if args.eval_only:
        # Evaluation-only path: build model + tokenizer, restore weights, and
        # run the test loop without constructing a full trainer.
        tokenizer = None
        if try_get_key(cfg, "tokenization") is not None:
            tokenizer = Trainer.build_tokenizer(cfg)
        model = Trainer.build_model(cfg)
        Checkpointer(model, save_dir=cfg.train.output_dir).resume_or_load(
            cfg.train.load_weight, resume=args.resume
        )
        if try_get_key(cfg, "train.graph.enabled", default=False):
            model = Trainer.build_graph(cfg, model, is_train=False)
        test_loader = Trainer.build_test_loader(cfg, tokenizer)
        if len(test_loader) == 0:
            logger.info("No dataset in dataloader.test, please set dataset for dataloader.test")
        _ = Trainer.test(cfg, test_loader, model)
        return

    trainer = Trainer(cfg)
    return trainer.train()
# Script entry point: parse CLI args and hand off to main().
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    main(args)
| 5,384 | 35.385135 | 98 | py |
libai | libai-main/projects/Stable_Diffusion/configs/lora_config.py | from omegaconf import OmegaConf
from libai.config import get_config
from libai.config import LazyCall
from libai.data.build import build_nlp_train_loader, build_nlp_test_loader
from projects.Stable_Diffusion.dataset import DreamBoothDataset
from projects.Stable_Diffusion.modeling import StableDiffusion
from transformers import CLIPTokenizer

# Base recipes shared across LiBai projects.
optim = get_config("common/optim.py").optim
graph = get_config("common/models/graph.py").graph
train = get_config("common/train.py").train

# Global-view mode is required so the SD submodules run under one placement.
graph.global_mode.enabled = True

dataloader = OmegaConf.create()
# DreamBooth-style instance images with a fixed subject prompt.
dataloader.train = LazyCall(build_nlp_train_loader)(
    dataset=[
        LazyCall(DreamBoothDataset)(
            instance_data_root="/path/to/demo_dog/",
            instance_prompt="a photo of sks dog",
            tokenizer=CLIPTokenizer,
            tokenizer_pretrained_folder=["CompVis/stable-diffusion-v1-4", "tokenizer"],
        )
    ],
    num_workers=4,
)

optim.lr = 5e-4

# LoRA fine-tune: base weights frozen, only adapter layers train.
model = LazyCall(StableDiffusion)(model_path="CompVis/stable-diffusion-v1-4", train_with_lora=True)

# Short single-device run (800 iters) with AMP, ZeRO stage 2 and activation
# checkpointing; no evaluation and effectively no periodic checkpoints.
train.update(
    dict(
        rdma_enabled=True,
        activation_checkpoint=dict(enabled=True),
        zero_optimization=dict(
            enabled=True,
            stage=2,
        ),
        checkpointer=dict(period=5000000),
        amp=dict(enabled=True),
        output_dir="output/stable_diffusion/",
        train_micro_batch_size=1,
        test_micro_batch_size=1,
        train_epoch=0,
        train_iter=800,
        log_period=1,
        warmup_ratio=0.01,
        dist=dict(
            data_parallel_size=1,
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
            pipeline_stage_id=None,
            pipeline_num_layers=None,
        ),
        evaluation=dict(
            enabled=False,
        ),
    )
)
| 1,799 | 28.032258 | 99 | py |
libai | libai-main/projects/Stable_Diffusion/configs/prior_preservation_config.py | from omegaconf import OmegaConf
import oneflow as flow
from libai.optim import get_default_optimizer_params
from libai.config import LazyCall
from libai.data.build import build_nlp_train_loader, build_nlp_test_loader
from projects.Stable_Diffusion.dataset import DreamBoothDataset

from transformers import CLIPTokenizer

# Inherit the full-fine-tune DreamBooth recipe and override below.
from .dreambooth_config import (
    train,
    optim,
    graph,
    model,
)

# Prior preservation trains the text encoder too, at a lower learning rate.
optim.lr = 2e-6
model.train_text_encoder = True

dataloader = OmegaConf.create()
# Dataset mixes instance images with pre-generated class ("prior") images.
dataloader.train = LazyCall(build_nlp_train_loader)(
    dataset=[
        LazyCall(DreamBoothDataset)(
            instance_data_root="path/to/demo_dog/",
            instance_prompt="a photo of sks dog",
            class_data_root="/path/to/prior_dog/",
            class_prompt="a photo of dog",
            tokenizer=CLIPTokenizer,
            tokenizer_pretrained_folder=["CompVis/stable-diffusion-v1-4", "tokenizer"],
        )
    ],
    num_workers=4,
)

# Longer schedule than the plain DreamBooth run, with sparser logging.
train.train_iter = 2000
train.log_period = 10
| 993 | 25.864865 | 87 | py |
libai | libai-main/projects/Stable_Diffusion/configs/dreambooth_config.py | from omegaconf import OmegaConf
# DreamBooth full fine-tuning recipe for Stable Diffusion v1.4.
from libai.config import get_config
from libai.config import LazyCall
from libai.data.build import build_nlp_train_loader, build_nlp_test_loader
from projects.Stable_Diffusion.dataset import DreamBoothDataset
from projects.Stable_Diffusion.modeling import StableDiffusion
from transformers import CLIPTokenizer

# Shared defaults pulled in from LiBai's common config files.
optim = get_config("common/optim.py").optim
graph = get_config("common/models/graph.py").graph
train = get_config("common/train.py").train
graph.global_mode.enabled = True

# Training dataloader: instance images plus their identifier prompt.
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_loader)(
    dataset=[
        LazyCall(DreamBoothDataset)(
            instance_data_root="/path/to/demo_dog/",  # placeholder — point at your images
            instance_prompt="a photo of sks dog",
            tokenizer=CLIPTokenizer,
            tokenizer_pretrained_folder=["CompVis/stable-diffusion-v1-4", "tokenizer"],
        )
    ],
    num_workers=4,
)

optim.lr = 5e-6
model = LazyCall(StableDiffusion)(model_path="CompVis/stable-diffusion-v1-4")

# Trainer: AMP + ZeRO stage 2 + activation checkpointing, single-device layout.
train.update(
    dict(
        rdma_enabled=True,
        activation_checkpoint=dict(enabled=True),
        zero_optimization=dict(
            enabled=True,
            stage=2,
        ),
        # period far exceeds train_iter=800, so no intermediate checkpoints are written.
        checkpointer=dict(period=5000000),
        amp=dict(enabled=True),
        output_dir="output/stable_diffusion/",
        train_micro_batch_size=1,
        test_micro_batch_size=1,
        train_epoch=0,
        train_iter=800,
        log_period=1,
        warmup_ratio=0.01,
        dist=dict(
            data_parallel_size=1,
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
            pipeline_stage_id=None,
            pipeline_num_layers=None,
        ),
        evaluation=dict(
            enabled=False,
        ),
    )
)
| 1,777 | 27.677419 | 87 | py |
libai | libai-main/projects/Stable_Diffusion/configs/config.py | from omegaconf import OmegaConf
# Short smoke-test recipe for Stable Diffusion v1.4 on caption .txt data (MSCOCO shard).
from libai.config import get_config
from libai.config import LazyCall
from libai.data.build import build_nlp_train_loader, build_nlp_test_loader
from projects.Stable_Diffusion.dataset import TXTDataset
from projects.Stable_Diffusion.modeling import StableDiffusion
from transformers import CLIPTokenizer

# Shared defaults pulled in from LiBai's common config files.
optim = get_config("common/optim.py").optim
graph = get_config("common/models/graph.py").graph
train = get_config("common/train.py").train
graph.global_mode.enabled = True

dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_loader)(
    dataset=[
        LazyCall(TXTDataset)(
            # NOTE(review): "foloder_name" presumably mirrors a misspelled parameter
            # in TXTDataset — renaming it must be coordinated with that class.
            foloder_name="/path/to/mscoco/00000",
            tokenizer=CLIPTokenizer,
            tokenizer_pretrained_folder=["CompVis/stable-diffusion-v1-4", "tokenizer"],
        )
    ],
    num_workers=4,
)

model = LazyCall(StableDiffusion)(model_path="CompVis/stable-diffusion-v1-4")

# Trainer: AMP + ZeRO stage 2 + activation checkpointing; only 20 iterations.
train.update(
    dict(
        rdma_enabled=True,
        activation_checkpoint=dict(enabled=True),
        zero_optimization=dict(
            enabled=True,
            stage=2,
        ),
        # period far exceeds train_iter=20, so no intermediate checkpoints are written.
        checkpointer=dict(period=5000000),
        amp=dict(enabled=True),
        output_dir="output/stable_diffusion/",
        train_micro_batch_size=1,
        test_micro_batch_size=1,
        train_epoch=0,
        train_iter=20,
        log_period=1,
        warmup_ratio=0.01,
        dist=dict(
            data_parallel_size=1,
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
            pipeline_stage_id=None,
            pipeline_num_layers=None,
        ),
        evaluation=dict(
            enabled=False,
        ),
    )
)
| 1,693 | 27.233333 | 87 | py |
libai | libai-main/tests/test_trainer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import oneflow as flow
from omegaconf import OmegaConf
from oneflow.utils.data import DataLoader, TensorDataset
sys.path.append(".")
from libai.config import LazyCall, default_argument_parser
from libai.engine import DefaultTrainer, default_setup
from libai.optim import get_default_optimizer_params
from libai.scheduler import WarmupMultiStepLR
from tests.layers.test_trainer_model import build_graph, build_model
def setup(args):
    """
    Create the demo training config and perform basic setups.

    Args:
        args: command-line namespace produced by ``default_argument_parser``.

    Returns:
        The populated OmegaConf config, after ``default_setup`` has run.
    """
    cfg = OmegaConf.create()
    # Single-device (no parallelism) training schedule for the smoke test.
    cfg.train = dict(
        output_dir="./demo_output",
        train_micro_batch_size=32,
        test_micro_batch_size=32,
        dist=dict(
            data_parallel_size=1,
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
            pipeline_num_layers=4,
        ),
        start_iter=0,
        train_iter=20,
        train_epoch=1,
        warmup_ratio=0.05,
        lr_warmup_fraction=0.01,
        lr_decay_iter=6000,
        eval_period=1000,
        log_period=1,
        checkpointer=dict(period=100),
        nccl_fusion_threshold_mb=16,
        nccl_fusion_max_ops=24,
        scheduler=LazyCall(WarmupMultiStepLR)(
            warmup_factor=0.001,
            # alpha=0.01,
            warmup_method="linear",
            milestones=[0.1, 0.2],
        ),
    )
    cfg.optim = LazyCall(flow.optim.AdamW)(
        parameters=LazyCall(get_default_optimizer_params)(
            # parameters.model is meant to be set to the model object, before
            # instantiating the optimizer.
            clip_grad_max_norm=1.0,
            clip_grad_norm_type=2.0,
            weight_decay_norm=0.0,
            weight_decay_bias=0.0,
        ),
        lr=1e-4,
        weight_decay=0.01,
        betas=(0.9, 0.999),
        do_bias_correction=True,
    )
    # Run with nn.Graph (static graph) mode enabled.
    cfg.graph = dict(
        enabled=True,
    )
    default_setup(cfg, args)
    return cfg
class DemoTrainer(DefaultTrainer):
    """Minimal :class:`DefaultTrainer` subclass that trains on random tensors."""

    @classmethod
    def build_model(cls, cfg):
        """
        Returns:
            flow.nn.Module:

        It now calls :func:`build_model` from the test model helpers.
        Overwrite it if you'd like a different model.
        """
        model = build_model(cfg)
        return model

    @classmethod
    def build_graph(cls, cfg, model, optimizer=None, lr_scheduler=None, is_train=True):
        """Build the (train or eval) nn.Graph wrapper around the model."""
        return build_graph(cfg, model, optimizer, lr_scheduler)

    @classmethod
    def get_batch(cls, data):
        """Ignore the loader output and return one random global batch on GPU 0."""
        return [
            flow.randn(
                32,
                512,
                sbp=flow.sbp.split(0),
                placement=flow.placement("cuda", [0]),
            )
        ]

    @classmethod
    def build_train_loader(cls, cfg, tokenizer=None):
        """Return a dummy (train, valid, test) loader tuple over random data."""
        return (
            DataLoader(
                TensorDataset(flow.randn(1000)), batch_size=cfg.train.train_micro_batch_size
            ),
            None,
            None,
        )

    @classmethod
    def build_test_loader(cls, cfg):
        """No evaluation loaders in this demo."""
        return []
def main(args):
    """Build the demo config and trainer from *args*, then run training."""
    config = setup(args)
    demo_trainer = DemoTrainer(config)
    # NOTE: checkpoint resume is intentionally disabled for this demo:
    # trainer.resume_or_load(resume=args.resume)
    return demo_trainer.train()


if __name__ == "__main__":
    parsed_args = default_argument_parser().parse_args()
    main(parsed_args)
| 3,870 | 26.06993 | 92 | py |
libai | libai-main/tests/test_file_io.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unittests followed https://github.com/facebookresearch/iopath/blob/v0.1.8/tests/test_file_io.py
"""
import os
import shutil
import tempfile
import unittest
import uuid
from typing import Optional
from unittest.mock import MagicMock
from libai.utils.file_io import LazyPath, PathManagerBase, PathManagerFactory, g_pathmgr
class TestNativeIO(unittest.TestCase):
    """Exercise PathManagerBase against the native (local filesystem) handler."""

    # Shared fixture state, populated in setUpClass.
    _tmpdir: Optional[str] = None
    _filename: Optional[str] = None
    _tmpfile: Optional[str] = None
    _tmpfile_contents = "Hello, World"
    _pathmgr = PathManagerBase()

    @classmethod
    def setUpClass(cls) -> None:
        # Create one temp dir with a single known file used by most tests.
        cls._tmpdir = tempfile.mkdtemp()
        cls._filename = "test.txt"
        # pyre-ignore
        with open(os.path.join(cls._tmpdir, cls._filename), "w") as f:
            cls._tmpfile = f.name
            f.write(cls._tmpfile_contents)
            f.flush()

    @classmethod
    def tearDownClass(cls) -> None:
        # Cleanup temp working dir.
        if cls._tmpdir is not None:
            shutil.rmtree(cls._tmpdir)  # type: ignore

    def setUp(self) -> None:
        # Reset class variables set by methods before each test.
        self._pathmgr.set_cwd(None)
        self._pathmgr._native_path_handler._non_blocking_io_manager = None
        self._pathmgr._native_path_handler._non_blocking_io_executor = None
        self._pathmgr._async_handlers.clear()

    def test_open(self) -> None:
        # pyre-ignore
        with self._pathmgr.open(self._tmpfile, "r") as f:
            self.assertEqual(f.read(), self._tmpfile_contents)

    def test_factory_open(self) -> None:
        # Both the global manager and a factory-created one must read the file.
        with g_pathmgr.open(self._tmpfile, "r") as f:
            self.assertEqual(f.read(), self._tmpfile_contents)
        _pathmgr = PathManagerFactory.get("test_pm")
        with _pathmgr.open(self._tmpfile, "r") as f:
            self.assertEqual(f.read(), self._tmpfile_contents)
        PathManagerFactory.remove("test_pm")

    def test_open_args(self) -> None:
        # With strict kwargs checking on, all builtin-open kwargs must be accepted.
        self._pathmgr.set_strict_kwargs_checking(True)
        f = self._pathmgr.open(
            self._tmpfile,  # type: ignore
            mode="r",
            buffering=1,
            encoding="UTF-8",
            errors="ignore",
            newline=None,
            closefd=True,
            opener=None,
        )
        f.close()

    def test_get_local_path(self) -> None:
        self.assertEqual(
            # pyre-ignore
            self._pathmgr.get_local_path(self._tmpfile),
            self._tmpfile,
        )

    def test_get_local_path_forced(self) -> None:
        self.assertEqual(
            # pyre-ignore
            self._pathmgr.get_local_path(self._tmpfile, force=True),
            self._tmpfile,
        )

    def test_exists(self) -> None:
        # pyre-ignore
        self.assertTrue(self._pathmgr.exists(self._tmpfile))
        # pyre-ignore
        fake_path = os.path.join(self._tmpdir, uuid.uuid4().hex)
        self.assertFalse(self._pathmgr.exists(fake_path))

    def test_isfile(self) -> None:
        self.assertTrue(self._pathmgr.isfile(self._tmpfile))  # pyre-ignore
        # This is a directory, not a file, so it should fail
        self.assertFalse(self._pathmgr.isfile(self._tmpdir))  # pyre-ignore
        # This is a non-existing path, so it should fail
        fake_path = os.path.join(self._tmpdir, uuid.uuid4().hex)  # pyre-ignore
        self.assertFalse(self._pathmgr.isfile(fake_path))

    def test_isdir(self) -> None:
        # pyre-ignore
        self.assertTrue(self._pathmgr.isdir(self._tmpdir))
        # This is a file, not a directory, so it should fail
        # pyre-ignore
        self.assertFalse(self._pathmgr.isdir(self._tmpfile))
        # This is a non-existing path, so it should fail
        # pyre-ignore
        fake_path = os.path.join(self._tmpdir, uuid.uuid4().hex)
        self.assertFalse(self._pathmgr.isdir(fake_path))

    def test_ls(self) -> None:
        # Create some files in the tempdir to ls out.
        root_dir = os.path.join(self._tmpdir, "ls")  # pyre-ignore
        os.makedirs(root_dir, exist_ok=True)
        files = sorted(["foo.txt", "bar.txt", "baz.txt"])
        for f in files:
            open(os.path.join(root_dir, f), "a").close()
        children = sorted(self._pathmgr.ls(root_dir))
        self.assertListEqual(children, files)
        # Cleanup the tempdir
        shutil.rmtree(root_dir)

    def test_mkdirs(self) -> None:
        # pyre-ignore
        new_dir_path = os.path.join(self._tmpdir, "new", "tmp", "dir")
        self.assertFalse(self._pathmgr.exists(new_dir_path))
        self._pathmgr.mkdirs(new_dir_path)
        self.assertTrue(self._pathmgr.exists(new_dir_path))

    def test_copy(self) -> None:
        # copy() with overwrite=True must clobber the destination's contents.
        _tmpfile_2 = self._tmpfile + "2"  # pyre-ignore
        _tmpfile_2_contents = "something else"
        with open(_tmpfile_2, "w") as f:
            f.write(_tmpfile_2_contents)
            f.flush()
        self.assertTrue(self._pathmgr.copy(self._tmpfile, _tmpfile_2, overwrite=True))
        with self._pathmgr.open(_tmpfile_2, "r") as f:
            self.assertEqual(f.read(), self._tmpfile_contents)

    def test_move(self) -> None:
        # mv() must transfer contents and remove the source.
        _tmpfile_2 = self._tmpfile + "2" + uuid.uuid4().hex  # pyre-ignore
        _tmpfile_3 = self._tmpfile + "3_" + uuid.uuid4().hex  # pyre-ignore
        _tmpfile_2_contents = "Hello Move"
        with open(_tmpfile_2, "w") as f:
            f.write(_tmpfile_2_contents)
            f.flush()
        # pyre-ignore
        self.assertTrue(self._pathmgr.mv(_tmpfile_2, _tmpfile_3))
        with self._pathmgr.open(_tmpfile_3, "r") as f:
            self.assertEqual(f.read(), _tmpfile_2_contents)
        self.assertFalse(self._pathmgr.exists(_tmpfile_2))
        self._pathmgr.rm(_tmpfile_3)

    def test_symlink(self) -> None:
        _symlink = self._tmpfile + "_symlink"  # pyre-ignore
        self.assertTrue(self._pathmgr.symlink(self._tmpfile, _symlink))  # pyre-ignore
        with self._pathmgr.open(_symlink) as f:
            self.assertEqual(f.read(), self._tmpfile_contents)
        self.assertEqual(os.readlink(_symlink), self._tmpfile)
        os.remove(_symlink)

    def test_rm(self) -> None:
        # pyre-ignore
        with open(os.path.join(self._tmpdir, "test_rm.txt"), "w") as f:
            rm_file = f.name
            f.write(self._tmpfile_contents)
            f.flush()
        self.assertTrue(self._pathmgr.exists(rm_file))
        self.assertTrue(self._pathmgr.isfile(rm_file))
        self._pathmgr.rm(rm_file)
        self.assertFalse(self._pathmgr.exists(rm_file))
        self.assertFalse(self._pathmgr.isfile(rm_file))

    def test_set_cwd(self) -> None:
        # File not found since cwd not set yet.
        self.assertFalse(self._pathmgr.isfile(self._filename))
        self.assertTrue(self._pathmgr.isfile(self._tmpfile))
        # Once cwd is set, relative file path works.
        self._pathmgr.set_cwd(self._tmpdir)
        self.assertTrue(self._pathmgr.isfile(self._filename))
        # Set cwd to None
        self._pathmgr.set_cwd(None)
        self.assertFalse(self._pathmgr.isfile(self._filename))
        self.assertTrue(self._pathmgr.isfile(self._tmpfile))
        # Set cwd to invalid path
        with self.assertRaises(ValueError):
            self._pathmgr.set_cwd("/nonexistent/path")

    def test_get_path_with_cwd(self) -> None:
        self._pathmgr.set_cwd(self._tmpdir)
        # Make sure _get_path_with_cwd() returns correctly.
        self.assertEqual(
            self._pathmgr._native_path_handler._get_path_with_cwd(self._filename),
            self._tmpfile,
        )
        self.assertEqual(
            self._pathmgr._native_path_handler._get_path_with_cwd("/abs.txt"),
            "/abs.txt",
        )

    def test_bad_args(self) -> None:
        # Unknown kwargs must raise under strict checking, and pass through
        # silently once strict checking is disabled.
        # TODO (T58240718): Replace with dynamic checks
        with self.assertRaises(ValueError):
            self._pathmgr.copy(self._tmpfile, self._tmpfile, foo="foo")  # type: ignore
        with self.assertRaises(ValueError):
            self._pathmgr.exists(self._tmpfile, foo="foo")  # type: ignore
        with self.assertRaises(ValueError):
            self._pathmgr.get_local_path(self._tmpfile, foo="foo")  # type: ignore
        with self.assertRaises(ValueError):
            self._pathmgr.isdir(self._tmpfile, foo="foo")  # type: ignore
        with self.assertRaises(ValueError):
            self._pathmgr.isfile(self._tmpfile, foo="foo")  # type: ignore
        with self.assertRaises(ValueError):
            self._pathmgr.ls(self._tmpfile, foo="foo")  # type: ignore
        with self.assertRaises(ValueError):
            self._pathmgr.mkdirs(self._tmpfile, foo="foo")  # type: ignore
        with self.assertRaises(ValueError):
            self._pathmgr.open(self._tmpfile, foo="foo")  # type: ignore
        with self.assertRaises(ValueError):
            self._pathmgr.opena(self._tmpfile, foo="foo")  # type: ignore
        with self.assertRaises(ValueError):
            self._pathmgr.rm(self._tmpfile, foo="foo")  # type: ignore
        with self.assertRaises(ValueError):
            self._pathmgr.set_cwd(self._tmpdir, foo="foo")  # type: ignore
        self._pathmgr.set_strict_kwargs_checking(False)
        self._pathmgr.copy(self._tmpfile, self._tmpfile + "2", foo="foo")  # type: ignore
        self._pathmgr.exists(self._tmpfile, foo="foo")  # type: ignore
        self._pathmgr.get_local_path(self._tmpfile, foo="foo")  # type: ignore
        self._pathmgr.isdir(self._tmpfile, foo="foo")  # type: ignore
        self._pathmgr.isfile(self._tmpfile, foo="foo")  # type: ignore
        self._pathmgr.ls(self._tmpdir, foo="foo")  # type: ignore
        self._pathmgr.mkdirs(self._tmpdir, foo="foo")  # type: ignore
        f = self._pathmgr.open(self._tmpfile, foo="foo")  # type: ignore
        f.close()
        # pyre-ignore
        with open(os.path.join(self._tmpdir, "test_rm.txt"), "w") as f:
            rm_file = f.name
            f.write(self._tmpfile_contents)
            f.flush()
        self._pathmgr.rm(rm_file, foo="foo")  # type: ignore
class TestLazyPath(unittest.TestCase):
    """Verify LazyPath defers its callable until first materialization."""

    _pathmgr = PathManagerBase()

    def test_materialize(self) -> None:
        # The wrapped callable must run exactly once, on first os.fspath().
        f = MagicMock(return_value="test")
        x = LazyPath(f)
        f.assert_not_called()
        p = os.fspath(x)
        f.assert_called()
        self.assertEqual(p, "test")
        p = os.fspath(x)
        # should only be called once
        f.assert_called_once()
        self.assertEqual(p, "test")

    def test_join(self) -> None:
        # os.path.join() materializes the lazy path implicitly.
        f = MagicMock(return_value="test")
        x = LazyPath(f)
        p = os.path.join(x, "a.txt")
        f.assert_called_once()
        self.assertEqual(p, "test/a.txt")

    def test_getattr(self) -> None:
        # str attributes are unavailable before materialization, available after.
        x = LazyPath(lambda: "abc")
        with self.assertRaises(AttributeError):
            x.startswith("ab")
        _ = os.fspath(x)
        self.assertTrue(x.startswith("ab"))

    def test_PathManager(self) -> None:
        # A LazyPath must be usable anywhere a plain path string is.
        x = LazyPath(lambda: "./")
        output = self._pathmgr.ls(x)  # pyre-ignore
        output_gt = self._pathmgr.ls("./")
        self.assertEqual(sorted(output), sorted(output_gt))

    def test_getitem(self) -> None:
        # Indexing is unavailable before materialization, available after.
        x = LazyPath(lambda: "abc")
        with self.assertRaises(TypeError):
            x[0]
        _ = os.fspath(x)
        self.assertEqual(x[0], "a")
if __name__ == "__main__":
unittest.main()
| 12,018 | 37.155556 | 95 | py |
libai | libai-main/tests/__init__.py | 0 | 0 | 0 | py | |
libai | libai-main/tests/test_scheduler.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import unittest
from unittest import TestCase
import numpy as np
import oneflow as flow
import oneflow.nn as nn
from libai.scheduler import (
WarmupCosineLR,
WarmupExponentialLR,
WarmupMultiStepLR,
WarmupPolynomialLR,
WarmupStepLR,
)
# @unittest.skip("Bugs in warmup scheduler")
class TestScheduler(TestCase):
    """Check LiBai's warmup LR schedulers against closed-form expected values."""

    def test_warmup_multistep(self):
        # Dummy parameter/optimizer so the scheduler has something to drive.
        p = nn.Parameter(flow.zeros(0))
        opt = flow.optim.SGD([p], lr=5.0)
        sched = WarmupMultiStepLR(
            optimizer=opt,
            max_iter=10,
            milestones=[10, 15, 20],
            gamma=0.1,
            warmup_factor=0.001,
            warmup_iter=5,
            warmup_method="linear",
        )
        p.sum().backward()
        opt.step()
        lrs = [0.005]
        for _ in range(30):
            sched.step()
            lrs.append(opt.param_groups[0]["lr"])
        # Linear warmup for 5 steps, then 0.1x decay at each milestone.
        self.assertTrue(np.allclose(lrs[:5], [0.005, 1.004, 2.003, 3.002, 4.001]))
        self.assertTrue(np.allclose(lrs[5:10], 5.0))
        self.assertTrue(np.allclose(lrs[10:15], 0.5))
        self.assertTrue(np.allclose(lrs[15:20], 0.05))
        self.assertTrue(np.allclose(lrs[20:], 0.005))

    def test_warmup_step(self):
        p = nn.Parameter(flow.zeros(0))
        opt = flow.optim.SGD([p], lr=5.0)
        sched = WarmupStepLR(
            optimizer=opt,
            max_iter=10,
            step_size=10,
            gamma=0.1,
            warmup_factor=0.001,
            warmup_iter=5,
            warmup_method="linear",
        )
        p.sum().backward()
        opt.step()
        lrs = [0.005]
        for _ in range(30):
            sched.step()
            lrs.append(opt.param_groups[0]["lr"])
        # Linear warmup for 5 steps, then 0.1x decay every 10 steps.
        self.assertTrue(np.allclose(lrs[:5], [0.005, 1.004, 2.003, 3.002, 4.001]))
        self.assertTrue(np.allclose(lrs[5:10], 5.0))
        self.assertTrue(np.allclose(lrs[10:20], 0.5))
        self.assertTrue(np.allclose(lrs[20:30], 0.05))
        self.assertTrue(np.allclose(lrs[30:], 0.005))

    def test_warmup_cosine(self):
        p = nn.Parameter(flow.zeros(0))
        opt = flow.optim.SGD([p], lr=5.0)
        sched = WarmupCosineLR(
            optimizer=opt,
            max_iter=30,
            warmup_factor=0.001,
            warmup_iter=5,
            warmup_method="linear",
        )
        p.sum().backward()
        opt.step()
        self.assertEqual(opt.param_groups[0]["lr"], 0.005)
        lrs = [0.005]
        for _ in range(30):
            sched.step()
            lrs.append(opt.param_groups[0]["lr"])
        for idx, lr in enumerate(lrs):
            # Cosine annealing value at step idx; must match only after warmup.
            expected_cosine = 2.5 * (1.0 + math.cos(math.pi * idx / 30))
            if idx >= 5:
                self.assertAlmostEqual(lr, expected_cosine)
            else:
                self.assertNotAlmostEqual(lr, expected_cosine)

    def test_warmup_exponential(self):
        p = nn.Parameter(flow.zeros(0))
        opt = flow.optim.SGD([p], lr=5.0)
        sched = WarmupExponentialLR(
            optimizer=opt,
            max_iter=10,
            gamma=0.1,
            warmup_factor=0.001,
            warmup_iter=5,
            warmup_method="linear",
        )
        p.sum().backward()
        opt.step()
        self.assertEqual(opt.param_groups[0]["lr"], 0.005)
        lrs = [0.005]

        def _get_exponential_lr(base_lr, gamma, max_iters, warmup_iters):
            # Reference values: base_lr * gamma**idx for post-warmup steps.
            valid_values = []
            for idx in range(warmup_iters, max_iters + 1):
                valid_values.append(base_lr * (gamma ** idx))
            return valid_values

        for _ in range(30):
            sched.step()
            lrs.append(opt.param_groups[0]["lr"])
        self.assertTrue(
            np.allclose(
                lrs[:5], [0.005, 0.00401, 0.0030199999999999997, 0.00203, 0.0010399999999999997]
            )
        )
        valid_intermediate_values = _get_exponential_lr(
            base_lr=5.0, gamma=0.1, max_iters=30, warmup_iters=5
        )
        self.assertEqual(lrs[5:], valid_intermediate_values)

    def test_warmup_polynomial(self):
        p = nn.Parameter(flow.zeros(0))
        opt = flow.optim.SGD([p], lr=5.0)
        sched = WarmupPolynomialLR(
            optimizer=opt,
            max_iter=30,
            warmup_factor=0.001,
            warmup_iter=0,  # no warmup: decay starts immediately
            end_learning_rate=1e-4,
            power=1.0,
            cycle=False,
            warmup_method="linear",
        )
        # self.assertEqual(opt.param_groups[0]["lr"], 0.005)
        # lrs = [0.005]
        lrs = [5.0]  # lr_scheduler first invoke result

        def _get_polynomial_lr(
            base_lr, max_iters, warmup_iters, end_lr=1e-4, power=1.0, cycle=False
        ):
            # Reference polynomial-decay schedule mirroring the scheduler's formula.
            valid_values = []
            decay_steps = max_iters - warmup_iters
            for step in range(max_iters - warmup_iters):
                if cycle:
                    if step == 0:
                        step = 1
                    decay_steps = decay_steps * math.ceil(step / decay_steps)
                else:
                    step = min(step, decay_steps)
                valid_values.append(
                    (base_lr - end_lr) * ((1 - step / decay_steps) ** power) + end_lr
                )
            return valid_values

        for _ in range(29):
            sched.step()  # only invoke (max_iter-1), because the first invoke is done when init
            lrs.append(opt.param_groups[0]["lr"])
        # self.assertTrue(np.allclose(lrs[:5], [0.005, 1.004, 2.003, 3.002, 4.001]))
        valid_intermediate_values = _get_polynomial_lr(base_lr=5.0, max_iters=30, warmup_iters=0)
        # self.assertEqual(lrs[5:30], valid_intermediate_values)
        self.assertEqual(lrs, valid_intermediate_values)
if __name__ == "__main__":
unittest.main()
| 6,402 | 31.015 | 97 | py |
libai | libai-main/tests/test_optim.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unittests followed https://github.com/facebookresearch/detectron2/blob/main/tests/test_solver.py
"""
import unittest
from libai.optim.build import _expand_param_groups, reduce_param_groups
class TestOptimizer(unittest.TestCase):
    """Unit tests for parameter-group expansion/reduction in libai.optim.build."""

    def testExpandParamGroups(self):
        """Overlapping groups expand to one entry per parameter; later options win."""
        params = [
            {"params": ["p1", "p2", "p3", "p4"], "lr": 1.0, "weight_decay": 3.0},
            {"params": ["p2", "p3", "p5"], "lr": 2.0, "momentum": 2.0},
            {"params": ["p1"], "weight_decay": 4.0},
        ]
        out = _expand_param_groups(params)
        gt = [
            dict(params=["p1"], lr=1.0, weight_decay=4.0),  # noqa
            dict(params=["p2"], lr=2.0, weight_decay=3.0, momentum=2.0),  # noqa
            dict(params=["p3"], lr=2.0, weight_decay=3.0, momentum=2.0),  # noqa
            dict(params=["p4"], lr=1.0, weight_decay=3.0),  # noqa
            dict(params=["p5"], lr=2.0, momentum=2.0),  # noqa
        ]
        self.assertEqual(out, gt)

    def testReduceParamGroups(self):
        """Groups with identical optimizer options are merged, order preserved."""
        params = [
            dict(params=["p1"], lr=1.0, weight_decay=4.0),  # noqa
            dict(params=["p2", "p6"], lr=2.0, weight_decay=3.0, momentum=2.0),  # noqa
            dict(params=["p3"], lr=2.0, weight_decay=3.0, momentum=2.0),  # noqa
            dict(params=["p4"], lr=1.0, weight_decay=3.0),  # noqa
            dict(params=["p5"], lr=2.0, momentum=2.0),  # noqa
        ]
        gt_groups = [
            {"lr": 1.0, "weight_decay": 4.0, "params": ["p1"]},
            {"lr": 2.0, "weight_decay": 3.0, "momentum": 2.0, "params": ["p2", "p6", "p3"]},
            {"lr": 1.0, "weight_decay": 3.0, "params": ["p4"]},
            {"lr": 2.0, "momentum": 2.0, "params": ["p5"]},
        ]
        out = reduce_param_groups(params)
        # Fixed: was assertTrue(out, gt_groups), which used gt_groups as the
        # failure *message* and never compared the two values.
        self.assertEqual(out, gt_groups)
if __name__ == "__main__":
unittest.main()
| 2,519 | 39 | 96 | py |
libai | libai-main/tests/fixtures/utils.py | from libai.utils.download import download
# Map of fixture filename -> download URL.
# NOTE: the misspelled name "fixtrue_urls" is kept for backward compatibility
# with any external importers of this module.
fixtrue_urls = {
    "sample_text.txt": "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/fixtures/sample_text.txt",  # noqa
    "spiece.model": "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/fixtures/spiece.model",  # noqa
    "test_sentencepiece.model": "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/fixtures/test_sentencepiece.model",  # noqa
}

# Directory (relative to the repo root) where downloaded fixtures are stored.
BASE_DIR = "tests/fixtures"


def get_fixtures(fixture_dir):
    """Download the test fixture named by *fixture_dir*'s basename.

    Args:
        fixture_dir: path (or bare filename) whose last component names the fixture.

    Returns:
        Whatever ``download`` returns (the local fixture location).

    Raises:
        RuntimeError: if the basename is not a known fixture.
    """
    import os

    # os.path.basename handles trailing components on any platform,
    # unlike a manual split on "/".
    fixture_name = os.path.basename(fixture_dir)
    if fixture_name not in fixtrue_urls:
        # Fixed typo in the error message: "fixtrues" -> "fixtures".
        raise RuntimeError("{} not available in LiBai tests fixtures!".format(fixture_name))
    return download(fixtrue_urls[fixture_name], BASE_DIR)
| 773 | 44.529412 | 150 | py |
libai | libai-main/tests/config/test_lazy_config.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unittests followed
https://github.com/facebookresearch/detectron2/blob/main/tests/config/test_lazy_config.py
"""
import os
import unittest
import tempfile
from itertools import count
from libai.config import LazyConfig, LazyCall
from omegaconf import DictConfig
class TestLazyPythonConfig(unittest.TestCase):
    """Round-trip and override tests for LazyConfig loading of root_cfg.py."""

    def setUp(self):
        self.root_filename = os.path.join(os.path.dirname(__file__), "root_cfg.py")

    def test_load(self):
        cfg = LazyConfig.load(self.root_filename)
        # Values as set up by root_cfg.py and its dir1 sub-configs.
        self.assertEqual(cfg.dir1a_dict.a, "modified")
        self.assertEqual(cfg.dir1b_dict.a, 1)
        self.assertEqual(cfg.lazyobj.x, "base_a_1")
        cfg.lazyobj.x = "new_x"
        # reload: in-memory mutation must not leak into a fresh load
        cfg = LazyConfig.load(self.root_filename)
        self.assertEqual(cfg.lazyobj.x, "base_a_1")

    def test_save_load(self):
        cfg = LazyConfig.load(self.root_filename)
        with tempfile.TemporaryDirectory(prefix="detectron2") as d:
            fname = os.path.join(d, "test_config.yaml")
            LazyConfig.save(cfg, fname)
            cfg2 = LazyConfig.load(fname)
        # After a YAML round trip the callable target becomes its dotted name.
        self.assertEqual(cfg2.lazyobj._target_, "itertools.count")
        self.assertEqual(cfg.lazyobj._target_, count)
        cfg2.lazyobj.pop("_target_")
        cfg.lazyobj.pop("_target_")
        # the rest are equal
        self.assertEqual(cfg, cfg2)

    def test_failed_save(self):
        # Unserializable entries (a lambda) fall back to a pickle side file.
        cfg = DictConfig({"x": lambda: 3}, flags={"allow_objects": True})
        with tempfile.TemporaryDirectory(prefix="detectron2") as d:
            fname = os.path.join(d, "test_config.yaml")
            LazyConfig.save(cfg, fname)
            self.assertTrue(os.path.exists(fname))
            self.assertTrue(os.path.exists(fname + ".pkl"))

    def test_overrides(self):
        cfg = LazyConfig.load(self.root_filename)
        LazyConfig.apply_overrides(cfg, ["lazyobj.x=123", 'dir1b_dict.a="123"'])
        self.assertEqual(cfg.dir1b_dict.a, "123")
        self.assertEqual(cfg.lazyobj.x, 123)

    def test_invalid_overrides(self):
        # Overriding below a scalar leaf is rejected.
        cfg = LazyConfig.load(self.root_filename)
        with self.assertRaises(KeyError):
            LazyConfig.apply_overrides(cfg, ["lazyobj.x.xxx=123"])

    def test_to_py(self):
        # to_py() must emit deterministic, readable Python assignments.
        cfg = LazyConfig.load(self.root_filename)
        cfg.lazyobj.x = {
            "a": 1,
            "b": 2,
            "c": LazyCall(count)(x={"r": "a", "s": 2.4, "t": [1, 2, 3, "z"]}),
        }
        cfg.list = ["a", 1, "b", 3.2]
        py_str = LazyConfig.to_py(cfg)
        expected = """cfg.dir1a_dict.a = "modified"
cfg.dir1a_dict.b = 2
cfg.dir1b_dict.a = 1
cfg.dir1b_dict.b = 2
cfg.lazyobj = itertools.count(
    x={
        "a": 1,
        "b": 2,
        "c": itertools.count(x={"r": "a", "s": 2.4, "t": [1, 2, 3, "z"]}),
    },
    y="base_a_1_from_b",
)
cfg.list = ["a", 1, "b", 3.2]
"""
        self.assertEqual(py_str, expected)
| 3,540 | 33.378641 | 89 | py |
libai | libai-main/tests/config/test_instantiate_config.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unittests followed
https://github.com/facebookresearch/detectron2/blob/main/tests/config/test_instantiate_config.py
"""
from collections import namedtuple
import os
import unittest
import yaml
import tempfile
from libai.config import instantiate, LazyCall
from omegaconf import OmegaConf
from dataclasses import dataclass
from omegaconf import __version__ as oc_version
OC_VERSION = tuple(int(x) for x in oc_version.split(".")[:2])
class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "width"])):
    """
    A simple structure that contains basic shape specification about a tensor.
    It is often used as the auxiliary inputs/outputs of models,
    to complement the lack of shape inference ability among pytorch modules.

    Attributes:
        channels: channel count, or None when unspecified.
        width: spatial width, or None when unspecified.
    """

    def __new__(cls, channels=None, width=None):
        # Default both fields to None so callers may specify only what they know.
        return tuple.__new__(cls, (channels, width))
class TestClass:
    """Simple configurable object used as an instantiation target in the tests.

    Stores all constructor arguments verbatim; calling the instance adds the
    stored integer to the call argument.
    """

    def __init__(self, int_arg, list_arg=None, dict_arg=None, extra_arg=None) -> None:
        # Record every argument under its own name for later inspection.
        for attr_name, attr_value in (
            ("int_arg", int_arg),
            ("list_arg", list_arg),
            ("dict_arg", dict_arg),
            ("extra_arg", extra_arg),
        ):
            setattr(self, attr_name, attr_value)

    def __call__(self, call_arg):
        """Return *call_arg* shifted by the stored ``int_arg``."""
        shifted = self.int_arg + call_arg
        return shifted
@dataclass
class TestDataClass:
    """Minimal dataclass used to check that instantiate() supports dataclasses."""

    # x: integer payload; y: string payload.
    x: int
    y: str
@unittest.skipIf(OC_VERSION < (2, 1), "omegaconf version too old")
class TestConstruction(unittest.TestCase):
    """Behavioral tests for LazyCall / instantiate."""

    def test_basic_construct(self):
        # Nested LazyCall with an omegaconf interpolation back to the parent.
        objconf = LazyCall(TestClass)(
            int_arg=3,
            list_arg=[10],
            dict_arg={},
            extra_arg=LazyCall(TestClass)(int_arg=4, list_arg="${..list_arg}"),
        )
        obj = instantiate(objconf)
        self.assertIsInstance(obj, TestClass)
        self.assertEqual(obj.int_arg, 3)
        self.assertEqual(obj.extra_arg.int_arg, 4)
        self.assertEqual(obj.extra_arg.list_arg, obj.list_arg)
        objconf.extra_arg.list_arg = [5]
        obj = instantiate(objconf)
        self.assertIsInstance(obj, TestClass)
        self.assertEqual(obj.extra_arg.list_arg, [5])

    def test_instantiate_other_obj(self):
        # do nothing for other obj
        self.assertEqual(instantiate(5), 5)
        x = [3, 4, 5]
        self.assertEqual(instantiate(x), x)
        x = TestClass(1)
        self.assertIs(instantiate(x), x)
        x = {"xx": "yy"}
        self.assertEqual(instantiate(x), x)

    def test_instantiate_lazy_target(self):
        # _target_ is result of instantiate
        objconf = LazyCall(LazyCall(len)(int_arg=3))(call_arg=4)
        objconf._target_._target_ = TestClass
        self.assertEqual(instantiate(objconf), 7)

    def test_instantiate_lst(self):
        lst = [1, 2, LazyCall(TestClass)(int_arg=1)]
        x = LazyCall(TestClass)(
            int_arg=lst
        )  # list as an argument should be recursively instantiated
        x = instantiate(x).int_arg
        self.assertEqual(x[:2], [1, 2])
        self.assertIsInstance(x[2], TestClass)
        self.assertEqual(x[2].int_arg, 1)

    def test_instantiate_namedtuple(self):
        x = LazyCall(TestClass)(int_arg=ShapeSpec(channels=1, width=3))
        # test serialization: namedtuples must survive a YAML round trip
        with tempfile.TemporaryDirectory() as d:
            fname = os.path.join(d, "lb_test.yaml")
            OmegaConf.save(x, fname)
            with open(fname) as f:
                x = yaml.unsafe_load(f)
        x = instantiate(x)
        self.assertIsInstance(x.int_arg, ShapeSpec)
        self.assertEqual(x.int_arg.channels, 1)

    def test_bad_lazycall(self):
        # LazyCall requires a callable target.
        with self.assertRaises(Exception):
            LazyCall(3)

    def test_instantiate_dataclass(self):
        a = LazyCall(TestDataClass)(x=1, y="s")
        a = instantiate(a)
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, "s")

    def test_instantiate_no_recursive(self):
        def helper_func(obj):
            # With _recursive_=False the nested LazyCall arrives uninstantiated.
            self.assertNotIsInstance(obj, TestClass)
            obj = instantiate(obj)
            self.assertIsInstance(obj, TestClass)
            return obj.int_arg

        objconf = LazyCall(helper_func)(obj=LazyCall(TestClass)(int_arg=4))
        self.assertEqual(instantiate(objconf, _recursive_=False), 4)
if __name__ == "__main__":
unittest.main()
| 4,865 | 31.657718 | 96 | py |
libai | libai-main/tests/config/root_cfg.py | # Copyright (c) Facebook, Inc. and its affiliates.
from itertools import count
from libai.config import LazyCall
from .dir1.dir1_a import dir1a_dict, dir1a_str
dir1a_dict.a = "modified"
# modification above won't affect future imports
from .dir1.dir1_b import dir1b_dict, dir1b_str
lazyobj = LazyCall(count)(x=dir1a_str, y=dir1b_str)
| 340 | 21.733333 | 51 | py |
libai | libai-main/tests/config/dir1/dir1_a.py | # Copyright (c) Facebook, Inc. and its affiliates.
dir1a_str = "base_a_1"
dir1a_dict = {"a": 1, "b": 2}
| 104 | 25.25 | 50 | py |
libai | libai-main/tests/config/dir1/dir1_b.py | # Copyright (c) Facebook, Inc. and its affiliates.
from libai.config import LazyConfig
# load_rel is equivalent to a relative import of the named symbols
dir1a_str, dir1a_dict = LazyConfig.load_rel("dir1_a.py", ("dir1a_str", "dir1a_dict"))
dir1b_str = dir1a_str + "_from_b"
dir1b_dict = dir1a_dict
# Every import is a reload: values are not affected by modifications made in other config files
assert dir1a_dict.a == 1
| 354 | 28.583333 | 85 | py |
libai | libai-main/tests/models/test_vit.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import oneflow as flow
import oneflow.unittest
from flowvision.loss.cross_entropy import SoftTargetCrossEntropy
import libai.utils.distributed as dist
from configs.common.models.vit.vit_small_patch16_224 import model
from libai.config import LazyCall, LazyConfig
from libai.data.datasets import CIFAR10Dataset
from libai.engine import DefaultTrainer
from libai.engine.default import _check_batch_size
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/cifar10/cifar-10-python.tar.gz" # noqa
DATA_MD5 = "c58f30108f718f92721af3b95e74349a"
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_vit")
setup_logger(distributed_rank=dist.get_rank())
class TestViTModel(flow.unittest.TestCase):
    """Smoke tests running a small ViT for a few iterations under different
    parallelism layouts (data/tensor/pipeline, eager and graph mode, ZeRO).
    All tests require 4 devices on one node."""

    def setUp(self) -> None:
        """Build the shared training config: small ViT on CIFAR-10, 10 iters."""
        cache_dir = os.path.join(os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "vit_data")
        cfg = LazyConfig.load("configs/vit_imagenet.py")
        # set model: shrink to 6 transformer blocks and 10 classes to keep the test fast
        cfg.model = model
        cfg.model.num_classes = 10
        cfg.model.depth = 6
        cfg.model.loss_func = LazyCall(SoftTargetCrossEntropy)()
        # prepare data path: download once per node, then all ranks wait
        if dist.get_local_rank() == 0:
            get_data_from_cache(DATA_URL, cache_dir, md5=DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()
        data_path = get_data_from_cache(DATA_URL, cache_dir, md5=DATA_MD5)
        # Point train/test dataloaders at the cached CIFAR-10 copy.
        cfg.dataloader.train.dataset[0]._target_ = CIFAR10Dataset
        cfg.dataloader.train.dataset[0].root = "/".join(data_path.split("/")[:-1])
        cfg.dataloader.train.dataset[0].download = True
        cfg.dataloader.train.num_workers = 0
        cfg.dataloader.test[0].dataset._target_ = CIFAR10Dataset
        cfg.dataloader.test[0].dataset.train = False
        cfg.dataloader.test[0].dataset.root = "/".join(data_path.split("/")[:-1])
        cfg.dataloader.test[0].dataset.download = True
        cfg.dataloader.test[0].num_workers = 0
        # refine mixup cfg to match the reduced class count
        cfg.dataloader.train.mixup_func.num_classes = 10
        # set training config: 10 iterations with one evaluation pass
        cfg.train.train_epoch = 0
        cfg.train.train_iter = 10
        cfg.train.evaluation.eval_period = 10
        cfg.train.evaluation.eval_iter = 10
        cfg.train.log_period = 1
        cfg.train.train_micro_batch_size = 8
        cfg.train.num_accumulation_steps = 1
        cfg.train.resume = False
        cfg.train.output_dir = TEST_OUTPUT
        cfg.train.activation_checkpoint.enabled = True
        cfg.train.amp.enabled = True
        cfg.train.rdma_enabled = False
        self.cfg = cfg

    @classmethod
    def tearDownClass(cls) -> None:
        """Remove the test output directory (one process per node)."""
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_vit_eager_with_data_tensor_parallel(self):
        """Eager mode, 2-way data x 2-way tensor parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        # pipeline parallelism not supported in eager global now!
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_vit_eager_with_pipeline_parallel(self):
        """Eager mode, 4-stage pipeline parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 1
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 4
        self.cfg.train.dist.pipeline_num_layers = self.cfg.model.depth
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_vit_graph_with_data_tensor_parallel(self):
        """Graph mode, 2-way data x 2-way tensor parallel."""
        self.cfg.train.num_accumulation_steps = 1
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_vit_graph_with_data_tensor_pipeline_parallel(self):
        """Graph mode, 2-way data x 2-stage pipeline parallel with accumulation."""
        self.cfg.train.num_accumulation_steps = 4
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        # change to 2 when 2d sbp bugfix
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 2
        self.cfg.train.dist.pipeline_num_layers = self.cfg.model.depth
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_vit_with_zero(self):
        """Graph mode, 4-way data parallel with ZeRO stage 3 enabled."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 4
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        self.cfg.train.zero_optimization.enabled = True
        self.cfg.train.zero_optimization.stage = 3
        trainer = DefaultTrainer(self.cfg)
        trainer.train()
if __name__ == "__main__":
unittest.main()
| 6,402 | 34.77095 | 125 | py |
libai | libai-main/tests/models/test_swin.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import oneflow as flow
import oneflow.unittest
from flowvision.loss.cross_entropy import SoftTargetCrossEntropy
import libai.utils.distributed as dist
from configs.common.models.swin.swin_tiny_patch4_window7_224 import model
from libai.config import LazyConfig
from libai.data.datasets import CIFAR10Dataset
from libai.engine import DefaultTrainer
from libai.engine.default import _check_batch_size
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/cifar10/cifar-10-python.tar.gz" # noqa
DATA_MD5 = "c58f30108f718f92721af3b95e74349a"
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_swin")
setup_logger(distributed_rank=dist.get_rank())
class TestSwinModel(flow.unittest.TestCase):
    """Smoke tests running a small Swin Transformer for a few iterations under
    different parallelism layouts (data/tensor/pipeline, eager and graph mode,
    ZeRO). All tests require 4 devices on one node."""

    def setUp(self) -> None:
        """Build the shared training config: Swin-tiny on CIFAR-10, 10 iters."""
        cache_dir = os.path.join(os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "swin_data")
        cfg = LazyConfig.load("configs/swin_cifar100.py")
        # set model: 10 classes to match CIFAR-10
        cfg.model = model
        cfg.model.cfg.num_classes = 10
        cfg.model.cfg.loss_func = SoftTargetCrossEntropy()
        # prepare data path: download once per node, then all ranks wait
        if dist.get_local_rank() == 0:
            get_data_from_cache(DATA_URL, cache_dir, md5=DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()
        data_path = get_data_from_cache(DATA_URL, cache_dir, md5=DATA_MD5)
        # Point train/test dataloaders at the cached CIFAR-10 copy.
        cfg.dataloader.train.dataset[0]._target_ = CIFAR10Dataset
        cfg.dataloader.train.dataset[0].root = "/".join(data_path.split("/")[:-1])
        cfg.dataloader.train.dataset[0].download = True
        cfg.dataloader.train.num_workers = 0
        cfg.dataloader.test[0].dataset._target_ = CIFAR10Dataset
        cfg.dataloader.test[0].dataset.train = False
        cfg.dataloader.test[0].dataset.root = "/".join(data_path.split("/")[:-1])
        cfg.dataloader.test[0].dataset.download = True
        cfg.dataloader.test[0].num_workers = 0
        # refine mixup cfg to match the reduced class count
        cfg.dataloader.train.mixup_func.num_classes = 10
        # set training config: 10 iterations with one evaluation pass
        cfg.train.train_epoch = 0
        cfg.train.train_iter = 10
        cfg.train.evaluation.eval_period = 10
        cfg.train.evaluation.eval_iter = 10
        cfg.train.log_period = 1
        cfg.train.train_micro_batch_size = 8
        cfg.train.num_accumulation_steps = 1
        cfg.train.resume = False
        cfg.train.output_dir = TEST_OUTPUT
        cfg.train.activation_checkpoint.enabled = True
        cfg.train.amp.enabled = True
        cfg.train.rdma_enabled = False
        self.cfg = cfg

    @classmethod
    def tearDownClass(cls) -> None:
        """Remove the test output directory (one process per node)."""
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_swin_eager_with_data_tensor_parallel(self):
        """Eager mode, 2-way data x 2-way tensor parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        # pipeline parallelism not supported in eager global now!
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_swin_eager_with_pipeline_parallel(self):
        """Eager mode, 4-stage pipeline parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 1
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 4
        # total transformer layers across all Swin stages
        self.cfg.train.dist.pipeline_num_layers = sum(self.cfg.model.cfg.depths)
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_swin_graph_with_data_tensor_parallel(self):
        """Graph mode, 2-way data x 2-way tensor parallel."""
        self.cfg.train.num_accumulation_steps = 1
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_swin_graph_with_data_tensor_pipeline_parallel(self):
        """Graph mode, 2-way data x 2-stage pipeline parallel with accumulation."""
        self.cfg.train.num_accumulation_steps = 4
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        # change to 2 when 2d sbp bugfix
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 2
        self.cfg.train.dist.pipeline_num_layers = sum(self.cfg.model.cfg.depths)
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_swin_with_zero(self):
        """Graph mode, 4-way data parallel with ZeRO stage 3 enabled."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 4
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        self.cfg.train.zero_optimization.enabled = True
        self.cfg.train.zero_optimization.stage = 3
        trainer = DefaultTrainer(self.cfg)
        trainer.train()
if __name__ == "__main__":
unittest.main()
| 6,399 | 34.955056 | 125 | py |
libai | libai-main/tests/models/test_swinv2.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import oneflow as flow
import oneflow.unittest
from flowvision.loss.cross_entropy import SoftTargetCrossEntropy
import libai.utils.distributed as dist
from configs.common.models.swinv2.swinv2_tiny_patch4_window8_256 import model
from libai.config import LazyConfig
from libai.data.datasets import CIFAR10Dataset
from libai.engine import DefaultTrainer
from libai.engine.default import _check_batch_size
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/cifar10/cifar-10-python.tar.gz" # noqa
DATA_MD5 = "c58f30108f718f92721af3b95e74349a"
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_swinv2")
setup_logger(distributed_rank=dist.get_rank())
class TestSwinV2Model(flow.unittest.TestCase):
    """Smoke tests running a small SwinV2 for a few iterations under different
    parallelism layouts (data/tensor/pipeline, eager and graph mode, ZeRO).
    All tests require 4 devices on one node."""

    def setUp(self) -> None:
        """Build the shared training config: SwinV2-tiny on CIFAR-10, 10 iters."""
        cache_dir = os.path.join(os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "swinv2_data")
        cfg = LazyConfig.load("configs/swinv2_cifar100.py")
        # set model: 10 classes to match CIFAR-10
        cfg.model = model
        cfg.model.cfg.num_classes = 10
        cfg.model.cfg.loss_func = SoftTargetCrossEntropy()
        # prepare data path: download once per node, then all ranks wait
        if dist.get_local_rank() == 0:
            get_data_from_cache(DATA_URL, cache_dir, md5=DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()
        data_path = get_data_from_cache(DATA_URL, cache_dir, md5=DATA_MD5)
        # Point train/test dataloaders at the cached CIFAR-10 copy.
        cfg.dataloader.train.dataset[0]._target_ = CIFAR10Dataset
        cfg.dataloader.train.dataset[0].root = "/".join(data_path.split("/")[:-1])
        cfg.dataloader.train.dataset[0].download = True
        cfg.dataloader.train.num_workers = 0
        cfg.dataloader.test[0].dataset._target_ = CIFAR10Dataset
        cfg.dataloader.test[0].dataset.train = False
        cfg.dataloader.test[0].dataset.root = "/".join(data_path.split("/")[:-1])
        cfg.dataloader.test[0].dataset.download = True
        cfg.dataloader.test[0].num_workers = 0
        # refine mixup cfg to match the reduced class count
        cfg.dataloader.train.mixup_func.num_classes = 10
        # set training config: 10 iterations with one evaluation pass
        cfg.train.train_epoch = 0
        cfg.train.train_iter = 10
        cfg.train.evaluation.eval_period = 10
        cfg.train.evaluation.eval_iter = 10
        cfg.train.log_period = 1
        cfg.train.train_micro_batch_size = 8
        cfg.train.num_accumulation_steps = 1
        cfg.train.resume = False
        cfg.train.output_dir = TEST_OUTPUT
        cfg.train.activation_checkpoint.enabled = True
        cfg.train.amp.enabled = True
        cfg.train.rdma_enabled = False
        self.cfg = cfg

    @classmethod
    def tearDownClass(cls) -> None:
        """Remove the test output directory (one process per node)."""
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_swinv2_eager_with_data_tensor_parallel(self):
        """Eager mode, 2-way data x 2-way tensor parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        # pipeline parallelism not supported in eager global now!
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_swinv2_eager_with_pipeline_parallel(self):
        """Eager mode, 4-stage pipeline parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 1
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 4
        # total transformer layers across all SwinV2 stages
        self.cfg.train.dist.pipeline_num_layers = sum(self.cfg.model.cfg.depths)
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_swinv2_graph_with_data_tensor_parallel(self):
        """Graph mode, 2-way data x 2-way tensor parallel."""
        self.cfg.train.num_accumulation_steps = 1
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_swinv2_graph_with_data_tensor_pipeline_parallel(self):
        """Graph mode, 2-way data x 2-stage pipeline parallel with accumulation."""
        self.cfg.train.num_accumulation_steps = 4
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        # change to 2 when 2d sbp bugfix
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 2
        self.cfg.train.dist.pipeline_num_layers = sum(self.cfg.model.cfg.depths)
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_swinv2_with_zero(self):
        """Graph mode, 4-way data parallel with ZeRO stage 3 enabled."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 4
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        self.cfg.train.zero_optimization.enabled = True
        self.cfg.train.zero_optimization.stage = 3
        trainer = DefaultTrainer(self.cfg)
        trainer.train()
if __name__ == "__main__":
unittest.main()
| 6,421 | 35.078652 | 125 | py |
libai | libai-main/tests/models/test_mt5.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import oneflow as flow
import oneflow.unittest
from libai.config import LazyConfig
from libai.engine import DefaultTrainer, hooks
from libai.engine.default import _check_batch_size
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
VOCAB_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/bert-base-chinese-vocab.txt" # noqa
BIN_DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/loss_compara_content_sentence.bin" # noqa
IDX_DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/loss_compara_content_sentence.idx" # noqa
VOCAB_MD5 = "3b5b76c4aef48ecf8cb3abaafe960f09"
BIN_DATA_MD5 = "b842467bd5ea7e52f7a612ea6b4faecc"
IDX_DATA_MD5 = "cf5963b8543f0a7a867361eb980f0372"
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_mt5")
setup_logger(distributed_rank=dist.get_rank())
class TestMT5Model(flow.unittest.TestCase):
    """Smoke tests running a shrunken MT5 pretraining job for a few iterations
    under different parallelism layouts (data/tensor/pipeline, eager and graph
    mode, ZeRO). All tests require 4 devices on one node."""

    def setUp(self) -> None:
        """Build the shared pretraining config: small MT5, 10 iterations."""
        cache_dir = os.path.join(os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "bert_data")
        cfg = LazyConfig.load("projects/MT5/configs/mt5_pretrain.py")
        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(VOCAB_URL, cache_dir, md5=VOCAB_MD5)
            get_data_from_cache(BIN_DATA_URL, cache_dir, md5=BIN_DATA_MD5)
            get_data_from_cache(IDX_DATA_URL, cache_dir, md5=IDX_DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()
        vocab_path = get_data_from_cache(VOCAB_URL, cache_dir, md5=VOCAB_MD5)
        data_prefix_path = get_data_from_cache(BIN_DATA_URL, cache_dir, md5=BIN_DATA_MD5)
        # strip the ".bin" suffix: the loader expects the common prefix of .bin/.idx
        data_prefix = data_prefix_path[:-4]
        # set tokenizer and data config
        cfg.tokenization.tokenizer.vocab_file = vocab_path
        cfg.dataloader.train.dataset[0].data_prefix = data_prefix
        cfg.dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
        # FIXME(RenTianhe): fix dataloader worker bug
        cfg.dataloader.train.num_workers = 0
        # set training config: 10 iterations with one evaluation pass
        cfg.train.train_epoch = 0
        cfg.train.train_iter = 10
        cfg.train.evaluation.enabled = True
        cfg.train.evaluation.eval_period = 10
        cfg.train.evaluation.eval_iter = 10
        cfg.train.log_period = 1
        cfg.train.train_micro_batch_size = 8
        cfg.train.num_accumulation_steps = 1
        cfg.train.resume = False
        cfg.train.output_dir = TEST_OUTPUT
        # set model: shrink width/depth to keep the test fast
        cfg.model.cfg.num_attention_heads = 8
        cfg.model.cfg.hidden_size = 384
        cfg.model.cfg.hidden_layers = 3
        cfg.train.activation_checkpoint.enabled = True
        cfg.train.amp.enabled = True
        cfg.train.rdma_enabled = False
        self.cfg = cfg

    def build_hooks(self):
        """Minimal hook set used in place of DefaultTrainer's default hooks."""
        ret = [
            hooks.IterationTimer(),
            hooks.LRScheduler(),
        ]
        if dist.is_main_process():
            # run writers in the end, so that evaluation metrics are written
            ret.append(hooks.PeriodicWriter(self.build_writers(), self.cfg.train.log_period))
        return ret

    # Monkeypatch DefaultTrainer so all trainers built below use only the hooks above.
    DefaultTrainer.build_hooks = build_hooks

    @classmethod
    def tearDownClass(cls) -> None:
        """Remove the test output directory (one process per node)."""
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_mt5_eager_with_data_tensor_parallel(self):
        """Eager mode, 2-way data x 2-way tensor parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        # pipeline parallelism not supported in eager global now!
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_mt5_eager_with_pipeline_parallel(self):
        """Eager mode, 4-stage pipeline parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 1
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 4
        # encoder layers + decoder layers
        self.cfg.train.dist.pipeline_num_layers = 2 * self.cfg.model.cfg.hidden_layers
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_mt5_graph_with_data_tensor_parallel(self):
        """Graph mode, 2-way data x 2-way tensor parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_mt5_graph_with_data_tensor_pipeline_parallel(self):
        """Graph mode, 2-way data x 2-stage pipeline parallel with accumulation."""
        self.cfg.train.num_accumulation_steps = 4
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        # change to 2 when 2d sbp bugfix
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 2
        # encoder_layers + decoder_layers
        self.cfg.train.dist.pipeline_num_layers = 2 * self.cfg.model.cfg.hidden_layers
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_mt5_with_zero(self):
        """Graph mode, 4-way data parallel with ZeRO stage 3 enabled."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 4
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        self.cfg.train.zero_optimization.enabled = True
        self.cfg.train.zero_optimization.stage = 3
        trainer = DefaultTrainer(self.cfg)
        trainer.train()
if __name__ == "__main__":
unittest.main()
| 7,176 | 36.186528 | 145 | py |
libai | libai-main/tests/models/test_bert.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import oneflow as flow
import oneflow.unittest
from libai.config import LazyConfig
from libai.engine import DefaultTrainer
from libai.engine.default import _check_batch_size
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
VOCAB_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/bert-base-chinese-vocab.txt" # noqa
BIN_DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/loss_compara_content_sentence.bin" # noqa
IDX_DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/loss_compara_content_sentence.idx" # noqa
VOCAB_MD5 = "3b5b76c4aef48ecf8cb3abaafe960f09"
BIN_DATA_MD5 = "b842467bd5ea7e52f7a612ea6b4faecc"
IDX_DATA_MD5 = "cf5963b8543f0a7a867361eb980f0372"
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_bert")
setup_logger(distributed_rank=dist.get_rank())
class TestBertModel(flow.unittest.TestCase):
    """Smoke tests running a shrunken BERT pretraining job for a few iterations
    under different parallelism layouts (data/tensor/pipeline, eager and graph
    mode, ZeRO). All tests require 4 devices on one node."""

    def setUp(self) -> None:
        """Build the shared pretraining config: small BERT, 10 iterations."""
        cache_dir = os.path.join(os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "bert_data")
        cfg = LazyConfig.load("configs/bert_large_pretrain.py")
        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(VOCAB_URL, cache_dir, md5=VOCAB_MD5)
            get_data_from_cache(BIN_DATA_URL, cache_dir, md5=BIN_DATA_MD5)
            get_data_from_cache(IDX_DATA_URL, cache_dir, md5=IDX_DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()
        vocab_path = get_data_from_cache(VOCAB_URL, cache_dir, md5=VOCAB_MD5)
        data_prefix_path = get_data_from_cache(BIN_DATA_URL, cache_dir, md5=BIN_DATA_MD5)
        # strip the ".bin" suffix: the loader expects the common prefix of .bin/.idx
        data_prefix = data_prefix_path[:-4]
        # set tokenizer and data config
        cfg.tokenization.tokenizer.vocab_file = vocab_path
        cfg.dataloader.train.dataset[0].data_prefix = data_prefix
        cfg.dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
        # FIXME(RenTianhe): fix dataloader worker bug
        cfg.dataloader.train.num_workers = 0
        # set training config: 10 iterations with one evaluation pass
        cfg.train.train_epoch = 0
        cfg.train.train_iter = 10
        cfg.train.evaluation.eval_period = 10
        cfg.train.evaluation.eval_iter = 10
        cfg.train.log_period = 1
        cfg.train.train_micro_batch_size = 8
        cfg.train.test_micro_batch_size = 4
        cfg.train.num_accumulation_steps = 1
        cfg.train.resume = False
        cfg.train.output_dir = TEST_OUTPUT
        # set model: shrink width/depth to keep the test fast
        cfg.model.cfg.num_attention_heads = 8
        cfg.model.cfg.hidden_size = 384
        cfg.model.cfg.hidden_layers = 4
        cfg.train.activation_checkpoint.enabled = True
        cfg.train.amp.enabled = True
        cfg.train.rdma_enabled = False
        self.cfg = cfg

    @classmethod
    def tearDownClass(cls) -> None:
        """Remove the test output directory (one process per node)."""
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_bert_eager_with_data_tensor_parallel(self):
        """Eager mode, 2-way data x 2-way tensor parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        # pipeline parallelism not supported in eager global now!
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_bert_eager_with_pipeline_parallel(self):
        """Eager mode, 4-stage pipeline parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 1
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 4
        self.cfg.train.dist.pipeline_num_layers = self.cfg.model.cfg.hidden_layers
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_bert_graph_with_data_tensor_parallel(self):
        """Graph mode, 2-way data x 2-way tensor parallel."""
        self.cfg.train.num_accumulation_steps = 1
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_bert_graph_with_data_tensor_pipeline_parallel(self):
        """Graph mode, 2-way data x 2-stage pipeline parallel with accumulation."""
        self.cfg.train.num_accumulation_steps = 4
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        # change to 2 when 2d sbp bugfix
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 2
        self.cfg.train.dist.pipeline_num_layers = self.cfg.model.cfg.hidden_layers
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_bert_with_zero(self):
        """Graph mode, 4-way data parallel with ZeRO stage 3 enabled."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 4
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        self.cfg.train.zero_optimization.enabled = True
        self.cfg.train.zero_optimization.stage = 3
        trainer = DefaultTrainer(self.cfg)
        trainer.train()
if __name__ == "__main__":
unittest.main()
| 6,735 | 36.422222 | 145 | py |
libai | libai-main/tests/models/test_roberta.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import oneflow as flow
import oneflow.unittest
from libai.config import LazyConfig
from libai.engine import DefaultTrainer
from libai.engine.default import _check_batch_size
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
# Remote fixture files (tokenizer vocab/merges and a small indexed dataset)
# used by the RoBERTa pretraining smoke tests, with MD5 checksums to verify
# each download.
VOCAB_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/roberta_dataset/roberta-vocab.json"  # noqa
MERGE_FILE_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/roberta_dataset/roberta-merges.txt"  # noqa
BIN_DATA_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/roberta_dataset/loss_compara_content_sentence.bin"  # noqa
IDX_DATA_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/roberta_dataset/loss_compara_content_sentence.idx"  # noqa
VOCAB_MD5 = "be4d3c6f3f5495426b2c03b334334354"
MERGES_FILE_MD5 = "75a37753dd7a28a2c5df80c28bf06e4e"
BIN_DATA_MD5 = "b842467bd5ea7e52f7a612ea6b4faecc"
IDX_DATA_MD5 = "cf5963b8543f0a7a867361eb980f0372"
# All training artifacts are written here and removed in tearDownClass.
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_roberta")
# Configure the libai logger for this process's rank.
setup_logger(distributed_rank=dist.get_rank())
class TestRoBERTaModel(flow.unittest.TestCase):
    """End-to-end smoke tests for RoBERTa pretraining under several
    parallelism layouts.

    Each test configures the data/tensor/pipeline parallel sizes on a shrunken
    RoBERTa model, then runs a short (10-iteration) training loop through
    :class:`DefaultTrainer`. Every test requires one node with 4 devices.
    """

    def setUp(self) -> None:
        """Download fixtures and build a small RoBERTa config for a quick run."""
        cache_dir = os.path.join(os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "roberta_data")
        cfg = LazyConfig.load("configs/roberta_pretrain.py")

        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(VOCAB_URL, cache_dir, md5=VOCAB_MD5)
            get_data_from_cache(MERGE_FILE_URL, cache_dir, md5=MERGES_FILE_MD5)
            get_data_from_cache(BIN_DATA_URL, cache_dir, md5=BIN_DATA_MD5)
            get_data_from_cache(IDX_DATA_URL, cache_dir, md5=IDX_DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()  # other ranks wait for the downloads to finish

        vocab_path = get_data_from_cache(VOCAB_URL, cache_dir, md5=VOCAB_MD5)
        merges_file_path = get_data_from_cache(MERGE_FILE_URL, cache_dir, md5=MERGES_FILE_MD5)
        data_prefix_path = get_data_from_cache(BIN_DATA_URL, cache_dir, md5=BIN_DATA_MD5)
        data_prefix = data_prefix_path[:-4]  # strip the ".bin" suffix

        # set tokenizer and data config
        cfg.tokenization.tokenizer.vocab_file = vocab_path
        cfg.tokenization.tokenizer.merges_file = merges_file_path
        cfg.dataloader.train.dataset[0].data_prefix = data_prefix
        cfg.dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
        cfg.dataloader.train.num_workers = 0

        # set training config: a 10-iteration run with periodic evaluation
        cfg.train.train_epoch = 0
        cfg.train.train_iter = 10
        cfg.train.evaluation.eval_period = 10
        cfg.train.evaluation.eval_iter = 10
        cfg.train.log_period = 1
        cfg.train.train_micro_batch_size = 8
        cfg.train.test_micro_batch_size = 4
        cfg.train.num_accumulation_steps = 1
        cfg.train.resume = False
        cfg.train.output_dir = TEST_OUTPUT

        # set model: shrink RoBERTa so the test finishes quickly
        cfg.model.cfg.num_attention_heads = 8
        cfg.model.cfg.hidden_size = 384
        cfg.model.cfg.hidden_layers = 4
        cfg.train.activation_checkpoint.enabled = True
        cfg.train.amp.enabled = True
        cfg.train.rdma_enabled = False
        self.cfg = cfg

    @classmethod
    def tearDownClass(cls) -> None:
        """Remove the test output directory on each node's local rank 0."""
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_roberta_eager_with_data_tensor_parallel(self):
        """Eager mode, 2-way data parallel x 2-way tensor parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        # pipeline parallelism not supported in eager global now!
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_roberta_eager_with_pipeline_parallel(self):
        """Eager mode, 4-stage pipeline parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 1
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 4
        self.cfg.train.dist.pipeline_num_layers = self.cfg.model.cfg.hidden_layers
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_roberta_graph_with_data_tensor_parallel(self):
        """Graph mode, 2-way data parallel x 2-way tensor parallel."""
        self.cfg.train.num_accumulation_steps = 1
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_roberta_graph_with_data_tensor_pipeline_parallel(self):
        """Graph mode, 2-way data parallel x 2-stage pipeline parallel."""
        self.cfg.train.num_accumulation_steps = 4
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        # change to 2 when 2d sbp bugfix
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 2
        self.cfg.train.dist.pipeline_num_layers = self.cfg.model.cfg.hidden_layers
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_roberta_with_zero(self):
        """Graph mode, 4-way data parallel with ZeRO stage-3 enabled."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 4
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        self.cfg.train.zero_optimization.enabled = True
        self.cfg.train.zero_optimization.stage = 3
        trainer = DefaultTrainer(self.cfg)
        trainer.train()
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 7,129 | 37.540541 | 147 | py |
libai | libai-main/tests/models/__init__.py | 0 | 0 | 0 | py | |
libai | libai-main/tests/models/test_gpt.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import oneflow as flow
import oneflow.unittest
from libai.config import LazyConfig
from libai.engine import DefaultTrainer, hooks
from libai.engine.default import _check_batch_size
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
# Remote fixture files for the GPT-2 smoke tests: GPT-2 vocab/merges plus a
# small indexed dataset (reused from the bert_dataset bucket), with MD5
# checksums to verify each download.
VOCAB_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/gpt_dataset/gpt2-vocab.json"  # noqa
MERGE_FILE_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/gpt_dataset/gpt2-merges.txt"  # noqa
BIN_DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/loss_compara_content_sentence.bin"  # noqa
IDX_DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/loss_compara_content_sentence.idx"  # noqa
VOCAB_MD5 = "dffec25a898b1f5e569bec4dffd7e5c0"
MERGE_FILE_MD5 = "75a37753dd7a28a2c5df80c28bf06e4e"
BIN_DATA_MD5 = "b842467bd5ea7e52f7a612ea6b4faecc"
IDX_DATA_MD5 = "cf5963b8543f0a7a867361eb980f0372"
# All training artifacts are written here and removed in tearDownClass.
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_gpt")
# Configure the libai logger for this process's rank.
setup_logger(distributed_rank=dist.get_rank())
class TestGPTModel(flow.unittest.TestCase):
    """End-to-end smoke tests for GPT-2 pretraining under several
    parallelism layouts.

    Each test configures the data/tensor/pipeline parallel sizes on a shrunken
    GPT-2 model and runs a short (10-iteration) training loop through
    :class:`DefaultTrainer`. Every test requires one node with 4 devices.
    """

    def setUp(self) -> None:
        """Download fixtures and build a small GPT-2 config for a quick run."""
        cache_dir = os.path.join(os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "gpt_data")
        cfg = LazyConfig.load("configs/gpt2_pretrain.py")

        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(VOCAB_URL, cache_dir, md5=VOCAB_MD5)
            get_data_from_cache(MERGE_FILE_URL, cache_dir, md5=MERGE_FILE_MD5)
            get_data_from_cache(BIN_DATA_URL, cache_dir, md5=BIN_DATA_MD5)
            get_data_from_cache(IDX_DATA_URL, cache_dir, md5=IDX_DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()  # other ranks wait for the downloads to finish

        vocab_path = get_data_from_cache(VOCAB_URL, cache_dir, md5=VOCAB_MD5)
        merges_file = get_data_from_cache(MERGE_FILE_URL, cache_dir, md5=MERGE_FILE_MD5)
        data_prefix_path = get_data_from_cache(BIN_DATA_URL, cache_dir, md5=BIN_DATA_MD5)
        data_prefix = data_prefix_path[:-4]  # strip the ".bin" suffix

        # set tokenizer and data config
        cfg.tokenization.tokenizer.vocab_file = vocab_path
        cfg.tokenization.tokenizer.merges_file = merges_file
        cfg.dataloader.train.dataset[0].data_prefix = data_prefix
        cfg.dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
        cfg.dataloader.train.num_workers = 0

        # set training config: a 10-iteration run with evaluation enabled
        cfg.train.train_epoch = 0
        cfg.train.train_iter = 10
        cfg.train.evaluation.enabled = True
        cfg.train.evaluation.eval_period = 10
        cfg.train.evaluation.eval_iter = 10
        cfg.train.log_period = 1
        cfg.train.train_micro_batch_size = 4
        cfg.train.num_accumulation_steps = 1
        cfg.train.resume = False
        cfg.train.output_dir = TEST_OUTPUT

        # set model: shrink GPT-2 so the test finishes quickly
        cfg.model.cfg.max_seq_length = 256
        cfg.model.cfg.num_attention_heads = 8
        cfg.model.cfg.hidden_size = 384
        # (fixed: this assignment was duplicated in the original)
        cfg.model.cfg.hidden_layers = 4
        cfg.train.activation_checkpoint.enabled = True
        cfg.train.amp.enabled = True
        cfg.train.rdma_enabled = False
        # keep the dataset sequence length in sync with the model
        for ds in cfg.dataloader.train.dataset:
            ds.max_seq_length = cfg.model.cfg.max_seq_length
        self.cfg = cfg

    def build_hooks(self):
        """Return a minimal hook list: timer, LR scheduler, and (on the main
        process only) a periodic metrics writer."""
        ret = [
            hooks.IterationTimer(),
            hooks.LRScheduler(),
        ]
        if dist.is_main_process():
            # run writers in the end, so that evaluation metrics are written
            ret.append(hooks.PeriodicWriter(self.build_writers(), self.cfg.train.log_period))
        return ret

    # Monkeypatch DefaultTrainer to use the lightweight hooks above.
    DefaultTrainer.build_hooks = build_hooks

    @classmethod
    def tearDownClass(cls) -> None:
        """Remove the test output directory on each node's local rank 0."""
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_gpt_eager_with_data_tensor_parallel(self):
        """Eager mode, 2-way data parallel x 2-way tensor parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        # pipeline parallelism not supported in eager global now!
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_gpt_eager_with_pipeline_parallel(self):
        """Eager mode, 4-stage pipeline parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 1
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 4
        self.cfg.train.dist.pipeline_num_layers = self.cfg.model.cfg.hidden_layers
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_gpt_graph_with_data_tensor_parallel(self):
        """Graph mode, 2-way data parallel x 2-way tensor parallel."""
        self.cfg.train.num_accumulation_steps = 1
        # set distributed config.
        # BUGFIX: was data_parallel_size = 4, which with tensor_parallel_size
        # = 2 requires 4 * 2 = 8 ranks, but skip_unless_1n4d() provides only
        # 4 devices. Use the 2 x 2 layout, matching the sibling BERT/RoBERTa/
        # T5 tests of the same name.
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_gpt_graph_with_data_tensor_pipeline_parallel(self):
        """Graph mode, 2-way data parallel x 2-stage pipeline parallel."""
        self.cfg.train.num_accumulation_steps = 4
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        # change to 2 when 2d sbp bugfix
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 2
        self.cfg.train.dist.pipeline_num_layers = self.cfg.model.cfg.hidden_layers
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_gpt_with_zero(self):
        """Graph mode, 4-way data parallel with ZeRO stage-3 enabled."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 4
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        self.cfg.train.zero_optimization.enabled = True
        self.cfg.train.zero_optimization.stage = 3
        trainer = DefaultTrainer(self.cfg)
        trainer.train()
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 7,701 | 36.754902 | 145 | py |
libai | libai-main/tests/models/test_t5.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import oneflow as flow
import oneflow.unittest
from libai.config import LazyConfig
from libai.engine import DefaultTrainer, hooks
from libai.engine.default import _check_batch_size
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
# Remote fixture files for the T5 smoke tests (a BERT Chinese vocab and a
# small indexed dataset), with MD5 checksums to verify each download.
VOCAB_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/bert-base-chinese-vocab.txt"  # noqa
BIN_DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/loss_compara_content_sentence.bin"  # noqa
IDX_DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/loss_compara_content_sentence.idx"  # noqa
VOCAB_MD5 = "3b5b76c4aef48ecf8cb3abaafe960f09"
BIN_DATA_MD5 = "b842467bd5ea7e52f7a612ea6b4faecc"
IDX_DATA_MD5 = "cf5963b8543f0a7a867361eb980f0372"
# All training artifacts are written here and removed in tearDownClass.
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_t5")
# Configure the libai logger for this process's rank.
setup_logger(distributed_rank=dist.get_rank())
class TestT5Model(flow.unittest.TestCase):
    """End-to-end smoke tests for T5 pretraining under several parallelism
    layouts.

    Each test configures the data/tensor/pipeline parallel sizes on a shrunken
    T5 model and runs a short (10-iteration) training loop through
    :class:`DefaultTrainer`. Because T5 is an encoder-decoder model, the
    pipeline tests use ``2 * hidden_layers`` as the total pipeline layer
    count. Every test requires one node with 4 devices.
    """

    def setUp(self) -> None:
        """Download fixtures and build a small T5 config for a quick run."""
        cache_dir = os.path.join(os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "bert_data")
        cfg = LazyConfig.load("configs/t5_large_pretrain.py")

        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(VOCAB_URL, cache_dir, md5=VOCAB_MD5)
            get_data_from_cache(BIN_DATA_URL, cache_dir, md5=BIN_DATA_MD5)
            get_data_from_cache(IDX_DATA_URL, cache_dir, md5=IDX_DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()  # other ranks wait for the downloads to finish

        vocab_path = get_data_from_cache(VOCAB_URL, cache_dir, md5=VOCAB_MD5)
        data_prefix_path = get_data_from_cache(BIN_DATA_URL, cache_dir, md5=BIN_DATA_MD5)
        data_prefix = data_prefix_path[:-4]  # strip the ".bin" suffix

        # set tokenizer and data config
        cfg.tokenization.tokenizer.vocab_file = vocab_path
        cfg.dataloader.train.dataset[0].data_prefix = data_prefix
        cfg.dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
        # FIXME(RenTianhe): fix dataloader worker bug
        cfg.dataloader.train.num_workers = 0

        # set training config: a 10-iteration run with evaluation enabled
        cfg.train.train_epoch = 0
        cfg.train.train_iter = 10
        cfg.train.evaluation.enabled = True
        cfg.train.evaluation.eval_period = 10
        cfg.train.evaluation.eval_iter = 10
        cfg.train.log_period = 1
        cfg.train.train_micro_batch_size = 8
        cfg.train.num_accumulation_steps = 1
        cfg.train.resume = False
        cfg.train.output_dir = TEST_OUTPUT

        # set model: shrink T5 so the test finishes quickly
        cfg.model.cfg.num_attention_heads = 8
        cfg.model.cfg.hidden_size = 384
        cfg.model.cfg.hidden_layers = 3
        cfg.train.activation_checkpoint.enabled = True
        cfg.train.amp.enabled = True
        cfg.train.rdma_enabled = False
        self.cfg = cfg

    def build_hooks(self):
        """Return a minimal hook list: timer, LR scheduler, and (on the main
        process only) a periodic metrics writer."""
        ret = [
            hooks.IterationTimer(),
            hooks.LRScheduler(),
        ]
        if dist.is_main_process():
            # run writers in the end, so that evaluation metrics are written
            ret.append(hooks.PeriodicWriter(self.build_writers(), self.cfg.train.log_period))
        return ret

    # Monkeypatch DefaultTrainer to use the lightweight hooks above.
    DefaultTrainer.build_hooks = build_hooks

    @classmethod
    def tearDownClass(cls) -> None:
        """Remove the test output directory on each node's local rank 0."""
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_t5_eager_with_data_tensor_parallel(self):
        """Eager mode, 2-way data parallel x 2-way tensor parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        # pipeline parallelism not supported in eager global now!
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_t5_eager_with_pipeline_parallel(self):
        """Eager mode, 4-stage pipeline parallel over encoder + decoder."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 1
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 4
        self.cfg.train.dist.pipeline_num_layers = 2 * self.cfg.model.cfg.hidden_layers
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_t5_graph_with_data_tensor_parallel(self):
        """Graph mode, 2-way data parallel x 2-way tensor parallel."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_t5_graph_with_data_tensor_pipeline_parallel(self):
        """Graph mode, 2-way data parallel x 2-stage pipeline parallel."""
        self.cfg.train.num_accumulation_steps = 4
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        # change to 2 when 2d sbp bugfix
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 2
        # encoder_layers + decoder_layers
        self.cfg.train.dist.pipeline_num_layers = 2 * self.cfg.model.cfg.hidden_layers
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()

    @flow.unittest.skip_unless_1n4d()
    def test_t5_with_zero(self):
        """Graph mode, 4-way data parallel with ZeRO stage-3 enabled."""
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 4
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        self.cfg.train.zero_optimization.enabled = True
        self.cfg.train.zero_optimization.stage = 3
        trainer = DefaultTrainer(self.cfg)
        trainer.train()
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 7,161 | 36.108808 | 145 | py |
libai | libai-main/tests/tokenizer/test_tokenization_roberta.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from libai.tokenizer.tokenization_roberta import VOCAB_FILES_NAMES, RobertaTokenizer
from tests.tokenizer.test_tokenization_common import TokenizerTesterMixin
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """RoBERTa tokenizer tests: builds a tiny BPE vocab/merges fixture on disk
    and runs the common tokenizer suite from TokenizerTesterMixin plus a few
    RoBERTa-specific checks."""

    tokenizer_class = RobertaTokenizer

    def setUp(self):
        """Write a miniature BPE vocab and merges file into the temp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        # "\u0120" is the GPT-2/RoBERTa byte-level marker for a leading space.
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Build a RobertaTokenizer from the fixture dir, forcing the <unk> token."""
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Return an (input, expected round-trip output) text pair."""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Check BPE tokenization and token-to-id conversion on the tiny vocab."""
        tokenizer = RobertaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    # NOTE(review): this method lacks the `test_` prefix, so unittest never
    # runs it — confirm whether that is intentional.
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(
            tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2]
        )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 3,451 | 34.22449 | 98 | py |
libai | libai-main/tests/tokenizer/test_tokenization_common.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import pickle
import re
import shutil
import tempfile
from typing import Tuple
from libai.tokenizer import PreTrainedTokenizer
from tests.fixtures.utils import get_fixtures
def get_tests_dir(append_path=None):
    """Return the absolute directory of the file that called this function.

    Args:
        append_path: optional path to append to the tests dir path

    Return:
        The full path to the `tests` dir, so that the tests can be invoked
        from anywhere. Optionally `append_path` is joined after the `tests`
        dir when provided.
    """
    # Frame 1 of the stack is this function's caller; index 1 is its filename.
    caller_file = inspect.stack()[1][1]
    base_dir = os.path.abspath(os.path.dirname(caller_file))
    return os.path.join(base_dir, append_path) if append_path else base_dir
class TokenizerTesterMixin:
    """Shared test suite mixed into per-tokenizer TestCase classes.

    Subclasses set ``tokenizer_class`` and override ``get_tokenizer`` /
    ``get_input_output_texts`` to supply fixtures; the ``test_*`` methods here
    then exercise the common PreTrainedTokenizer contract (save/load, pickle,
    added tokens, special tokens, encode/decode round-trips, vocab access).
    """

    # Set by each concrete subclass to the tokenizer class under test.
    tokenizer_class = None

    def setUp(self):
        """Fetch the shared sample-text fixture and create a temp work dir."""
        self.tokenizers_list = []
        get_fixtures("sample_text.txt")
        with open(f"{get_tests_dir()}/../fixtures/sample_text.txt", encoding="utf-8") as f_data:
            self._data = f_data.read().replace("\n\n", "\n").strip()
        self.tmpdirname = tempfile.mkdtemp()

    def tearDown(self):
        """Delete the temp work dir created in setUp."""
        shutil.rmtree(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        """Return an (input text, expected decoded text) pair; overridable."""
        input_txt = self.get_clean_sequence(tokenizer)[0]
        return input_txt, input_txt

    def get_clean_sequence(
        self, tokenizer, with_prefix_space=False, max_length=20
    ) -> Tuple[str, list]:
        """Build a text/ids pair from tokens that round-trip cleanly.

        Keeps only ids whose decoded form is plain ASCII letters/spaces and
        re-encodes to the same single id, then checks decode consistency.
        """
        toks = [
            (i, tokenizer.decode([i], clean_up_tokenization_spaces=False))
            for i in range(len(tokenizer))
        ]
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1]), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            # force at least one space so text-level splitting is exercised
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt)
        return output_txt, output_ids

    def get_tokenizers(self, **kwargs):
        """Return the list of tokenizers to run each test against."""
        return [self.get_tokenizer(**kwargs)]

    def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
        """Instantiate the tokenizer under test from the temp fixture dir."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def test_tokenizers_common_properties(self):
        """All standard special-token attributes and bookkeeping dicts exist."""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                for attr in attributes_list:
                    self.assertTrue(hasattr(tokenizer, attr))
                    self.assertTrue(hasattr(tokenizer, attr + "_id"))

                self.assertTrue(hasattr(tokenizer, "additional_special_tokens"))
                self.assertTrue(hasattr(tokenizer, "additional_special_tokens_ids"))

                attributes_list = [
                    "init_inputs",
                    "init_kwargs",
                    "added_tokens_encoder",
                    "added_tokens_decoder",
                ]
                for attr in attributes_list:
                    self.assertTrue(hasattr(tokenizer, attr))

    def test_save_and_load_tokenizer(self):
        """save_pretrained/from_pretrained preserve encodings, vocab, and
        any tokens added before saving."""
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text)
                before_vocab = tokenizer.get_vocab()
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text)
                after_vocab = after_tokenizer.get_vocab()
                self.assertListEqual(before_tokens, after_tokens)
                self.assertDictEqual(before_vocab, after_vocab)

                shutil.rmtree(tmpdirname)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens(
                    {"additional_special_tokens": additional_special_tokens}
                )
                before_tokens = tokenizer.encode(sample_text)
                before_vocab = tokenizer.get_vocab()
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text)
                after_vocab = after_tokenizer.get_vocab()
                self.assertListEqual(before_tokens, after_tokens)
                self.assertDictEqual(before_vocab, after_vocab)
                self.assertIn("bim", after_vocab)
                self.assertIn("bambam", after_vocab)
                self.assertIn(
                    "new_additional_special_token", after_tokenizer.additional_special_tokens
                )

                shutil.rmtree(tmpdirname)

    def test_pickle_tokenizer(self):
        """Google pickle __getstate__ __setstate__ if you are struggling with this."""
        # A pickled-and-restored tokenizer must tokenize identically.
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertIsNotNone(tokenizer)

                text = "Munich and Berlin are nice cities"
                subwords = tokenizer.tokenize(text)

                filename = os.path.join(self.tmpdirname, "tokenizer.bin")
                with open(filename, "wb") as handle:
                    pickle.dump(tokenizer, handle)

                with open(filename, "rb") as handle:
                    tokenizer_new = pickle.load(handle)

                subwords_loaded = tokenizer_new.tokenize(text)

                self.assertListEqual(subwords, subwords_loaded)

    def test_added_tokens_do_lower_case(self):
        """Added tokens follow the tokenizer's do_lower_case setting, while
        special tokens are never lowercased."""
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                if not hasattr(tokenizer, "do_lower_case") or not tokenizer.do_lower_case:
                    continue

                special_token = tokenizer.all_special_tokens[0]

                text = special_token + " aaaaa bbbbbb low cccccccccdddddddd l " + special_token
                text2 = special_token + " AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l " + special_token

                toks0 = tokenizer.tokenize(text)  # toks before adding new_toks

                new_toks = [
                    "aaaaa bbbbbb",
                    "cccccccccdddddddd",
                    "AAAAA BBBBBB",
                    "CCCCCCCCCDDDDDDDD",
                ]
                added = tokenizer.add_tokens(new_toks)
                # lowercase mode collapses the upper/lower pairs, so only 2 new
                self.assertEqual(added, 2)

                toks = tokenizer.tokenize(text)
                toks2 = tokenizer.tokenize(text2)

                self.assertEqual(len(toks), len(toks2))
                self.assertListEqual(toks, toks2)
                self.assertNotEqual(len(toks), len(toks0))  # toks0 should be longer

                # Check that none of the special tokens are lowercased
                sequence_with_special_tokens = (
                    "A " + " yEs ".join(tokenizer.all_special_tokens) + " B"
                )
                tokenized_sequence = tokenizer.tokenize(sequence_with_special_tokens)

                for special_token in tokenizer.all_special_tokens:
                    self.assertTrue(special_token in tokenized_sequence)

        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = tokenizer.all_special_tokens[0]

                text = special_token + " aaaaa bbbbbb low cccccccccdddddddd l " + special_token
                text2 = special_token + " AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l " + special_token

                new_toks = [
                    "aaaaa bbbbbb",
                    "cccccccccdddddddd",
                    "AAAAA BBBBBB",
                    "CCCCCCCCCDDDDDDDD",
                ]

                toks0 = tokenizer.tokenize(text)  # toks before adding new_toks

                added = tokenizer.add_tokens(new_toks)
                # case-sensitive mode keeps all four as distinct tokens
                self.assertEqual(added, 4)

                toks = tokenizer.tokenize(text)
                toks2 = tokenizer.tokenize(text2)

                self.assertEqual(len(toks), len(toks2))  # Length should still be the same
                self.assertNotEqual(
                    toks[1], toks2[1]
                )  # But at least the first non-special tokens should differ
                self.assertNotEqual(len(toks), len(toks0))  # toks0 should be longer

    def test_add_tokens_tokenizer(self):
        """add_tokens/add_special_tokens grow len(tokenizer) (not vocab_size)
        and the new tokens encode to ids above the base vocab."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests
                # because our vocab fixtures are smaller than the original vocabs
                # let's not assert this self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l")

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l"
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-2], tokens[-3])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-2], tokenizer.pad_token_id)

    def test_add_special_tokens(self):
        """A newly-registered special token encodes to a single id and is
        stripped by skip_special_tokens on decode."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(
                    ids + encoded_special_token, clean_up_tokenization_spaces=False
                )
                encoded = tokenizer.encode(text)

                input_encoded = tokenizer.encode(input_text)
                special_token_id = tokenizer.encode(special_token)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        """tokenize -> convert_tokens_to_ids == encode, and decode round-trips."""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2, output_text)

    def test_encode_decode_with_spaces(self):
        """Added bracketed tokens survive an encode/decode round-trip with
        the separating spaces intact."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                new_toks = ["[ABC]", "[DEF]"]
                tokenizer.add_tokens(new_toks)
                input = "[ABC] [DEF] [ABC] [DEF]"
                encoded = tokenizer.encode(input)
                decoded = tokenizer.decode(encoded)
                self.assertEqual(decoded, input)

    def test_pretrained_model_lists(self):
        """Every pretrained_vocab_files_map file-id lists the same model
        identifiers as max_model_input_sizes."""
        weights_list = list(self.tokenizer_class.max_model_input_sizes.keys())
        weights_lists_2 = []
        for file_id, map_list in self.tokenizer_class.pretrained_vocab_files_map.items():
            weights_lists_2.append(list(map_list.keys()))

        for weights_list_2 in weights_lists_2:
            self.assertListEqual(weights_list, weights_list_2)

    def test_get_vocab(self):
        """get_vocab is a dict of size len(tokenizer) consistent with both
        token<->id conversion directions, including freshly added tokens."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab = tokenizer.get_vocab()
                self.assertIsInstance(vocab, dict)
                self.assertEqual(len(vocab), len(tokenizer))

                for word, ind in vocab.items():
                    self.assertEqual(tokenizer.convert_tokens_to_ids(word), ind)
                    self.assertEqual(tokenizer.convert_ids_to_tokens(ind), word)

                tokenizer.add_tokens(["asdfasdfasdfasdf"])
                vocab = tokenizer.get_vocab()
                self.assertIsInstance(vocab, dict)
                self.assertEqual(len(vocab), len(tokenizer))
# ---------------------------------------------------------------------------
# File boundary: tests/tokenizer/test_tokenization_bert.py
# ---------------------------------------------------------------------------
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from libai.tokenizer.tokenization_base import _is_control, _is_punctuation, _is_whitespace
from libai.tokenizer.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
)
from tests.tokenizer.test_tokenization_common import TokenizerTesterMixin
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for ``BertTokenizer``, its ``BasicTokenizer`` and
    ``WordpieceTokenizer`` components, and the character-classification
    helpers (``_is_whitespace``/``_is_control``/``_is_punctuation``)."""
    tokenizer_class = BertTokenizer
    def setUp(self):
        """Write a tiny wordpiece vocabulary into the mixin's temp dir."""
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        # Accented upper-case input; expected output after lower-casing and
        # accent stripping by the full tokenizer.
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        # End-to-end: wordpiece split plus token-to-id conversion against the
        # fixture vocab written in setUp.
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        # CJK characters must each become an individual token.
        tokenizer = BasicTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"]
        )
    def test_basic_tokenizer_lower(self):
        # Lower-casing mode: whitespace collapsed, accents stripped.
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "),
            ["hello", "!", "how", "are", "you", "?"],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        # Case-preserving mode keeps the original capitalization.
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "),
            ["HeLLo", "!", "how", "Are", "yoU", "?"],
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        # Tokens listed in ``never_split`` must pass through unmodified.
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"),
            ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"],
        )
    def test_wordpiece_tokenizer(self):
        # Greedy longest-match-first wordpiece behavior, including the
        # unknown-token fallback for out-of-vocab words.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
        ]
        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(
            tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"]
        )
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        # U+00A0 (no-break space) counts as whitespace.
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))
    def test_is_control(self):
        # \t and \r are treated as whitespace, not control, by the helper.
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))
    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
if __name__ == "__main__":
unittest.main()
# ---------------------------------------------------------------------------
# File boundary: tests/tokenizer/test_tokenization_gpt2.py
# ---------------------------------------------------------------------------
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from libai.tokenizer.tokenization_gpt2 import VOCAB_FILES_NAMES, GPT2Tokenizer
from tests.tokenizer.test_tokenization_common import TokenizerTesterMixin
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for ``GPT2Tokenizer`` against a minimal BPE vocab/merges fixture."""
    tokenizer_class = GPT2Tokenizer
    def setUp(self):
        """Write a tiny BPE vocabulary (JSON) and merges file into the temp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        # Always inject the fixture's unk_token into the loaded tokenizer.
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        # GPT-2 round-trips text unchanged (no lower-casing / normalization).
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        # \u0120 is the byte-level BPE marker for a leading space.
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = " lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
if __name__ == "__main__":
unittest.main()
# ---------------------------------------------------------------------------
# File boundary: tests/tokenizer/__init__.py (empty package marker)
# File boundary: tests/tokenizer/test_tokenization_t5.py
# ---------------------------------------------------------------------------
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
from libai.tokenizer.tokenization_t5 import T5Tokenizer
from tests.fixtures.utils import get_fixtures
from tests.tokenizer.test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../fixtures/test_sentencepiece.model"
)
class T5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for ``T5Tokenizer`` backed by a small SentencePiece fixture model."""
    tokenizer_class = T5Tokenizer
    def setUp(self):
        """Download the SentencePiece fixture and save a tokenizer from it."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        get_fixtures(SAMPLE_VOCAB)
        tokenizer = T5Tokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        # First/last entries and total size of the fixture vocabulary.
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_101)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_100)
    def test_full_tokenizer(self):
        # SentencePiece split, id conversion, and id->token round trip (where
        # out-of-vocab pieces come back as "<unk>").
        tokenizer = T5Tokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # "9" and "é" are out of vocab (id 0), so they decode back as "<unk>".
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_and_load_tokenizer(self):
        """Saving and reloading must preserve both the encoding and the vocab."""
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text)
                before_vocab = tokenizer.get_vocab()
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text)
                after_vocab = after_tokenizer.get_vocab()
                self.assertListEqual(before_tokens, after_tokens)
                self.assertDictEqual(before_vocab, after_vocab)
                shutil.rmtree(tmpdirname)
if __name__ == "__main__":
unittest.main()
# ---------------------------------------------------------------------------
# File boundary: tests/model_loader/test_mt5_loader.py
# ---------------------------------------------------------------------------
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
from projects.MT5.configs.mt5_base import cfg as libai_cfg
from projects.MT5.mt5_model import MT5Model
from projects.MT5.utils.mt5_loader import T5LoaderHuggerFace
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/mt5_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/mt5_utils/config.json" # noqa
PRETRAINED_MODEL_MD5 = "4c9c0be541b89de9b01c597ec4cc371a"
PRETRAINED_MODEL_CONFIG_MD5 = "b159e41603b7eeaf9a9c489165bbcaca"
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_mt5_utils")
setup_logger(distributed_rank=dist.get_rank())
class TestMT5Loader(flow.unittest.TestCase):
    """Loads a HuggingFace mT5 checkpoint into LiBai's ``MT5Model`` via
    ``T5LoaderHuggerFace`` and checks a fixed forward-pass logit sum under
    two parallelism layouts (requires 1 node x 4 devices)."""
    def setUp(self) -> None:
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "mt5_utils_data"
        )
        self.pretrained_model_path = cache_dir
        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()
        # prepare input data
        self.encoder_input_ids = [
            [101, 2009, 1005, 1055, 2986, 2651, 1012, 102],
            [101, 2028, 12314, 3377, 102, 0, 0, 0],
            [101, 2064, 2017, 3305, 2009, 102, 0, 0],
        ]
        self.encoder_att_mask = [
            [1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1],
        ]
        self.decoder_input_ids = [
            [101, 2009, 1005, 1055, 2986],
            [101, 2028, 12314, 3377, 102],
            [101, 2064, 2017, 3305, 2009],
        ]
        self.decoder_att_mask = [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
    @classmethod
    def tearDownClass(cls) -> None:
        # Clean the unittest output directory once per node.
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)
    @flow.unittest.skip_unless_1n4d()
    def test_mt5_loader_with_data_tensor_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        # Dropouts are zeroed so the forward pass is deterministic.
        load_func = T5LoaderHuggerFace(
            model=MT5Model,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            hidden_dropout_prob=0.0,
            attention_probs_dropout_prob=0.0,
            embedding_dropout_prob=0.0,
            model_type="mt5",
        )
        model = load_func.load()
        model.eval()
        encoder_input_ids = flow.tensor(
            self.encoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_input_ids = flow.tensor(
            self.decoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        encode_att_mask = flow.tensor(
            self.encoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_att_mask = flow.tensor(
            self.decoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        logits = model(
            encoder_input_ids, decoder_input_ids, encode_att_mask, decoder_att_mask, encode_att_mask
        )["logits"]
        # Golden value recorded from a known-good load of this checkpoint.
        self.assertTrue(
            np.allclose(
                np.array(-83584720.0),
                logits.sum().data.numpy(),
            )
        )
    @flow.unittest.skip_unless_1n4d()
    def test_mt5_loader_with_data_tensor_pipeline_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=16,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = T5LoaderHuggerFace(
            model=MT5Model,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            hidden_dropout_prob=0.0,
            attention_probs_dropout_prob=0.0,
            embedding_dropout_prob=0.0,
            model_type="mt5",
        )
        model = load_func.load()
        model.eval()
        encoder_input_ids = flow.tensor(
            self.encoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_input_ids = flow.tensor(
            self.decoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        encode_att_mask = flow.tensor(
            self.encoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_att_mask = flow.tensor(
            self.decoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        logits = model(
            encoder_input_ids, decoder_input_ids, encode_att_mask, decoder_att_mask, encode_att_mask
        )["logits"]
        # Same golden value: parallel layout must not change the result.
        self.assertTrue(
            np.allclose(
                np.array(-83584720.0),
                logits.sum().data.numpy(),
            )
        )
if __name__ == "__main__":
unittest.main()
# ---------------------------------------------------------------------------
# File boundary: tests/model_loader/test_t5_loader.py
# ---------------------------------------------------------------------------
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
from projects.MT5.configs.mt5_base import cfg as libai_cfg
from projects.MT5.mt5_model import MT5Model
from projects.MT5.utils.mt5_loader import T5LoaderHuggerFace
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/t5_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/t5_utils/config.json" # noqa
PRETRAINED_MODEL_MD5 = "952862a8ba425a25739a69e5f33b0df8"
PRETRAINED_MODEL_CONFIG_MD5 = "7ebc91dc4377c01190f4116c3c1ac6cd"
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_t5_utils")
setup_logger(distributed_rank=dist.get_rank())
class TestT5Loader(flow.unittest.TestCase):
    """Loads a HuggingFace T5 checkpoint into LiBai's ``MT5Model`` (with
    ``model_type="t5"``) and checks a fixed forward-pass logit sum under two
    parallelism layouts (requires 1 node x 4 devices)."""
    def setUp(self) -> None:
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "t5_utils_data"
        )
        self.pretrained_model_path = cache_dir
        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()
        # prepare input data
        self.encoder_input_ids = [
            [101, 2009, 1005, 1055, 2986, 2651, 1012, 102],
            [101, 2028, 12314, 3377, 102, 0, 0, 0],
            [101, 2064, 2017, 3305, 2009, 102, 0, 0],
        ]
        self.encoder_att_mask = [
            [1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1],
        ]
        self.decoder_input_ids = [
            [101, 2009, 1005, 1055, 2986],
            [101, 2028, 12314, 3377, 102],
            [101, 2064, 2017, 3305, 2009],
        ]
        self.decoder_att_mask = [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
    @classmethod
    def tearDownClass(cls) -> None:
        # Clean the unittest output directory once per node.
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)
    @flow.unittest.skip_unless_1n4d()
    def test_t5_loader_with_data_tensor_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        # Dropouts are zeroed so the forward pass is deterministic.
        load_func = T5LoaderHuggerFace(
            model=MT5Model,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            hidden_dropout_prob=0.0,
            attention_probs_dropout_prob=0.0,
            embedding_dropout_prob=0.0,
            model_type="t5",
        )
        model = load_func.load()
        model.eval()
        encoder_input_ids = flow.tensor(
            self.encoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_input_ids = flow.tensor(
            self.decoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        encode_att_mask = flow.tensor(
            self.encoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_att_mask = flow.tensor(
            self.decoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        logits = model(
            encoder_input_ids, decoder_input_ids, encode_att_mask, decoder_att_mask, encode_att_mask
        )["logits"]
        # Golden value recorded from a known-good load of this checkpoint.
        self.assertTrue(
            np.allclose(
                np.array(-9836561.0),
                logits.sum().data.numpy(),
            )
        )
    @flow.unittest.skip_unless_1n4d()
    def test_t5_loader_with_data_tensor_pipeline_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=24,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = T5LoaderHuggerFace(
            model=MT5Model,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            hidden_dropout_prob=0.0,
            attention_probs_dropout_prob=0.0,
            embedding_dropout_prob=0.0,
            model_type="t5",
        )
        model = load_func.load()
        model.eval()
        encoder_input_ids = flow.tensor(
            self.encoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_input_ids = flow.tensor(
            self.decoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        encode_att_mask = flow.tensor(
            self.encoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_att_mask = flow.tensor(
            self.decoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        logits = model(
            encoder_input_ids, decoder_input_ids, encode_att_mask, decoder_att_mask, encode_att_mask
        )["logits"]
        # Same golden value: parallel layout must not change the result.
        self.assertTrue(
            np.allclose(
                np.array(-9836561.0),
                logits.sum().data.numpy(),
            )
        )
if __name__ == "__main__":
unittest.main()
# ---------------------------------------------------------------------------
# File boundary: tests/model_loader/test_roberta_loader.py
# ---------------------------------------------------------------------------
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
import libai
from configs.common.models.roberta import cfg as libai_cfg
from libai.models.utils import RobertaLoaderHuggerFace
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/roberta_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/roberta_utils/config.json" # noqa
PRETRAINED_MODEL_MD5 = "73db58b6c51b028e0ee031f12261b51d" # noqa
PRETRAINED_MODEL_CONFIG_MD5 = "a53c22291c7f25d5077260ad5ca4d5fa" # noqa
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_roberta_utils")
setup_logger(distributed_rank=dist.get_rank())
class TestRobertaLoader(flow.unittest.TestCase):
    """Loads a HuggingFace RoBERTa checkpoint into LiBai's ``RobertaModel``
    via ``RobertaLoaderHuggerFace`` and checks a fixed hidden-state sum under
    two parallelism layouts (requires 1 node x 4 devices)."""
    def setUp(self) -> None:
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "roberta_utils_data"
        )
        self.pretrained_model_path = cache_dir
        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()
        # prepare input data
        self.input_ids = [
            [101, 2009, 1005, 1055, 2986, 2651, 1012, 102],
            [101, 2028, 12314, 3377, 102, 0, 0, 0],
            [101, 2064, 2017, 3305, 2009, 102, 0, 0],
        ]
        self.mask = [[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]]
    @classmethod
    def tearDownClass(cls) -> None:
        # Clean the unittest output directory once per node.
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)
    @flow.unittest.skip_unless_1n4d()
    def test_roberta_loader_with_data_tensor_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        # Fusions/AMP disabled to keep the forward numerics deterministic.
        load_func = RobertaLoaderHuggerFace(
            model=libai.models.RobertaModel,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            bias_gelu_fusion=False,
            bias_dropout_fusion=False,
            scale_mask_softmax_fusion=False,
            apply_query_key_layer_scaling=False,
            apply_residual_post_layernorm=True,
            amp_enabled=False,
        )
        model = load_func.load()
        model.eval()
        input_ids = flow.tensor(
            self.input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.vocab_embeddings.weight.placement,
        )
        mask = flow.tensor(
            self.mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.vocab_embeddings.weight.placement,
        )
        last_hidden_state, _ = model(input_ids, mask)
        # Golden value recorded from a known-good load of this checkpoint.
        self.assertTrue(
            np.allclose(np.array(341.5831), last_hidden_state.sum().data.numpy(), 1e-4, 1e-4)
        )
    @flow.unittest.skip_unless_1n4d()
    def test_roberta_loader_with_data_tensor_pipeline_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = RobertaLoaderHuggerFace(
            model=libai.models.RobertaModel,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            bias_gelu_fusion=False,
            bias_dropout_fusion=False,
            scale_mask_softmax_fusion=False,
            apply_query_key_layer_scaling=False,
            apply_residual_post_layernorm=True,
            amp_enabled=False,
        )
        model = load_func.load()
        model.eval()
        input_ids = flow.tensor(
            self.input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.vocab_embeddings.weight.placement,
        )
        mask = flow.tensor(
            self.mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.vocab_embeddings.weight.placement,
        )
        last_hidden_state, _ = model(input_ids, mask)
        # Same golden value: parallel layout must not change the result.
        self.assertTrue(
            np.allclose(np.array(341.5831), last_hidden_state.sum().data.numpy(), 1e-4, 1e-4)
        )
if __name__ == "__main__":
unittest.main()
# ---------------------------------------------------------------------------
# File boundary: tests/model_loader/test_gpt_loader.py
# ---------------------------------------------------------------------------
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
import libai
from configs.common.models.gpt import cfg as libai_cfg
from libai.models.utils import GPT2LoaderHuggerFace
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/gpt_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/gpt_utils/config.json" # noqa
PRETRAINED_MODEL_MD5 = "c086214036308afc71896da17ca0442a"
PRETRAINED_MODEL_CONFIG_MD5 = "6e1dba197b511b8759d6ad4551095a29"
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_gpt_utils")
setup_logger(distributed_rank=dist.get_rank())
class TestGPT2Loader(flow.unittest.TestCase):
"""The activation function of gpt2 in LiBai is GELU, so the result here is to
replace the activation function of gpt2 in huggingface from gelu_new to gelu.
"""
    def setUp(self) -> None:
        """Fetch the GPT-2 checkpoint/config into the cache dir and prepare
        the fixed token-id batch used by all tests in this class."""
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "gpt_utils_data"
        )
        self.pretrained_model_path = cache_dir
        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()
        # prepare input data
        self.input_ids = [
            [101, 2009, 1005, 1055, 2986, 2651, 1012, 102],
            [101, 2028, 12314, 3377, 102, 0, 0, 0],
            [101, 2064, 2017, 3305, 2009, 102, 0, 0],
        ]
    @classmethod
    def tearDownClass(cls) -> None:
        # Clean the unittest output directory once per node.
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)
    @flow.unittest.skip_unless_1n4d()
    def test_gpt_loader_with_data_tensor_parallel(self):
        """2-way data x 2-way tensor parallel load must reproduce the golden
        logit sum for the fixed input batch."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        # Dropouts are zeroed so the forward pass is deterministic.
        load_func = GPT2LoaderHuggerFace(
            model=libai.models.GPTModel,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            bias_gelu_fusion=False,
            bias_dropout_fusion=False,
            scale_mask_softmax_fusion=True,
            apply_query_key_layer_scaling=True,
            apply_residual_post_layernorm=False,
            amp_enabled=False,
            attention_dropout_prob=0,
            output_dropout_prob=0,
        )
        model = load_func.load()
        model.eval()
        input_ids = flow.tensor(
            self.input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.token_embeddings.weight.placement,
        )
        logits = model(input_ids)
        # Golden value recorded from a known-good load of this checkpoint.
        self.assertTrue(
            np.allclose(
                np.array(-93505050.0),
                logits.sum().data.numpy(),
            )
        )
@flow.unittest.skip_unless_1n4d()
def test_gpt_loader_with_data_tensor_pipeline_parallel(self):
# set distributed config
dist_cfg = DictConfig(
dict(
data_parallel_size=2,
tensor_parallel_size=1,
pipeline_parallel_size=2,
pipeline_num_layers=12,
)
)
dist.setup_dist_util(dist_cfg)
# load model
load_func = GPT2LoaderHuggerFace(
model=libai.models.GPTModel,
libai_cfg=libai_cfg,
pretrained_model_path=self.pretrained_model_path,
bias_gelu_fusion=False,
bias_dropout_fusion=False,
scale_mask_softmax_fusion=True,
apply_query_key_layer_scaling=True,
apply_residual_post_layernorm=False,
amp_enabled=False,
attention_dropout_prob=0,
output_dropout_prob=0,
)
model = load_func.load()
model.eval()
input_ids = flow.tensor(
self.input_ids,
dtype=flow.long,
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=model.embeddings.token_embeddings.weight.placement,
)
logits = model(input_ids)
self.assertTrue(
np.allclose(
np.array(-93505050.0),
logits.sum().data.numpy(),
)
)
@flow.unittest.skip_unless_1n4d()
def test_gpt_loader_with_data_tensor_parallel_backward(self):
# set distributed config
dist_cfg = DictConfig(
dict(
data_parallel_size=2,
tensor_parallel_size=2,
pipeline_parallel_size=1,
)
)
dist.setup_dist_util(dist_cfg)
# load model
load_func = GPT2LoaderHuggerFace(
model=libai.models.GPTModel,
libai_cfg=libai_cfg,
pretrained_model_path=self.pretrained_model_path,
bias_gelu_fusion=False,
bias_dropout_fusion=False,
scale_mask_softmax_fusion=True,
apply_query_key_layer_scaling=True,
apply_residual_post_layernorm=False,
amp_enabled=False,
attention_dropout_prob=0,
output_dropout_prob=0,
embedding_dropout_prob=0,
)
model = load_func.load()
input_ids = flow.tensor(
self.input_ids,
dtype=flow.long,
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=model.embeddings.token_embeddings.weight.placement,
)
logits = model(input_ids)
loss = logits.sum()
loss.backward()
self.assertTrue(
np.allclose(-24882176.0, model.transformer.layernorm_f.weight.grad.sum().numpy())
)
self.assertTrue(
np.allclose(
3.1779e08, model.embeddings.token_embeddings.weight.grad.sum().numpy(), 1e-3
)
)
@flow.unittest.skip_unless_1n4d()
def test_gpt_loader_with_data_tensor_pipeline_parallel_backward(self):
# set distributed config
dist_cfg = DictConfig(
dict(
data_parallel_size=2,
tensor_parallel_size=1,
pipeline_parallel_size=2,
pipeline_num_layers=12,
)
)
dist.setup_dist_util(dist_cfg)
# load model
load_func = GPT2LoaderHuggerFace(
model=libai.models.GPTModel,
libai_cfg=libai_cfg,
pretrained_model_path=self.pretrained_model_path,
bias_gelu_fusion=False,
bias_dropout_fusion=False,
scale_mask_softmax_fusion=True,
apply_query_key_layer_scaling=True,
apply_residual_post_layernorm=False,
amp_enabled=False,
attention_dropout_prob=0,
output_dropout_prob=0,
embedding_dropout_prob=0,
)
model = load_func.load()
input_ids = flow.tensor(
self.input_ids,
dtype=flow.long,
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=model.embeddings.token_embeddings.weight.placement,
)
logits = model(input_ids)
loss = logits.sum()
loss.backward()
self.assertTrue(
np.allclose(-24882176.0, model.transformer.layernorm_f.weight.grad.sum().numpy())
)
self.assertTrue(
np.allclose(317785760.0, model.embeddings.token_embeddings.weight.grad.sum().numpy())
)
if __name__ == "__main__":
unittest.main()
| 8,936 | 32.724528 | 151 | py |
libai | libai-main/tests/model_loader/test_swin_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
import libai
from configs.common.models.swin.swin_tiny_patch4_window7_224 import cfg as libai_cfg
from libai.models.utils import SwinLoaderHuggerFace
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/swin_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/swin_utils/config.json" # noqa
INIT_DATA = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/swin_utils/init_data.npz" # noqa
PRETRAINED_MODEL_MD5 = "cd8c03d9cd4a9c536a5a245f663035b6"
PRETRAINED_MODEL_CONFIG_MD5 = "a8a71ed22b99323edd6a1457bede5819"
INIT_DATA_MD5 = "5fecdcd8d46bfefa310d19e084bd4815"
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_swin_utils")
setup_logger(distributed_rank=dist.get_rank())
class TestSwinLoder(flow.unittest.TestCase):
    """Load a HuggingFace Swin-Tiny checkpoint into LiBai's SwinTransformer under
    several 4-GPU parallelism layouts and compare prediction-score / gradient
    sums against fixed reference values."""
    def setUp(self) -> None:
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "swin_utils_data"
        )
        self.pretrained_model_path = cache_dir
        self.init_data_path = os.path.join(cache_dir, "init_data.npz")
        # download model and data
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            get_data_from_cache(INIT_DATA, cache_dir, md5=INIT_DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        # Barrier: other ranks wait for rank 0's downloads to finish.
        dist.synchronize()
        # prepare input data
        # Fixed input image batch stored as "arr_0" in the downloaded .npz file.
        self.input_image = np.load(self.init_data_path)["arr_0"]
    @classmethod
    def tearDownClass(cls) -> None:
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)
    # Forward pass under 2-way data parallel x 2-way tensor parallel.
    @flow.unittest.skip_unless_1n4d()
    def test_swin_loader_with_data_tensor_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = SwinLoaderHuggerFace(
            model=libai.models.SwinTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
        )
        model = load_func.load()
        model.eval()
        input_image = flow.tensor(
            # NOTE(review): .tolist() looks redundant — flow.tensor accepts the
            # ndarray directly, as the other tests in this class do.
            self.input_image.tolist(),
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        self.assertTrue(np.allclose(np.array(80.9373), prediction_scores.sum().data.numpy(), 1e-3))
    # Forward pass under 2-way data parallel x 2-stage pipeline parallel.
    @flow.unittest.skip_unless_1n4d()
    def test_swin_loader_with_data_tensor_pipeline_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = SwinLoaderHuggerFace(
            model=libai.models.SwinTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
        )
        model = load_func.load()
        model.eval()
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        self.assertTrue(np.allclose(np.array(80.9373), prediction_scores.sum().data.numpy(), 1e-3))
    # Backward pass under data + tensor parallelism; dropout disabled so the
    # gradient sums are deterministic.
    @flow.unittest.skip_unless_1n4d()
    def test_swin_loader_with_data_tensor_parallel_backward(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = SwinLoaderHuggerFace(
            model=libai.models.SwinTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            drop_rate=0.0,
            drop_path_rate=0.0,
        )
        model = load_func.load()
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        loss = prediction_scores.sum()
        loss.backward()
        self.assertTrue(np.allclose(108775.88, model.head.weight.grad.sum().numpy(), 1e-3))
        self.assertTrue(
            np.allclose(24.320518, model.patch_embed.norm.weight.grad.sum().numpy(), 1e-2)
        )
    # Backward pass under data + pipeline parallelism; same gradient checks.
    @flow.unittest.skip_unless_1n4d()
    def test_swin_loader_with_data_tensor_pipeline_parallel_backward(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = SwinLoaderHuggerFace(
            model=libai.models.SwinTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            drop_rate=0.0,
            drop_path_rate=0.0,
        )
        model = load_func.load()
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        loss = prediction_scores.sum()
        loss.backward()
        self.assertTrue(np.allclose(108775.88, model.head.weight.grad.sum().numpy(), 1e-3))
        self.assertTrue(
            np.allclose(24.320518, model.patch_embed.norm.weight.grad.sum().numpy(), 1e-2)
        )
if __name__ == "__main__":
    unittest.main()
| 7,713 | 34.223744 | 152 | py |
libai | libai-main/tests/model_loader/test_vit_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
import libai
from configs.common.models.vit.vit_tiny_patch16_224 import cfg as libai_cfg
from libai.models.utils import ViTLoaderHuggerFace
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/vit_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/vit_utils/config.json" # noqa
INIT_DATA = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/vit_utils/init_data.npz" # noqa
PRETRAINED_MODEL_MD5 = "c587693e5e312064c56f27aa2d4f1e81"
PRETRAINED_MODEL_CONFIG_MD5 = "9ea94d9e5bc3543b1de7d12956321c50"
INIT_DATA_MD5 = "5fecdcd8d46bfefa310d19e084bd4815"
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_vit_utils")
setup_logger(distributed_rank=dist.get_rank())
class TestViTLoder(flow.unittest.TestCase):
    """Load a HuggingFace ViT-Tiny checkpoint into LiBai's VisionTransformer under
    several 4-GPU parallelism layouts and compare prediction-score / gradient
    sums against fixed reference values."""
    def setUp(self) -> None:
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "vit_utils_data"
        )
        self.pretrained_model_path = cache_dir
        self.init_data_path = os.path.join(cache_dir, "init_data.npz")
        # download model and data
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            get_data_from_cache(INIT_DATA, cache_dir, md5=INIT_DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        # Barrier: other ranks wait for rank 0's downloads to finish.
        dist.synchronize()
        # prepare input data
        # Fixed input image batch stored as "arr_0" in the downloaded .npz file.
        self.input_image = np.load(self.init_data_path)["arr_0"]
    @classmethod
    def tearDownClass(cls) -> None:
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)
    # Forward pass under 2-way data parallel x 2-way tensor parallel.
    @flow.unittest.skip_unless_1n4d()
    def test_vit_loader_with_data_tensor_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = ViTLoaderHuggerFace(
            model=libai.models.VisionTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
        )
        model = load_func.load()
        model.eval()
        input_image = flow.tensor(
            # NOTE(review): .tolist() looks redundant — flow.tensor accepts the
            # ndarray directly, as the other tests in this class do.
            self.input_image.tolist(),
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        self.assertTrue(
            np.allclose(np.array(3.1374), prediction_scores.sum().data.numpy(), 1e-4, 1e-4)
        )
    # Forward pass under 2-way data parallel x 2-stage pipeline parallel.
    @flow.unittest.skip_unless_1n4d()
    def test_vit_loader_with_data_tensor_pipeline_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = ViTLoaderHuggerFace(
            model=libai.models.VisionTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
        )
        model = load_func.load()
        model.eval()
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        self.assertTrue(
            np.allclose(np.array(3.1374), prediction_scores.sum().data.numpy(), 1e-4, 1e-4)
        )
    # Backward pass under data + tensor parallelism; dropout / drop-path are
    # disabled so the gradient sum is deterministic.
    @flow.unittest.skip_unless_1n4d()
    def test_vit_loader_with_data_tensor_parallel_backward(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = ViTLoaderHuggerFace(
            model=libai.models.VisionTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            drop_rate=0,
            attn_drop_rate=0,
            drop_path_rate=0,
        )
        model = load_func.load()
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        loss = prediction_scores.sum()
        loss.backward()
        self.assertTrue(np.allclose(-173459.77, model.head.weight.grad.sum().numpy(), 1e-3))
    # Backward pass under data + pipeline parallelism; same gradient check.
    @flow.unittest.skip_unless_1n4d()
    def test_vit_loader_with_data_tensor_pipeline_parallel_backward(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = ViTLoaderHuggerFace(
            model=libai.models.VisionTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            drop_rate=0,
            attn_drop_rate=0,
            drop_path_rate=0,
        )
        model = load_func.load()
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        loss = prediction_scores.sum()
        loss.backward()
        self.assertTrue(np.allclose(-173459.77, model.head.weight.grad.sum().numpy(), 1e-3))
if __name__ == "__main__":
    unittest.main()
| 7,553 | 33.493151 | 151 | py |
libai | libai-main/tests/model_loader/test_bert_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
import libai
from configs.common.models.bert import cfg as libai_cfg
from libai.models.utils import BertLoaderHuggerFace
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/bert_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/bert_utils/config.json" # noqa
PRETRAINED_MODEL_MD5 = "ea97b42698d3b5f6d8e8011eba3d1611"
PRETRAINED_MODEL_CONFIG_MD5 = "0939b914fc32135f6c12d8ef281dbd7a"
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_bert_utils")
setup_logger(distributed_rank=dist.get_rank())
class TestBertLoder(flow.unittest.TestCase):
    """Load a HuggingFace BERT checkpoint into LiBai's BertModel under two 4-GPU
    parallelism layouts and compare the last-hidden-state sum against a fixed
    reference value."""
    def setUp(self) -> None:
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "bert_utils_data"
        )
        self.pretrained_model_path = cache_dir
        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        # Barrier so non-zero ranks wait until rank 0 has finished downloading.
        dist.synchronize()
        # prepare input data
        # Three fixed token-id sequences (zero-padded) plus an all-ones
        # attention mask of matching shape.
        self.input_ids = [
            [101, 2009, 1005, 1055, 2986, 2651, 1012, 102],
            [101, 2028, 12314, 3377, 102, 0, 0, 0],
            [101, 2064, 2017, 3305, 2009, 102, 0, 0],
        ]
        self.mask = [[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]]
    @classmethod
    def tearDownClass(cls) -> None:
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)
    # Forward pass under 2-way data parallel x 2-way tensor parallel.
    @flow.unittest.skip_unless_1n4d()
    def test_bert_loader_with_data_tensor_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        # Fusions disabled so the numerics line up with the reference value.
        load_func = BertLoaderHuggerFace(
            model=libai.models.BertModel,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            bias_gelu_fusion=False,
            bias_dropout_fusion=False,
            scale_mask_softmax_fusion=False,
            apply_query_key_layer_scaling=False,
            apply_residual_post_layernorm=True,
            amp_enabled=False,
        )
        model = load_func.load()
        model.eval()
        input_ids = flow.tensor(
            self.input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.vocab_embeddings.weight.placement,
        )
        mask = flow.tensor(
            self.mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.vocab_embeddings.weight.placement,
        )
        last_hidden_state, _ = model(input_ids, mask)
        self.assertTrue(
            np.allclose(np.array(-214.9335), last_hidden_state.sum().data.numpy(), 1e-4, 1e-4)
        )
    # Forward pass under 2-way data parallel x 2-stage pipeline parallel; the
    # parallel layout must not change the numerics.
    @flow.unittest.skip_unless_1n4d()
    def test_bert_loader_with_data_tensor_pipeline_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = BertLoaderHuggerFace(
            model=libai.models.BertModel,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            bias_gelu_fusion=False,
            bias_dropout_fusion=False,
            scale_mask_softmax_fusion=False,
            apply_query_key_layer_scaling=False,
            apply_residual_post_layernorm=True,
            amp_enabled=False,
        )
        model = load_func.load()
        model.eval()
        input_ids = flow.tensor(
            self.input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.vocab_embeddings.weight.placement,
        )
        mask = flow.tensor(
            self.mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.vocab_embeddings.weight.placement,
        )
        last_hidden_state, _ = model(input_ids, mask)
        self.assertTrue(
            np.allclose(np.array(-214.9335), last_hidden_state.sum().data.numpy(), 1e-4, 1e-4)
        )
if __name__ == "__main__":
    unittest.main()
| 5,969 | 34.325444 | 152 | py |
libai | libai-main/tests/model_loader/__init__.py | 0 | 0 | 0 | py | |
libai | libai-main/tests/model_loader/test_swinv2_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
import libai
from configs.common.models.swinv2.swinv2_tiny_patch4_window8_256 import cfg as libai_cfg
from libai.models.utils import SwinV2LoaderHuggerFace
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/swinv2_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/swinv2_utils/config.json" # noqa
INIT_DATA = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/swinv2_utils/init_data.npz" # noqa
PRETRAINED_MODEL_MD5 = "40f085f8916974dcb5d86fc6e03aa0df"
PRETRAINED_MODEL_CONFIG_MD5 = "2d3874d58f3d5684f51f70ca29a7de9f"
INIT_DATA_MD5 = "c19b2ad8afe9a708aac9d2a0ff15f7bd"
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_swinv2_utils")
setup_logger(distributed_rank=dist.get_rank())
class TestSwinV2Loder(flow.unittest.TestCase):
    """Load a HuggingFace SwinV2-Tiny checkpoint into LiBai's SwinTransformerV2
    under several 4-GPU parallelism layouts and compare prediction-score /
    gradient sums against fixed reference values."""
    def setUp(self) -> None:
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "swinv2_utils_data"
        )
        self.pretrained_model_path = cache_dir
        self.init_data_path = os.path.join(cache_dir, "init_data.npz")
        # download model and data
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            get_data_from_cache(INIT_DATA, cache_dir, md5=INIT_DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        # Barrier: other ranks wait for rank 0's downloads to finish.
        dist.synchronize()
        # prepare input data
        # Fixed input image batch stored as "arr_0" in the downloaded .npz file.
        self.input_image = np.load(self.init_data_path)["arr_0"]
    @classmethod
    def tearDownClass(cls) -> None:
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)
    # Forward pass under 2-way data parallel x 2-way tensor parallel.
    @flow.unittest.skip_unless_1n4d()
    def test_swinv2_loader_with_data_tensor_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = SwinV2LoaderHuggerFace(
            model=libai.models.SwinTransformerV2,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
        )
        model = load_func.load()
        model.eval()
        input_image = flow.tensor(
            # NOTE(review): .tolist() looks redundant — flow.tensor accepts the
            # ndarray directly, as the pipeline-parallel forward test does.
            self.input_image.tolist(),
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        self.assertTrue(
            np.allclose(np.array(221.7827), prediction_scores.sum().data.numpy(), 1e-4, 1e-4)
        )
    # Forward pass under 2-way data parallel x 2-stage pipeline parallel.
    @flow.unittest.skip_unless_1n4d()
    def test_swinv2_loader_with_data_tensor_pipeline_parallel(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = SwinV2LoaderHuggerFace(
            model=libai.models.SwinTransformerV2,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
        )
        model = load_func.load()
        model.eval()
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        self.assertTrue(
            np.allclose(np.array(221.7827), prediction_scores.sum().data.numpy(), 1e-4, 1e-4)
        )
    # Backward pass under data + tensor parallelism; dropout / drop-path are
    # disabled so the gradient sums are deterministic.
    @flow.unittest.skip_unless_1n4d()
    def test_swinv2_loader_with_data_tensor_parallel_backward(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = SwinV2LoaderHuggerFace(
            model=libai.models.SwinTransformerV2,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            drop_rate=0,
            drop_path_rate=0,
        )
        model = load_func.load()
        input_image = flow.tensor(
            self.input_image.tolist(),
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        loss = prediction_scores.sum()
        loss.backward()
        self.assertTrue(np.allclose(373520.47, model.head.weight.grad.sum().numpy(), 1e-3))
        self.assertTrue(
            np.allclose(259.379, model.patch_embed.norm.weight.grad.sum().numpy(), 1e-3)
        )
    # Backward pass under data + pipeline parallelism; same gradient checks.
    @flow.unittest.skip_unless_1n4d()
    def test_swinv2_loader_with_data_tensor_pipeline_parallel_backward(self):
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = SwinV2LoaderHuggerFace(
            model=libai.models.SwinTransformerV2,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            drop_rate=0,
            drop_path_rate=0,
        )
        model = load_func.load()
        input_image = flow.tensor(
            self.input_image.tolist(),
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        loss = prediction_scores.sum()
        loss.backward()
        self.assertTrue(np.allclose(373520.47, model.head.weight.grad.sum().numpy(), 1e-3))
        self.assertTrue(
            np.allclose(259.379, model.patch_embed.norm.weight.grad.sum().numpy(), 1e-3)
        )
if __name__ == "__main__":
    unittest.main()
| 7,819 | 34.067265 | 154 | py |
libai | libai-main/tests/layers/test_trainer_model.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
class demo_model(nn.Module):
    """Tiny two-layer linear stack whose forward pass returns a scalar "loss".

    Used as a stand-in model for trainer/graph tests: ``forward`` projects the
    input through two linear layers and reduces the result with ``get_loss``.
    """

    def __init__(self, input_dim=512, out_dim=64):
        super().__init__()
        # input_dim -> out_dim, then out_dim -> out_dim.
        self.linear1 = nn.Linear(input_dim, out_dim)
        self.linear2 = nn.Linear(out_dim, out_dim)

    def forward(self, x):
        hidden = self.linear2(self.linear1(x))
        return self.get_loss(hidden)

    def get_loss(self, x):
        # Scalar reduction used as a dummy training objective.
        return x.sum()
def build_model(cfg):
    """Create a ``demo_model`` replicated (broadcast) on every CUDA device.

    ``cfg`` is accepted for builder-API compatibility but is not used here.
    """
    everywhere = flow.env.all_device_placement("cuda")
    return demo_model().to_global(placement=everywhere, sbp=flow.sbp.broadcast)
def build_graph(cfg, model, optimizer, lr_scheduler, fp16=False):
    """Wrap ``model`` in an ``nn.Graph`` that runs forward + backward + update.

    Returns the compiled graph when an ``optimizer`` is given, otherwise ``None``.
    ``cfg`` is accepted for builder-API compatibility but is not used here.
    """
    class GraphModel(nn.Graph):
        def __init__(self, model, optimizer, lr_scheduler, fp16=False):
            super().__init__()
            self.model = model
            # Register the optimizer (and its LR scheduler) so the graph
            # performs the parameter update as part of each call.
            self.add_optimizer(optimizer, lr_sch=lr_scheduler)
            # Kernel-fusion knobs for the compiled graph.
            self.config.allow_fuse_add_to_output(True)
            self.config.allow_fuse_model_update_ops(True)
            if fp16:
                # AMP with a dynamic grad scaler to avoid fp16 underflow.
                self.config.enable_amp(True)
                grad_scaler = flow.amp.GradScaler(
                    init_scale=2 ** 30,
                    growth_factor=2.0,
                    backoff_factor=0.5,
                    growth_interval=2000,
                )
                self.set_grad_scaler(grad_scaler)
        def build(self, x):
            # One training step: forward produces the scalar loss, backward
            # populates gradients; the registered optimizer applies the update.
            loss = self.model(x)
            loss.backward()
            return loss
    if optimizer:
        return GraphModel(model, optimizer, lr_scheduler, fp16=fp16)
    else:
        return None
| 2,247 | 31.114286 | 74 | py |
libai | libai-main/tests/layers/test_evaluator_model.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.models.utils.graph_base import GraphBase
class demo_model(nn.Module):
    """Tiny MLP for evaluator tests.

    ``forward`` returns raw outputs when no ``label`` is given (evaluation
    path) and a scalar "loss" from ``get_loss`` otherwise (training path).
    """

    def __init__(self, input_dim=512, out_dim=3):
        super().__init__()
        hidden_dim = input_dim // 2
        self.linear1 = nn.Linear(input_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, out_dim)

    def forward(self, x, label=None):
        # Inputs are cast to float32 so integer batches are accepted too.
        logits = self.linear2(self.linear1(x.to(dtype=flow.float32)))
        if label is None:
            return logits
        return self.get_loss(logits)

    def get_loss(self, x):
        # Scalar reduction used as a dummy training objective.
        return x.sum()
def build_model(cfg):
    """Instantiate ``demo_model`` broadcast-replicated on all CUDA devices.

    The ``cfg`` argument is unused; it exists to match the builder signature.
    """
    world = flow.env.all_device_placement("cuda")
    return demo_model().to_global(placement=world, sbp=flow.sbp.broadcast)
class GraphModel(GraphBase):
    """Graph wrapper that dispatches on ``is_train``: a forward+backward step
    returning the loss in training mode, a plain forward pass in eval mode."""
    def build(self, x, label=None):
        if self.is_train:
            # Training: the model returns a scalar loss when given a label.
            loss = self.model(x, label)
            loss.backward()
            return loss
        else:
            # Evaluation: return the model's raw outputs.
            return self.model(x)
| 1,653 | 28.535714 | 74 | py |
libai | libai-main/tests/layers/test_linear.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
from oneflow import nn
from libai.layers import Linear
from libai.utils import distributed as dist
class TestLinear(flow.unittest.TestCase):
    """Compare ``libai.layers.Linear`` against eager ``oneflow.nn.Linear``.

    Each test builds both layers with identical weights/bias and checks the
    outputs agree, under data-, column-, and row-parallel layouts.  The three
    original tests shared all of their setup/comparison code; it now lives in
    two private helpers so the per-mode tests state only what differs.
    """

    @staticmethod
    def _setup_dist(tensor_parallel_size):
        """Configure the dist util with the given tensor-parallel degree
        (data/pipeline parallel fixed at 1, as in all tests here)."""
        dist.setup_dist_util(
            DictConfig(
                dict(
                    data_parallel_size=1,
                    tensor_parallel_size=tensor_parallel_size,
                    pipeline_parallel_size=1,
                )
            )
        )

    def _check_against_nn_linear(self, weight_sbp, bias_sbp, parallel=None):
        """Build an 8->4 layer both ways and assert matching outputs.

        Args:
            weight_sbp / bias_sbp: SBP signatures for the global weight/bias
                tensors given to the libai layer.
            parallel: forwarded to ``Linear(parallel=...)``; ``None`` keeps
                the layer's default (matches the original data-parallel test).
        """
        inputs = flow.rand(8, 8, sbp=flow.sbp.broadcast, placement=dist.get_layer_placement(0))
        weight = flow.rand(4, 8, sbp=weight_sbp, placement=dist.get_layer_placement(0))
        bias = flow.rand(4, sbp=bias_sbp, placement=dist.get_layer_placement(0))

        # Reference result: eager nn.Linear on local tensors moved to cuda.
        nn_linear = nn.Linear(8, 4).to("cuda")
        nn_linear.weight.data.copy_(dist.ttol(weight).to("cuda"))
        nn_linear.bias.data.copy_(dist.ttol(bias).to("cuda"))
        nn_output = nn_linear(dist.ttol(inputs).to("cuda"))

        if parallel is None:
            libai_linear = Linear(8, 4)
        else:
            libai_linear = Linear(8, 4, parallel=parallel)
        libai_linear.weight.data.copy_(weight)
        libai_linear.bias.data.copy_(bias)
        libai_output = libai_linear(inputs)

        self.assertTrue(np.allclose(nn_output.cpu().numpy(), dist.tton(libai_output), 1e-7, 1e-7))

    @unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
    @flow.unittest.skip_unless_1n1d()
    def test_nn_linear(self):
        self._setup_dist(1)
        self._check_against_nn_linear(flow.sbp.broadcast, flow.sbp.broadcast)

    @flow.unittest.skip_unless_1n2d()
    def test_col_parallel_linear(self):
        # Column parallel: weight and bias both split along dim 0.
        self._setup_dist(2)
        self._check_against_nn_linear(flow.sbp.split(0), flow.sbp.split(0), parallel="col")

    @flow.unittest.skip_unless_1n2d()
    def test_row_parallel_linear(self):
        # Row parallel: weight split along dim 1, bias replicated.
        self._setup_dist(2)
        self._check_against_nn_linear(flow.sbp.split(1), flow.sbp.broadcast, parallel="row")
| 4,370 | 37.008696 | 98 | py |
libai | libai-main/tests/layers/__init__.py | 0 | 0 | 0 | py | |
libai | libai-main/tests/data/test_sampler.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import itertools
# import unittest
# import oneflow.utils.data as flowdata
# from libai.data.samplers import CyclicSampler, SingleRoundSampler
# class TestCyclicSampler(unittest.TestCase):
# def test_cyclic_sampler_iterable(self):
# sampler = CyclicSampler(
# list(range(100)),
# micro_batch_size=4,
# shuffle=True,
# consumed_samples=0,
# seed=123,
# )
# output_iter = itertools.islice(sampler, 25) # iteration=100/4=25
# sample_output = list()
# for batch in output_iter:
# sample_output.extend(batch)
# self.assertEqual(set(sample_output), set(range(100)))
# data_sampler = CyclicSampler(
# list(range(100)),
# micro_batch_size=4,
# shuffle=True,
# consumed_samples=0,
# seed=123,
# )
# data_loader = flowdata.DataLoader(
# list(range(100)), batch_sampler=data_sampler, num_workers=0, collate_fn=lambda x: x
# )
# data_loader_iter = itertools.islice(data_loader, 25)
# output = list()
# for data in data_loader_iter:
# output.extend(data)
# self.assertEqual(output, sample_output)
# def test_cyclic_sampler_seed(self):
# sampler = CyclicSampler(
# list(range(100)),
# micro_batch_size=4,
# shuffle=True,
# seed=123,
# )
# data = list(itertools.islice(sampler, 65))
# sampler = CyclicSampler(
# list(range(100)),
# micro_batch_size=4,
# shuffle=True,
# seed=123,
# )
# data2 = list(itertools.islice(sampler, 65))
# self.assertEqual(data, data2)
# def test_cyclic_sampler_resume(self):
# # Single rank
# sampler = CyclicSampler(
# list(range(10)),
# micro_batch_size=4,
# shuffle=True,
# seed=123,
# )
# all_output = list(itertools.islice(sampler, 50)) # iteration 50 times
# sampler = CyclicSampler(
# list(range(10)),
# micro_batch_size=4,
# shuffle=True,
# seed=123,
# consumed_samples=4 * 11, # consumed 11 iters
# )
# resume_output = list(itertools.islice(sampler, 39))
# self.assertEqual(all_output[11:], resume_output)
# def test_cyclic_sampler_resume_multi_rank(self):
# # Multiple ranks
# sampler_rank0 = CyclicSampler(
# list(range(10)),
# micro_batch_size=4,
# shuffle=True,
# seed=123,
# data_parallel_rank=0,
# data_parallel_size=2,
# )
# sampler_rank1 = CyclicSampler(
# list(range(10)),
# micro_batch_size=4,
# shuffle=True,
# seed=123,
# data_parallel_rank=1,
# data_parallel_size=2,
# )
# all_output_rank0 = list(itertools.islice(sampler_rank0, 50)) # iteration 50 times
# all_output_rank1 = list(itertools.islice(sampler_rank1, 50)) # iteration 50 times
# sampler_rank0 = CyclicSampler(
# list(range(10)),
# micro_batch_size=4,
# shuffle=True,
# seed=123,
# data_parallel_rank=0,
# data_parallel_size=2,
# consumed_samples=4 * 11, # consumed 11 iters
# )
# sampler_rank1 = CyclicSampler(
# list(range(10)),
# micro_batch_size=4,
# shuffle=True,
# seed=123,
# data_parallel_rank=1,
# data_parallel_size=2,
# consumed_samples=4 * 11, # consumed 11 iters
# )
# resume_output_rank0 = list(itertools.islice(sampler_rank0, 39))
# resume_output_rank1 = list(itertools.islice(sampler_rank1, 39))
# self.assertEqual(all_output_rank0[11:], resume_output_rank0)
# self.assertEqual(all_output_rank1[11:], resume_output_rank1)
# class TestSingleRoundSampler(unittest.TestCase):
# def test_single_sampler_iterable(self):
# sampler = SingleRoundSampler(
# list(range(100)),
# micro_batch_size=4,
# shuffle=False,
# )
# output_iter = itertools.islice(sampler, 30) # exceed iteration number
# sample_output = list()
# for batch in output_iter:
# sample_output.extend(batch)
# self.assertEqual(sample_output, list(range(100)))
# def test_single_sampler_multi_rank(self):
# sampler_rank0 = SingleRoundSampler(
# list(range(101)),
# micro_batch_size=4,
# shuffle=False,
# data_parallel_rank=0,
# data_parallel_size=2,
# )
# sampler_rank1 = SingleRoundSampler(
# list(range(101)),
# micro_batch_size=4,
# shuffle=False,
# data_parallel_rank=1,
# data_parallel_size=2,
# )
# output_iter_rank0 = itertools.islice(sampler_rank0, 30)
# sample_output_rank0 = list()
# for batch in output_iter_rank0:
# sample_output_rank0.extend(batch)
# output_iter_rank1 = itertools.islice(sampler_rank1, 30)
# sample_output_rank1 = list()
# for batch in output_iter_rank1:
# sample_output_rank1.extend(batch)
# # Padding 0 if it's not enough for a batch, otherwise `to_global`
# # will raise errors for imbalanced data shape in different ranks
# self.assertEqual(sample_output_rank0, list(range(51)))
# self.assertEqual(sample_output_rank1, list(range(51, 101)) + [0])
# if __name__ == "__main__":
# unittest.main()
| 6,458 | 32.293814 | 97 | py |
libai | libai-main/tests/data/__init__.py | 0 | 0 | 0 | py | |
libai | libai-main/tests/inference/test_text_generation.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from libai.inference.text_generation import TextGenerationPipeline
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
# Vocab file for the bert-base-chinese tokenizer, downloaded once per node in setUp.
VOCAB_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/bert-base-chinese-vocab.txt"  # noqa
# md5 checksum verified by get_data_from_cache after download.
VOCAB_MD5 = "65ac8a72466e859cd3c6b279ed8e532a"
class TestTextGenerationPipeline(flow.unittest.TestCase):
    """Check that cached and non-cached generation produce identical text.

    The same consistency check is run under three 4-GPU layouts:
    tensor-parallel=4, pipeline-parallel=4, and 2x2.  The three original
    tests were byte-identical apart from the parallel sizes; the shared body
    now lives in ``_check_cache_consistency``.
    """

    def setUp(self) -> None:
        # Building blocks for random prompts.
        self.texts = ["cat ", "you ", "dog ", "dragon ", "牛 ", "羊 "]
        cache_dir = os.path.join(os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "bert_data")
        # prepare tokenizer
        if dist.get_local_rank() == 0:
            # download tokenizer vocab on main process of each node
            get_data_from_cache(VOCAB_URL, cache_dir, md5=VOCAB_MD5)

    def _random_text(self):
        """Concatenate 10 randomly chosen entries from ``self.texts``.

        Samples over ``len(self.texts)`` so every entry can be drawn; the
        original hard-coded upper bound of 5 (exclusive in
        ``np.random.randint``) could never select the last entry, "羊 ".
        """
        indices = np.random.randint(0, len(self.texts), 10)
        return "".join(self.texts[i] for i in indices)

    def _check_cache_consistency(self, tensor_parallel_size, pipeline_parallel_size):
        """Run 5 random prompts; assert cached == non-cached generation."""
        self.pipeline = TextGenerationPipeline(
            "configs/t5_large_pretrain.py", 1, tensor_parallel_size, pipeline_parallel_size
        )
        for _ in range(5):
            text = self._random_text()
            no_cache = self.pipeline(
                text, use_cache=False, max_generate_length=15, return_type="new_text"
            )
            with_cache = self.pipeline(
                text, use_cache=True, max_generate_length=15, return_type="new_text"
            )
            if dist.is_main_process():
                assert no_cache["generated_text"] == with_cache["generated_text"]

    @unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
    @flow.unittest.skip_unless_1n4d()
    def test_pipeline_with_tensor_parallel(self):
        self._check_cache_consistency(4, 1)

    @unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
    @flow.unittest.skip_unless_1n4d()
    def test_pipeline_with_pipeline_parallel(self):
        self._check_cache_consistency(1, 4)

    @unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
    @flow.unittest.skip_unless_1n4d()
    def test_pipeline_with_tensor_pipeline_parallel(self):
        self._check_cache_consistency(2, 2)
# Allow running this test module directly (outside the pytest collector).
if __name__ == "__main__":
    unittest.main()
| 3,903 | 40.094737 | 136 | py |
libai | libai-main/tests/inference/test_image_classification.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import zipfile
import numpy as np
import oneflow as flow
import oneflow.unittest
from libai.inference.image_classification import ImageClassificationPipeline
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
# Test assets: one ImageNet validation image plus the config and checkpoint
# of a pretrained vit_small_patch16_224 model.
IMAGE_URL = "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/LiBai/Inference/ILSVRC2012_val_00000293.JPEG"  # noqa
CONFIG_URL = "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/LiBai/ImageNet/vit_small_patch16_224/config.yaml"  # noqa
MODEL_URL = "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/LiBai/ImageNet/vit_small_patch16_224/model_best.zip"  # noqa
# NOTE(review): IMAGE_MD5 is byte-identical to the VOCAB_MD5 used by the
# text-generation test for a .txt vocab file — one of the two is likely a
# copy-paste error; confirm against the actual asset checksums.
IMAGE_MD5 = "65ac8a72466e859cd3c6b279ed8e532a"
CONFIG_MD5 = "4cf8e662d76f855f4d99ce7129050e79"
MODEL_MD5 = "2bfc9cb7df5739d1a1d11db97f54d93f"
def _legacy_zip_load(filename, model_dir):
# Note: extractall() defaults to overwrite file if exists. No need to clean up beforehand.
# We deliberately don't handle tarfile here since our legacy serialization format was in tar.
with zipfile.ZipFile(filename) as f:
members = f.infolist()
extracted_name = members[0].filename
extracted_file = os.path.join(model_dir, extracted_name)
if not os.path.exists(extracted_file):
os.mkdir(extracted_file)
f.extractall(model_dir)
class TestImageClassificationPipeline(flow.unittest.TestCase):
    """End-to-end checks for ImageClassificationPipeline on 1-node / 4-device layouts."""

    def setUp(self) -> None:
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "inference_test_data"
        )
        model_zip = os.path.join(cache_dir, MODEL_URL.split("/")[-1])
        if dist.get_local_rank() == 0:
            # Fetch the test assets on the main process of each node only,
            # then unpack the checkpoint next to the downloaded zip.
            get_data_from_cache(IMAGE_URL, cache_dir, md5=IMAGE_MD5)
            get_data_from_cache(CONFIG_URL, cache_dir, md5=CONFIG_MD5)
            get_data_from_cache(MODEL_URL, cache_dir, md5=MODEL_MD5)
            _legacy_zip_load(model_zip, os.path.dirname(model_zip))
        self.image_path = os.path.join(cache_dir, IMAGE_URL.split("/")[-1])
        self.config_path = os.path.join(cache_dir, CONFIG_URL.split("/")[-1])
        # The unpacked checkpoint directory shares the zip's basename.
        self.model_path = model_zip.replace(".zip", "")
        assert os.path.isdir(self.model_path)

    @unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
    @flow.unittest.skip_unless_1n4d()
    def test_smallvitpipeline_with_pipeline_parallel(self):
        self.pipeline = ImageClassificationPipeline(self.config_path, 1, 1, 4, self.model_path)
        result = self.pipeline(self.image_path)
        if flow.env.get_rank() == 0:
            # Expected top-1 class and score for the reference image.
            self.assertTrue(result["label"] == "tench, Tinca tinca")
            self.assertTrue(
                np.allclose(np.array(0.7100194096565247), np.array(result["score"]), 1e-4, 1e-4)
            )

    @unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
    @flow.unittest.skip_unless_1n4d()
    def test_pipeline_with_pipeline_parallel(self):
        # Smoke test only: build from the repo config and run one image.
        self.pipeline = ImageClassificationPipeline("configs/vit_imagenet.py", 1, 1, 4)
        self.pipeline(self.image_path)

    @unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
    @flow.unittest.skip_unless_1n4d()
    def test_pipeline_with_tensor_parallel(self):
        pass
        # TODO: bug occurs when tensor parallel
        # self.pipeline = ImageClassificationPipeline("configs/vit_imagenet.py", 1, 4, 1)
        # self.pipeline(self.image_path)
# Allow running this test module directly (outside the pytest collector).
if __name__ == "__main__":
    unittest.main()
| 4,163 | 42.831579 | 134 | py |
libai | libai-main/tests/inference/__init__.py | 0 | 0 | 0 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.