repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
libai | libai-main/projects/text_classification/dataset/download_glue_data.py | # flake8: noqa
""" Script for downloading all GLUE data.
Modify from https://github.com/nyu-mll/GLUE-baselines/blob/master/download_glue_data.py
Note: for legal reasons, we are unable to host MRPC.
You can either use the version hosted by the SentEval team, which is already tokenized,
or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi) and extract the data from it manually.
For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example).
You should then rename and place specific files in a folder (see below for an example).
mkdir MRPC
cabextract MSRParaphraseCorpus.msi -d MRPC
cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt
cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt
rm MRPC/_*
rm MSRParaphraseCorpus.msi
"""
import argparse
import io
import os
import sys
import urllib
if sys.version_info >= (3, 0):
import urllib.request
import zipfile
URLLIB = urllib
if sys.version_info >= (3, 0):
URLLIB = urllib.request
TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "QNLI", "RTE", "WNLI", "diagnostic"]
TASK2PATH = {
"CoLA": "https://dl.fbaipublicfiles.com/glue/data/CoLA.zip",
"SST": "https://dl.fbaipublicfiles.com/glue/data/SST-2.zip",
"QQP": "https://dl.fbaipublicfiles.com/glue/data/STS-B.zip",
"STS": "https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip",
"MNLI": "https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
"QNLI": "https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip",
"RTE": "https://dl.fbaipublicfiles.com/glue/data/RTE.zip",
"WNLI": "https://dl.fbaipublicfiles.com/glue/data/WNLI.zip",
"diagnostic": "https://dl.fbaipublicfiles.com/glue/data/AX.tsv",
}
MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt"
MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt"
def download_and_extract(task, data_dir):
    """Download the zip archive for ``task`` and unpack it into ``data_dir``.

    The archive is fetched to the current working directory as ``<task>.zip``
    and deleted after extraction.
    """
    print("Downloading and extracting %s..." % task)
    if task == "MNLI":
        print(
            "\tNote (12/10/20): This script no longer downloads SNLI. You will need to manually download and format the data to use SNLI."
        )
    archive_name = "%s.zip" % task
    URLLIB.urlretrieve(TASK2PATH[task], archive_name)
    with zipfile.ZipFile(archive_name) as archive:
        archive.extractall(data_dir)
    os.remove(archive_name)
    print("\tCompleted!")
def format_mrpc(data_dir, path_to_data):
    """Build MRPC ``train.tsv``/``dev.tsv``/``test.tsv`` under ``data_dir``/MRPC.

    If ``path_to_data`` is non-empty, the raw ``msr_paraphrase_{train,test}.txt``
    files are read from there; otherwise they are downloaded from SentEval.
    The standard dev split comes from the GLUE-hosted ``dev_ids.tsv``.
    Prints an error and returns early if any download fails.
    """
    print("Processing MRPC...")
    mrpc_dir = os.path.join(data_dir, "MRPC")
    if not os.path.isdir(mrpc_dir):
        os.mkdir(mrpc_dir)
    if path_to_data:
        mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt")
        mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt")
    else:
        try:
            mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt")
            mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt")
            URLLIB.urlretrieve(MRPC_TRAIN, mrpc_train_file)
            URLLIB.urlretrieve(MRPC_TEST, mrpc_test_file)
        except urllib.error.HTTPError:
            print("Error downloading MRPC")
            return
    assert os.path.isfile(mrpc_train_file), "Train data not found at %s" % mrpc_train_file
    assert os.path.isfile(mrpc_test_file), "Test data not found at %s" % mrpc_test_file

    # Write test.tsv: drop the label column and prepend a running row index.
    with io.open(mrpc_test_file, encoding="utf-8") as data_fh, io.open(
        os.path.join(mrpc_dir, "test.tsv"), "w", encoding="utf-8"
    ) as test_fh:
        data_fh.readline()  # skip the original header
        test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n")
        for idx, row in enumerate(data_fh):
            label, id1, id2, s1, s2 = row.strip().split("\t")
            test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2))

    try:
        URLLIB.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv"))
    # BUGFIX: the original wrote ``except KeyError or urllib.error.HTTPError:``;
    # that expression evaluates to just KeyError, so HTTP errors were not
    # caught. A tuple catches both.
    except (KeyError, urllib.error.HTTPError):
        print(
            "\tError downloading standard development IDs for MRPC. You will need to manually split your data."
        )
        return

    # Use a set of (id1, id2) tuples for O(1) membership tests in the loop below
    # (the original used a list of lists, O(n) per row).
    dev_ids = set()
    with io.open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf-8") as ids_fh:
        for row in ids_fh:
            dev_ids.add(tuple(row.strip().split("\t")))

    # Split the raw train file into train.tsv / dev.tsv using the dev id list.
    with io.open(mrpc_train_file, encoding="utf-8") as data_fh, io.open(
        os.path.join(mrpc_dir, "train.tsv"), "w", encoding="utf-8"
    ) as train_fh, io.open(os.path.join(mrpc_dir, "dev.tsv"), "w", encoding="utf-8") as dev_fh:
        header = data_fh.readline()
        train_fh.write(header)
        dev_fh.write(header)
        for row in data_fh:
            label, id1, id2, s1, s2 = row.strip().split("\t")
            if (id1, id2) in dev_ids:
                dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
            else:
                train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
    print("\tCompleted!")
def download_diagnostic(data_dir):
    """Fetch the diagnostic TSV into ``data_dir``/diagnostic/diagnostic.tsv."""
    print("Downloading and extracting diagnostic...")
    target_dir = os.path.join(data_dir, "diagnostic")
    if not os.path.isdir(target_dir):
        os.mkdir(target_dir)
    target_file = os.path.join(target_dir, "diagnostic.tsv")
    URLLIB.urlretrieve(TASK2PATH["diagnostic"], target_file)
    print("\tCompleted!")
    return
def get_tasks(task_names):
    """Parse a comma-separated task string; ``"all"`` expands to every task.

    Raises an AssertionError if any requested task name is unknown.
    """
    requested = task_names.split(",")
    if "all" in requested:
        return TASKS
    for name in requested:
        assert name in TASKS, "Task %s not found!" % name
    return requested
def main(arguments):
    """CLI entry point: download/format the requested GLUE tasks.

    ``arguments`` is a list of CLI tokens (typically ``sys.argv[1:]``).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-d", "--data_dir", help="directory to save data to", type=str, default="glue_data"
    )
    parser.add_argument(
        "-t",
        "--tasks",
        help="tasks to download data for as a comma separated string",
        type=str,
        default="all",
    )
    parser.add_argument(
        "--path_to_mrpc",
        help="path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_text.txt",
        type=str,
        default="",
    )
    args = parser.parse_args(arguments)
    if not os.path.isdir(args.data_dir):
        os.mkdir(args.data_dir)
    tasks = get_tasks(args.tasks)
    for task in tasks:
        # MRPC and the diagnostic set need special handling (see their helpers);
        # every other task is a plain zip download + extract.
        if task == "MRPC":
            format_mrpc(args.data_dir, args.path_to_mrpc)
        elif task == "diagnostic":
            download_diagnostic(args.data_dir)
        else:
            download_and_extract(task, args.data_dir)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 6,858 | 37.318436 | 190 | py |
libai | libai-main/projects/text_classification/dataset/__init__.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .clue_dataset import ClueDataset
from .glue_dataset import GlueDataset
| 697 | 37.777778 | 74 | py |
libai | libai-main/projects/text_classification/dataset/clue_dataset.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
from enum import Enum
from typing import Optional, Union
import oneflow as flow
from filelock import FileLock
from oneflow.utils.data import Dataset
from libai.data.structures import DistTensorData, Instance
from .utils import EncodePattern
from .utils_clue import clue_convert_examples_to_features, clue_output_modes, clue_processors
logger = logging.getLogger(__name__)
class Split(Enum):
    """Dataset split selector; values appear in cache-file names below."""

    train = "train"
    dev = "dev"
    test = "test"
class ClueDataset(Dataset):
    """Map-style dataset for CLUE text-classification tasks.

    Tokenized features are cached on disk (guarded by a file lock so that in
    distributed training only one process builds them) and reloaded on later
    runs unless ``overwrite_cache`` is set.
    """

    def __init__(
        self,
        task_name,
        data_dir,
        tokenizer,
        max_seq_length: int = 128,
        mode: Union[str, Split] = Split.train,
        pattern: Union[str, EncodePattern] = EncodePattern.bert_pattern,
        cache_dir: Optional[str] = None,
        # NOTE(review): default True means the cache is rebuilt on every run —
        # confirm this default is intended.
        overwrite_cache: bool = True,
    ):
        self.processor = clue_processors[task_name]()
        self.output_mode = clue_output_modes[task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{max_seq_length}_{task_name}",
        )
        label_list = self.processor.get_labels()
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                start = time.time()
                self.features = flow.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]",
                    time.time() - start,
                )
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(data_dir)
                else:
                    examples = self.processor.get_train_examples(data_dir)
                self.features = clue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=max_seq_length,
                    pattern=pattern,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                flow.save(self.features, cached_features_file)
                logger.info(
                    f"Saving features into cached file {cached_features_file} "
                    f"[took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        """Number of tokenized examples."""
        return len(self.features)

    def __getitem__(self, i):
        """Return example ``i`` as an Instance of DistTensorData fields.

        Feature attributes are converted per field: ``labels`` become
        long/float tensors placed on the last pipeline stage
        (``placement_idx=-1``), ``attention_mask`` becomes bool, and all
        remaining fields become long tensors. ``None`` attributes are skipped.
        """
        feature = self.features[i]
        tensors = {}
        for k, v in feature.__dict__.items():
            if v is not None:
                if k == "labels":
                    # int labels -> classification (long); otherwise regression (float)
                    dtype = flow.long if isinstance(v, int) else flow.float
                    t = flow.tensor(v, dtype=dtype)
                    tensors[k] = DistTensorData(t, placement_idx=-1)
                elif k == "attention_mask":
                    t = flow.tensor(v, dtype=flow.bool)
                    tensors[k] = DistTensorData(t)
                else:
                    t = flow.tensor(v, dtype=flow.long)
                    tensors[k] = DistTensorData(t)
        sample = Instance(**tensors)
        return sample

    def get_labels(self):
        """Return the task's label list (from the CLUE processor)."""
        return self.label_list
| 4,576 | 35.03937 | 95 | py |
libai | libai-main/projects/text_classification/configs/config.py | from omegaconf import OmegaConf
from libai.config import get_config
from libai.config import LazyCall
from libai.data.build import build_nlp_test_loader, build_nlp_train_loader
from libai.tokenizer import BertTokenizer
from projects.text_classification.modeling.model import ModelForSequenceClassification
from projects.text_classification.dataset import ClueDataset
tokenization = get_config("common/data/bert_dataset.py").tokenization
optim = get_config("common/optim.py").optim
model_cfg = get_config("common/models/bert.py").cfg
graph = get_config("common/models/graph.py").graph
train = get_config("common/train.py").train
tokenization.tokenizer = LazyCall(BertTokenizer)(
vocab_file="/DATA/disk1/liuchi/work/bert-base-chinese-vocab.txt",
do_lower_case=True,
do_chinese_wwm=False,
)
tokenization.append_eod = False
tokenization.make_vocab_size_divisible_by = 128
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_loader)(
dataset=[
LazyCall(ClueDataset)(
task_name="afqmc",
data_dir="./projects/text_classification/dataset/clue_data/afqmc",
tokenizer=tokenization.tokenizer,
max_seq_length=128,
mode="train",
),
],
num_workers=4,
)
dataloader.test = [
LazyCall(build_nlp_test_loader)(
dataset=LazyCall(ClueDataset)(
task_name="afqmc",
data_dir="./projects/text_classification/dataset/clue_data/afqmc",
tokenizer=tokenization.tokenizer,
max_seq_length=512,
mode="dev",
),
num_workers=4,
),
]
model_cfg.update(
dict(
# exist key
vocab_size=21248,
hidden_size=1024,
hidden_layers=24,
num_attention_heads=16,
# new key
num_classes=2,
pretrain_megatron_weight=None,
)
)
model = LazyCall(ModelForSequenceClassification)(cfg=model_cfg)
train.update(
dict(
activation_checkpoint=dict(enabled=True),
output_dir="output/benchmark/",
train_micro_batch_size=4,
test_micro_batch_size=4,
train_epoch=1,
train_iter=0,
evaluation=dict(
enabled=True,
eval_period=500,
),
log_period=50,
dist=dict(
data_parallel_size=1,
tensor_parallel_size=1,
pipeline_parallel_size=1,
),
)
)
| 2,416 | 27.77381 | 86 | py |
libai | libai-main/projects/text_classification/modeling/model.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import oneflow as flow
from oneflow import nn
from libai.layers import Linear
from libai.models.bert_model import BertModel
from libai.models.utils import init_method_normal
from libai.utils import distributed as dist
logger = logging.getLogger("libai." + __name__)
class ClassificationLoss(nn.Module):
    """Cross-entropy loss whose SBP signature is rebalanced for global tensors."""

    def __init__(self):
        super().__init__()

    def forward(self, classification_logits, label):
        """Return the scalar cross-entropy loss of logits vs. integer labels."""
        loss = nn.CrossEntropyLoss()(classification_logits, label)
        # NOTE: Change loss sbp sign [P, P] -> [P, B] to add with sop loss
        # whose sbp sign: [P, B]
        loss = loss.to_global(sbp=dist.get_nd_sbp([flow.sbp.partial_sum, flow.sbp.broadcast]))
        return loss
class ModelForSequenceClassification(nn.Module):
    """BERT encoder + dropout + linear head for sequence classification.

    Optionally warm-starts the encoder from a Megatron-LM checkpoint when
    ``cfg.pretrain_megatron_weight`` is set. In training mode (with labels)
    ``forward`` returns ``{"loss": ...}``; otherwise ``{"prediction_scores": logits}``.
    """

    def __init__(self, cfg):
        super().__init__()
        self.num_classes = cfg.num_classes
        self.model = BertModel(cfg)
        if cfg.pretrain_megatron_weight is not None:
            # Deferred import: only needed when loading a Megatron checkpoint.
            from .load_megatron_weight import load_megatron_bert

            logger.info(f"loading pretraining: {cfg.pretrain_megatron_weight}")
            load_megatron_bert(self.model, cfg.pretrain_megatron_weight)
            logger.info("load succeed")
        init_method = init_method_normal(cfg.initializer_range)
        self.dropout = nn.Dropout(cfg.hidden_dropout_prob)
        # Row-parallel classifier on the last pipeline stage (layer_idx=-1).
        self.classifier = Linear(
            cfg.hidden_size,
            self.num_classes,
            bias=True,
            parallel="row",
            init_method=init_method,
            layer_idx=-1,
        )
        self.loss_fct = ClassificationLoss()

    def forward(self, input_ids, attention_mask, token_type_ids=None, labels=None):
        """Classify from BERT's pooled output; return loss dict or logits dict."""
        encoder_output, pooled_output = self.model(input_ids, attention_mask, token_type_ids)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        if self.training and labels is not None:
            loss = self.loss_fct(logits.view(-1, self.num_classes), labels.view(-1))
            loss_dict = {"loss": loss}
            return loss_dict
        return {"prediction_scores": logits}
| 2,753 | 34.307692 | 94 | py |
libai | libai-main/projects/text_classification/modeling/load_megatron_weight.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import oneflow as flow
import torch
from libai.utils.checkpoint import get_missing_parameters_message, get_unexpected_parameters_message
logger = logging.getLogger("libai." + __name__)
def convert_tensor(tensor: torch.Tensor):
    """Copy a torch tensor into a float32 oneflow tensor (via numpy on CPU)."""
    return flow.Tensor(tensor.float().cpu().numpy())
def change_megatron_key(state_dict):
    """Map a Megatron-LM BERT checkpoint's keys to LiBai's naming scheme.

    Takes the checkpoint's ``model`` dict (nested: language_model / lm_head /
    binary_head) and returns a flat ``{libai_key: oneflow_tensor}`` dict.
    All tensors are converted to float32 oneflow tensors via convert_tensor.
    """
    of_state_dict = {}
    # Language model.
    language_model = state_dict["language_model"]
    # Embedding.
    embedding = language_model["embedding"]
    of_state_dict["embeddings.vocab_embeddings.weight"] = convert_tensor(
        embedding["word_embeddings"]["weight"]
    )
    of_state_dict["embeddings.position_embeddings.weight"] = convert_tensor(
        embedding["position_embeddings"]["weight"]
    )
    of_state_dict["embeddings.tokentype_embeddings.weight"] = convert_tensor(
        embedding["tokentype_embeddings"]["weight"]
    )
    # Encoder.
    encoder = language_model["encoder"]
    for key, value in encoder.items():
        # Change layers.0.input_layernorm.weight -> encoders.0.input_layernorm.weight
        key = "encoders." + key.replace("layers.", "")
        # The final layernorm lives at the top level in LiBai, not under encoders.
        if key.startswith("encoders.final_layernorm"):
            key = key.replace("encoders.", "")
        of_state_dict[key] = convert_tensor(value)
    # Pooler.
    pooler = language_model["pooler"]
    of_state_dict["pooler.dense.weight"] = convert_tensor(pooler["dense.weight"])
    of_state_dict["pooler.dense.bias"] = convert_tensor(pooler["dense.bias"])
    # LM head.
    lm_head = state_dict["lm_head"]
    of_state_dict["cls.predictions.dense.weight"] = convert_tensor(lm_head["dense.weight"])
    of_state_dict["cls.predictions.dense.bias"] = convert_tensor(lm_head["dense.bias"])
    of_state_dict["cls.predictions.layernorm.weight"] = convert_tensor(lm_head["layernorm.weight"])
    of_state_dict["cls.predictions.layernorm.bias"] = convert_tensor(lm_head["layernorm.bias"])
    of_state_dict["lm_logits.bias"] = convert_tensor(lm_head["bias"])
    # Binary head (next-sentence-prediction classifier).
    binary_head = state_dict["binary_head"]
    of_state_dict["cls.seq_relationship.weight"] = convert_tensor(binary_head["weight"])
    of_state_dict["cls.seq_relationship.bias"] = convert_tensor((binary_head["bias"]))
    return of_state_dict
def load_tensor(tensor_lhs, tensor_rhs):
    """In-place copy of ``tensor_rhs`` into ``tensor_lhs``.

    The source is first made global with the destination's placement/SBP so
    the copy is legal under oneflow's global-tensor semantics.
    """
    tensor_rhs = flow.to_global(tensor_rhs, placement=tensor_lhs.placement, sbp=tensor_lhs.sbp)
    tensor_lhs.copy_(tensor_rhs)
def load_model(model: flow.nn.Module, state_dict):
    """Copy a converted checkpoint ``state_dict`` into ``model`` in place.

    Linear weights (any key containing "weight" but neither "embeddings" nor
    "layernorm") are transposed, because Megatron stores them as the
    transpose of oneflow's layout. Shape mismatches, missing keys and
    unexpected keys are logged instead of raising. ``state_dict`` is mutated
    (mismatched entries are popped).
    """
    model_state_dict = model.state_dict()

    def _needs_transpose(key):
        # Linear weights only; embedding and layernorm weights keep their layout.
        return "weight" in key and "embeddings" not in key and "layernorm" not in key

    # Drop checkpoint entries whose shape cannot match the model.
    incorrect_shapes = []
    for k in list(state_dict.keys()):
        if k in model_state_dict:
            if _needs_transpose(k):
                # Compare against the transposed model shape (M, N) -> (N, M).
                shape_model = tuple(model_state_dict[k].shape[::-1])
            else:
                shape_model = tuple(model_state_dict[k].shape)
            shape_ckpt = tuple(state_dict[k].shape)
            if shape_model != shape_ckpt:
                incorrect_shapes.append((k, shape_ckpt, shape_model))
                state_dict.pop(k)

    unexpected_keys = []
    for key, value in state_dict.items():
        if key not in model_state_dict:
            unexpected_keys.append(key)
            continue
        # pop() both marks the key as handled (whatever remains afterwards is
        # "missing") and hands back the target tensor — the original rebuilt
        # the full model.state_dict() once per key here, which is O(n^2).
        target = model_state_dict.pop(key)
        if _needs_transpose(key):
            value = flow.transpose(value, 0, 1)
        load_tensor(target, value)

    missing_keys = list(model_state_dict.keys())
    for k, shape_checkpoint, shape_model in incorrect_shapes:
        logger.warning(
            "Skip loading parameter '{}' to the model due to incompatible "
            "shapes: {} in the checkpoint but {} in the "
            "model! You might want to double check if this is expected.".format(
                k, shape_checkpoint, shape_model
            )
        )
    if missing_keys:
        logger.info(get_missing_parameters_message(missing_keys))
    if unexpected_keys:
        logger.info(get_unexpected_parameters_message(unexpected_keys))
def load_megatron_bert(model: flow.nn.Module, model_weight_path: str):
    """Load a Megatron-LM BERT checkpoint file into a LiBai oneflow model.

    ``model_weight_path`` is a torch checkpoint whose ``"model"`` entry holds
    the nested Megatron state dict.
    """
    # ``torch`` is imported at module level; the original's local
    # ``import torch`` here was redundant and shadowed it.
    megatron_state_dict = torch.load(model_weight_path, map_location="cpu")["model"]
    of_state_dict = change_megatron_key(megatron_state_dict)
    load_model(model, of_state_dict)
| 5,294 | 35.770833 | 100 | py |
libai | libai-main/projects/MAE/train_net.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import sys
import numpy as np
import oneflow as flow
from utils.weight_convert import load_torch_checkpoint
from libai.config import LazyConfig, default_argument_parser, try_get_key
from libai.engine import DefaultTrainer, default_setup
from libai.utils.checkpoint import Checkpointer
sys.path.append(".")
logger = logging.getLogger("libai.mae." + __name__)
class Trainer(DefaultTrainer):
    """DefaultTrainer that can warm-start the model from a pretrained checkpoint."""

    @classmethod
    def build_model(cls, cfg):
        """Build the model; if ``cfg.finetune.enable`` is True, load pretrained
        weights in either oneflow or pytorch format before returning it."""
        model = super().build_model(cfg)
        if try_get_key(cfg, "finetune") is not None:
            if cfg.finetune.enable is True:
                logger.info("Loading pretrained weight for finetuning")
                assert cfg.finetune.weight_style in ["oneflow", "pytorch"]
                if cfg.finetune.weight_style == "oneflow":
                    Checkpointer(model).load(cfg.finetune.path)
                elif cfg.finetune.weight_style == "pytorch":
                    # strict=False: fine-tune heads may not exist in the checkpoint.
                    model = load_torch_checkpoint(model, cfg, path=cfg.finetune.path, strict=False)
                else:
                    raise NotImplementedError(
                        "Only support loading oneflow & pytorch pretrained weight now."
                    )
        return model
def main(args):
    """LiBai entry point: load the lazy config, then either evaluate or train.

    Honors ``--fast-dev-run`` (shrinks the schedule to a smoke test) and
    ``--eval-only`` (build model + test loader, run Trainer.test, return).
    """
    cfg = LazyConfig.load(args.config_file)
    cfg = LazyConfig.apply_overrides(cfg, args.opts)
    default_setup(cfg, args)
    if args.fast_dev_run:
        # Quick smoke run: a handful of iterations with frequent eval/logging.
        cfg.train.train_epoch = 0
        cfg.train.checkpointer.period = 5
        cfg.train.train_iter = 10
        cfg.train.evaluation.eval_period = 10
        cfg.train.log_period = 1
    if args.eval_only:
        cfg.eval_only = True
        tokenizer = None
        if try_get_key(cfg, "tokenization.setup", default=False):
            tokenizer = Trainer.build_tokenizer(cfg)
        model = Trainer.build_model(cfg)
        Checkpointer(model, save_dir=cfg.train.output_dir).resume_or_load(
            cfg.train.load_weight, resume=args.resume
        )
        if try_get_key(cfg, "train.graph.enabled", default=False):
            # Wrap the eager model into an nn.Graph for static-graph evaluation.
            model = Trainer.build_graph(cfg, model, is_train=False)
        test_loader = Trainer.build_test_loader(cfg, tokenizer)
        if len(test_loader) == 0:
            logger.info("No dataset in dataloader.test, please set dataset for dataloader.test")
        _ = Trainer.test(cfg, test_loader, model)
        return
    # manual different seed for each rank
    seed_for_rank = cfg.train.seed + flow.env.get_rank()
    flow.manual_seed(seed_for_rank)
    flow.cuda.manual_seed(seed_for_rank)
    np.random.seed(seed_for_rank)
    random.seed(seed_for_rank)
    trainer = Trainer(cfg)
    return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
main(args)
| 3,384 | 35.010638 | 99 | py |
libai | libai-main/projects/MAE/configs/mae_pretraining.py | from flowvision.transforms import transforms, InterpolationMode
from flowvision.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from libai.config import LazyCall, get_config
from configs.models.mae_vit_base_patch16 import model
from data.pretraining_imagenet import PretrainingImageNetDataset
from utils.lr_decay import param_groups_weight_decay
from utils.scheduler import warmup_cosine_lr_scheduler
train = get_config("common/train.py").train
optim = get_config("common/optim.py").optim
graph = get_config("common/models/graph.py").graph
dataloader = get_config("common/data/imagenet.py").dataloader
# MAE Graph training for faster speed
graph.enabled = True
# Refine data path to imagenet
dataloader.train.dataset[0].root = "/path/to/imagenet"
dataloader.train.dataset[0]._target_ = PretrainingImageNetDataset
# No test data for pretraining
del dataloader.test
# Refine data transform to MAE's default settings
transform_train = LazyCall(transforms.Compose)(
transforms=[
LazyCall(transforms.RandomResizedCrop)(
size=(224, 224),
scale=(0.2, 1.0),
interpolation=InterpolationMode.BICUBIC,
),
LazyCall(transforms.RandomHorizontalFlip)(),
LazyCall(transforms.ToTensor)(),
LazyCall(transforms.Normalize)(
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
),
]
)
dataloader.train.dataset[0].transform = transform_train
# number devices
n_gpus = 8
# Refine training settings for MAE
train.train_micro_batch_size = 64
train.num_accumulation_steps = 8
effective_batch_size = train.train_micro_batch_size * train.num_accumulation_steps * n_gpus
train.train_epoch = 800
train.warmup_ratio = 40 / 800
train.log_period = 20
train.checkpointer.save_model_after_n_epoch = 20
# enable activation checkpointing
# train.activation_checkpoint.enabled = True
# set rdma enabled when num nodes > 1
# train.rdma_enabled = False
# Base learning in MAE is set to 1.5e-4
# The actually learning rate should be computed by linear scaling rule as follows:
# lr = base_lr * batch_size / 256
# In LiBai, you should refine the actually learning rate due to your on settings
# Here we use 8 GPUs, 128 batch_size per GPU for training, batch_size equals to 1024
base_lr = 1.5e-4
actual_lr = base_lr * effective_batch_size / 256
# Refine optim settings
optim.params._target_ = param_groups_weight_decay
optim.params.weight_decay = 0.05
optim.lr = actual_lr
optim.betas = (0.9, 0.95)
del optim.params.clip_grad_max_norm
del optim.params.clip_grad_norm_type
del optim.params.weight_decay_norm
del optim.params.weight_decay_bias
del optim.weight_decay
# Refine scheduler
# Default scheduler in LiBai training config is WarmupCosineLR
train.scheduler = LazyCall(warmup_cosine_lr_scheduler)(
warmup_factor=0.0,
min_lr=0.0,
)
# AMP
train.amp.enabled = True
# Distributed Settings
train.dist.data_parallel_size = n_gpus
train.dist.tensor_parallel_size = 1
train.dist.pipeline_parallel_size = 1
# train.dist.pipeline_num_layers = model.depth
| 3,073 | 28.84466 | 91 | py |
libai | libai-main/projects/MAE/configs/mae_finetune.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf import OmegaConf
from flowvision.data import Mixup
# from flowvision.loss.cross_entropy import SoftTargetCrossEntropy
from libai.config import LazyCall, get_config
from modeling.cross_entropy import SoftTargetCrossEntropy
from configs.models.vit_base_patch16 import model
from utils.scheduler import (
warmup_layerscale_cosine_lr_scheduler,
warmup_cosine_lr_scheduler,
)
from utils.lr_decay import param_groups_lrd
# Get train, optim and graph configs
train = get_config("common/train.py").train
optim = get_config("common/optim.py").optim
graph = get_config("common/models/graph.py").graph
dataloader = get_config("common/data/imagenet.py").dataloader
# number devices
n_gpus = 8
# Graph training
graph.enabled = True
# Refine model cfg for vit training on imagenet
model.num_classes = 1000
model.loss_func = LazyCall(SoftTargetCrossEntropy)()
# Path to the weight for fine-tune
finetune = OmegaConf.create()
finetune.enable = True # only load weight if enable is True
finetune.weight_style = (
"oneflow" # Set "oneflow" for loading oneflow weights, set "pytorch" for loading torch weights
)
finetune.path = "/path/to/pretrained_mae_weight"
# Refine data path to imagenet
dataloader.train.dataset[0].root = "/path/to/imagenet"
dataloader.test[0].dataset.root = "/path/to/imagenet"
# Add Mixup Func
dataloader.train.mixup_func = LazyCall(Mixup)(
mixup_alpha=0.8,
cutmix_alpha=1.0,
prob=1.0,
switch_prob=0.5,
mode="batch",
label_smoothing=0.1,
num_classes=model.num_classes,
)
# Refine training settings for MAE finetune
train.train_micro_batch_size = 32
train.num_accumulation_steps = 4
train.test_micro_batch_size = 32
effective_batch_size = train.train_micro_batch_size * train.num_accumulation_steps * n_gpus
train.train_epoch = 100
train.warmup_ratio = 5 / 100
train.log_period = 20
train.evaluation.eval_after_n_epoch = 1
train.checkpointer.save_model_after_n_epoch = 1
# Set layer decay for MAE fine-tune
train.layer_decay = 0.65
# AMP
train.amp.enabled = True
# Base learning in MAE is set to 1.5e-4
# The actually learning rate should be computed by linear scaling rule as follows:
# lr = base_lr * batch_size / 256
# In LiBai, you should refine the actually learning rate due to your on settings
# Here we use 8 GPUs, 128 batch_size per GPU for training, batch_size equals to 1024
base_lr = 5e-4
actual_lr = base_lr * effective_batch_size / 256
# Refine optim settings
optim.params._target_ = param_groups_lrd
optim.params.weight_decay = 0.05
optim.params.layer_decay = 0.65
optim.lr = actual_lr
del optim.params.clip_grad_max_norm
del optim.params.clip_grad_norm_type
del optim.params.weight_decay_norm
del optim.params.weight_decay_bias
del optim.weight_decay
# Refine scheduler
if graph.enabled:
train.scheduler = LazyCall(warmup_cosine_lr_scheduler)(
warmup_factor=0.0,
min_lr=1e-6,
)
else:
train.scheduler = LazyCall(warmup_layerscale_cosine_lr_scheduler)(
warmup_factor=0.0,
min_lr=1e-6,
)
# Distributed Settings
train.dist.pipeline_num_layers = model.depth
train.dist.data_parallel_size = n_gpus
train.dist.tensor_parallel_size = 1
train.dist.pipeline_parallel_size = 1
eval_only = False
| 3,855 | 28.212121 | 99 | py |
libai | libai-main/projects/MAE/configs/models/vit_base_patch16.py | from libai.config import LazyCall
from modeling.vit import VisionTransformer
# ViT-Base/16 backbone config, lazily instantiated by LiBai's config system.
model = LazyCall(VisionTransformer)(
    img_size=224,
    patch_size=16,
    in_chans=3,
    embed_dim=768,
    depth=12,
    num_heads=12,
    mlp_ratio=4,
    drop_path_rate=0.1,
    # global average pooling over tokens instead of the [CLS] token (MAE fine-tune setup)
    global_pool=True,
)
| 286 | 15.882353 | 42 | py |
libai | libai-main/projects/MAE/configs/models/vit_large_patch16.py | from .vit_base_patch16 import model
model.embed_dim = 1024
model.depth = 24
model.num_heads = 16
| 99 | 13.285714 | 35 | py |
libai | libai-main/projects/MAE/configs/models/mae_vit_huge_patch14.py | from .mae_vit_base_patch16 import model
model.patch_size = 14
model.embed_dim = 1280
model.depth = 32
model.num_heads = 16
| 125 | 14.75 | 39 | py |
libai | libai-main/projects/MAE/configs/models/vit_huge_patch14.py | from .vit_base_patch16 import model
model.patch_size = 14
model.embed_dim = 1280
model.depth = 32
model.num_heads = 16
model.drop_path_rate = 0.2
| 148 | 15.555556 | 35 | py |
libai | libai-main/projects/MAE/configs/models/mae_vit_large_patch16.py | from .mae_vit_base_patch16 import model
model.embed_dim = 1024
model.depth = 24
model.num_heads = 16
| 103 | 13.857143 | 39 | py |
libai | libai-main/projects/MAE/configs/models/mae_vit_base_patch16.py | from functools import partial
from libai.config import LazyCall
from libai.layers import LayerNorm
from modeling.mae import MaskedAutoencoderViT
model = LazyCall(MaskedAutoencoderViT)(
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
decoder_embed_dim=512,
decoder_depth=8,
decoder_num_heads=16,
mlp_ratio=4,
norm_layer=partial(LayerNorm, eps=1e-6),
norm_pix_loss=True,
mask_ratio=0.75,
)
| 476 | 18.875 | 45 | py |
libai | libai-main/projects/MAE/utils/lr_decay.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# References:
# mae: https://github.com/facebookresearch/mae/blob/main/util/lr_decay.py
# --------------------------------------------------------
import logging
logger = logging.getLogger("libai.mae." + __name__)
def param_groups_lrd(model, weight_decay=0.05, layer_decay=0.75):
"""
Parameter groups for layer-wise lr decay
Modified from BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58
"""
param_group_names = {}
param_groups = {}
no_weight_decay_list = model.no_weight_decay()
num_layers = len(model.blocks) + 1
layer_scales = list(layer_decay ** (num_layers - i) for i in range(num_layers + 1))
for name, param in model.named_parameters():
if not param.requires_grad:
continue
if param.ndim == 1 or name in no_weight_decay_list:
g_decay = "no_decay"
this_decay = 0.0
else:
g_decay = "decay"
this_decay = weight_decay
layer_idx = get_layer_idx_for_vit(name, num_layers)
group_name = "layer_%d_%s" % (layer_idx, g_decay)
# logger.info(
# f"{name}, shape={param.shape}, {g_decay}={this_decay}"
# f", layer_scale={layer_scales[layer_idx]}"
# )
if group_name not in param_group_names:
this_scale = layer_scales[layer_idx]
param_group_names[group_name] = {
"lr_scale": this_scale,
"weight_decay": this_decay,
"params": [],
}
param_groups[group_name] = {
"lr_scale": this_scale,
"weight_decay": this_decay,
"params": [],
}
param_group_names[group_name]["params"].append(name)
param_groups[group_name]["params"].append(param)
return list(param_groups.values())
def get_layer_idx_for_vit(name, num_layers):
"""
Assign a parameter with its layer id
Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
"""
if name in ["cls_token", "pos_embed"]:
return 0
elif name.startswith("patch_embed"):
return 0
elif name.startswith("blocks"):
return int(name.split(".")[1]) + 1
else:
return num_layers
# Refer to: add_weight_decay in
# https://github.com/rwightman/pytorch-image-models/blob/v0.3.3/timm/optim/optim_factory.py
def param_groups_weight_decay(model, weight_decay=1e-5, skip_list=()):
decay_params = []
no_decay_params = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
no_decay_params.append(param)
else:
decay_params.append(param)
return [
{"params": no_decay_params, "weight_decay": 0.0},
{"params": decay_params, "weight_decay": weight_decay},
]
| 3,656 | 32.550459 | 96 | py |
libai | libai-main/projects/MAE/utils/weight_convert.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import oneflow as flow
import torch
from flowvision.layers.weight_init import trunc_normal_
logger = logging.getLogger("libai.mae." + __name__)
def convert_qkv_weight(cfg, value):
"""
Convert qkv.weight to be compatible with LiBai transformer layer
Args:
cfg: config file
value: qkv.weight in the loaded checkpoint
"""
num_heads = cfg.model.num_heads
hidden_size = cfg.model.embed_dim
head_size = int(hidden_size / num_heads)
qkv_weight = (
value.view([3, num_heads, head_size, hidden_size])
.permute(1, 0, 2, 3)
.contiguous()
.view(hidden_size * 3, hidden_size)
)
return qkv_weight
def convert_qkv_bias(cfg, value):
"""
Convert qkv.bias to be compatible with LiBai transformer layer
Args:
cfg: config file
value: qkv.bias in the loaded checkpoint
"""
num_heads = cfg.model.num_heads
hidden_size = cfg.model.embed_dim
head_size = int(hidden_size / num_heads)
qkv_bias = (
value.view(3, num_heads, head_size).permute(1, 0, 2).contiguous().view(hidden_size * 3)
)
return qkv_bias
def filter_keys(key, value, cfg):
"""
Filtering the state_dict keys and values to match LiBai's MAE model
"""
if key.startswith("decoder_"):
value = None
elif "norm1" in key:
key = key.replace("norm1", "input_layernorm")
elif "attn.qkv" in key:
key = key.replace("attn.qkv", "self_attention.query_key_value")
if "weight" in key:
value = convert_qkv_weight(cfg, value)
if "bias" in key:
value = convert_qkv_bias(cfg, value)
elif "attn.proj" in key:
key = key.replace("attn.proj", "self_attention.dense")
elif "norm2" in key:
key = key.replace("norm2", "post_attention_layernorm")
elif "mlp.fc1" in key:
key = key.replace("mlp.fc1", "mlp.dense_h_to_4h")
elif "mlp.fc2" in key:
key = key.replace("mlp.fc2", "mlp.dense_4h_to_h")
elif "fc_norm" in key:
key = key.replace("fc_norm", "norm")
elif key == "norm.weight" or key == "norm.bias":
value = None
return key, value
def log_param(key, value):
logger.info(f"{key}, shape={value.shape}")
def load_torch_checkpoint(model, cfg, path="./mae_finetuned_vit_base.pth", strict=False):
"""
Load checkpoint from the given torch weights.
Torch weight can be downloaded from the original repo:
https://github.com/facebookresearch/mae
"""
torch_dict = torch.load(path, map_location="cpu")["model"]
parameters = torch_dict
new_parameters = dict()
for key, value in parameters.items():
# log_param(key, value)
if "num_batches_tracked" not in key:
# to global tensor
key, val = filter_keys(key, value, cfg)
if val is None:
continue
val = val.detach().cpu().numpy()
val = flow.tensor(val).to_global(
sbp=flow.sbp.broadcast, placement=flow.placement("cuda", ranks=[0])
)
new_parameters[key] = val
msg = model.load_state_dict(new_parameters, strict=strict)
logger.info(msg)
if not cfg.eval_only:
trunc_normal_(model.head.weight, std=2e-5)
logger.info("Successfully load torch mae checkpoint.")
return model
| 3,986 | 31.153226 | 95 | py |
libai | libai-main/projects/MAE/utils/scheduler.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import oneflow as flow
from oneflow.optim.lr_scheduler import _LRScheduler
logger = logging.getLogger(__name__)
class LayerScaleWarmupCosineDecayLR(_LRScheduler):
def __init__(
self,
optimizer: flow.optim.Optimizer,
steps: int,
warmup_steps: int,
warmup_factor: float,
min_lr: float = 0.0,
last_step: int = -1,
verbose: bool = False,
):
self.total_steps = steps
self.decay_steps = steps - warmup_steps
self.warmup_steps = warmup_steps
self.warmup_factor = warmup_factor
self.min_lr = min_lr
super().__init__(optimizer, last_step, verbose)
def get_lr(self, base_lr, step):
if step < self.warmup_steps:
progress = step / self.warmup_steps
lr = base_lr * progress
elif step < self.total_steps:
progress = (step - self.warmup_steps) / self.decay_steps
lr = self.min_lr + (base_lr - self.min_lr) * 0.5 * (1.0 + math.cos(math.pi * progress))
else:
lr = self.min_lr
return lr
def update_lrs(self, lrs):
self._last_lr = []
for i, (group, lr) in enumerate(zip(self.optimizer.param_groups, lrs)):
if "lr_scale" in group:
group["lr"] = lr * group["lr_scale"]
else:
group["lr"] = lr
self._last_lr.append(lr)
if self.verbose:
self.print_lr(i, lr)
def warmup_layerscale_cosine_lr_scheduler(
optimizer: flow.optim.Optimizer,
max_iter: int,
warmup_iter: int,
warmup_factor: float,
min_lr: float = 0.0,
):
return LayerScaleWarmupCosineDecayLR(
optimizer,
steps=max_iter,
warmup_steps=warmup_iter,
warmup_factor=warmup_factor,
min_lr=min_lr,
)
def warmup_cosine_lr_scheduler(
optimizer: flow.optim.Optimizer,
max_iter: int,
warmup_iter: int,
warmup_factor: float = 0.0,
warmup_method: str = "linear",
min_lr: float = 0.0,
):
cosine_lr = flow.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=max_iter - warmup_iter, eta_min=min_lr
)
if warmup_iter == 0:
logger.warning("warmup iters equals to zero, return CosineLR")
return cosine_lr
if warmup_iter > max_iter:
logger.warning("warmup iters is larger than the total training iters")
warmup_cosine_lr = flow.optim.lr_scheduler.WarmupLR(
cosine_lr,
warmup_factor=warmup_factor,
warmup_iters=warmup_iter,
warmup_method=warmup_method,
)
return warmup_cosine_lr
| 3,271 | 29.018349 | 99 | py |
libai | libai-main/projects/MAE/data/pretraining_imagenet.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libai.data.datasets.imagenet import ImageNetDataset
from libai.data.structures import Instance
class PretrainingImageNetDataset(ImageNetDataset):
"""ImageNet Dataset in LiBai for Pretraining
Return:
images: ImageNet train set images
"""
def __getitem__(self, index: int):
data_sample = super().__getitem__(index)
return Instance(images=data_sample.get("images"))
| 1,033 | 33.466667 | 74 | py |
libai | libai-main/projects/MAE/modeling/mae.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# MAE Model
# References:
# mae: https://github.com/facebookresearch/mae/blob/main/models_mae.py
# --------------------------------------------------------
import oneflow as flow
import oneflow.nn as nn
import libai.utils.distributed as dist
from libai.config import configurable
from libai.layers import LayerNorm, Linear, PatchEmbedding, TransformerLayer
from .pos_embed import get_2d_sincos_pos_embed
class MaskedAutoencoderViT(nn.Module):
"""Masked Autoencoder with VisionTransformer backbone"""
@configurable
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=1024,
depth=24,
num_heads=16,
decoder_embed_dim=512,
decoder_depth=8,
decoder_num_heads=16,
mlp_ratio=4.0,
norm_layer=LayerNorm,
norm_pix_loss=False,
mask_ratio=0.75,
):
super().__init__()
self.mask_ratio = mask_ratio
# --------------------------------------------------------------------------
# MAE encoder specifics
self.patch_embed = PatchEmbedding(img_size, patch_size, in_chans, embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(
flow.zeros(
1,
1,
embed_dim,
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=dist.get_layer_placement(0),
)
)
self.pos_embed = nn.Parameter(
flow.zeros(
1,
num_patches + 1,
embed_dim,
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=dist.get_layer_placement(0),
)
)
self.blocks = nn.ModuleList(
[
TransformerLayer(
hidden_size=embed_dim,
ffn_hidden_size=int(embed_dim * mlp_ratio),
num_attention_heads=num_heads,
layer_idx=i,
)
for i in range(depth)
]
)
# TODO: set norm layer placement stage id
self.norm = norm_layer(embed_dim, layer_idx=depth)
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# MAE decoder specifics
self.decoder_embed = Linear(embed_dim, decoder_embed_dim, bias=True, layer_idx=depth)
self.mask_token = nn.Parameter(
flow.zeros(
1,
1,
decoder_embed_dim,
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=dist.get_layer_placement(depth),
)
)
self.decoder_pos_embed = nn.Parameter(
flow.zeros(
1,
num_patches + 1,
decoder_embed_dim,
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=dist.get_layer_placement(depth),
)
)
self.decoder_blocks = nn.ModuleList(
[
TransformerLayer(
hidden_size=decoder_embed_dim,
ffn_hidden_size=int(decoder_embed_dim * mlp_ratio),
num_attention_heads=decoder_num_heads,
layer_idx=(i + depth),
)
for i in range(decoder_depth)
]
)
self.decoder_norm = norm_layer(decoder_embed_dim, layer_idx=-1)
self.decoder_pred = Linear(
decoder_embed_dim, patch_size ** 2 * in_chans, bias=True, layer_idx=-1
) # decoder to patch
# --------------------------------------------------------------------------
self.norm_pix_loss = norm_pix_loss
self.initialize_weights()
def initialize_weights(self):
# initialization
# initialize (and freeze) pos_embed by sin-cos embedding
pos_embed = get_2d_sincos_pos_embed(
self.pos_embed.shape[-1], int(self.patch_embed.num_patches ** 0.5), cls_token=True
)
self.pos_embed.data.copy_(
flow.from_numpy(pos_embed)
.float()
.unsqueeze(0)
.to_global(
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=self.pos_embed.placement,
)
)
decoder_pos_embed = get_2d_sincos_pos_embed(
self.decoder_pos_embed.shape[-1],
int(self.patch_embed.num_patches ** 0.5),
cls_token=True,
)
self.decoder_pos_embed.data.copy_(
flow.from_numpy(decoder_pos_embed)
.float()
.unsqueeze(0)
.to_global(
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=self.decoder_pos_embed.placement,
)
)
# initialize patch_embed like nn.Linear (instead of nn.Conv2d)
w = self.patch_embed.proj.weight.data
flow.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
# timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
flow.nn.init.normal_(self.cls_token, std=0.02)
flow.nn.init.normal_(self.mask_token, std=0.02)
# initialize nn.Linear and nn.LayerNorm
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, Linear):
# we use xavier_uniform following official JAX ViT:
flow.nn.init.xavier_uniform_(m.weight)
if isinstance(m, Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@classmethod
def from_config(cls, cfg):
return {
"img_size": cfg.img_size,
"patch_size": cfg.patch_size,
"in_chans": cfg.in_chans,
"embed_dim": cfg.embed_dim,
"depth": cfg.depth,
"num_heads": cfg.num_heads,
"decoder_embed_dim": cfg.decoder_embed_dim,
"decoder_depth": cfg.decoder_depth,
"decoder_num_heads": cfg.decoder_num_heads,
"mlp_ratio": cfg.mlp_ratio,
"norm_layer": cfg.norm_layer,
"norm_pix_loss": cfg.norm_pix_loss,
"mask_ratio": cfg.mask_ratio,
}
def patchify(self, imgs):
"""
imgs: (N, 3, H, W)
x: (N, L, patch_size**2 *3)
"""
p = self.patch_embed.patch_size[0]
assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0
h = w = imgs.shape[2] // p
x = imgs.reshape(imgs.shape[0], 3, h, p, w, p)
# TODO: replace permute with flow.einsum
# (n c h p w q) -> (n h w p q c)
x = x.permute(0, 2, 4, 3, 5, 1)
x = x.reshape(imgs.shape[0], h * w, p ** 2 * 3)
return x
def unpatchify(self, x):
"""
x: (N, L, patch_size**2 *3)
imgs: (N, 3, H, W)
"""
p = self.patch_embed.patch_size[0]
h = w = int(x.shape[1] ** 0.5)
assert h * w == x.shape[1]
x = x.reshape(x.shape[0], h, w, p, p, 3)
# TODO: replace permute with flow.einsum
# (n h w p q c) -> (n c h p w q)
x = x.permute(0, 5, 1, 3, 2, 4)
imgs = x.reshape(x.shape[0], 3, h * p, h * p)
return imgs
def random_masking(self, x, mask_ratio):
"""
Perform per-sample random masking by per-sample shuffling.
Per-sample shuffling is done by argsort random noise.
x: [N, L, D], sequence
"""
N, L, D = x.shape
len_keep = int(L * (1 - mask_ratio))
noise = flow.rand(N, L, sbp=x.sbp, placement=x.placement) # noise in [0, 1]
# sort noise for each sample
ids_shuffle = flow.argsort(noise, dim=1) # ascend: small is keep, large is remove
ids_restore = flow.argsort(ids_shuffle, dim=1)
# keep the first subset
ids_keep = ids_shuffle[:, :len_keep]
x_masked = flow.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))
# generate the binary mask: 0 is keep, 1 is remove
mask = flow.ones([N, L], sbp=x.sbp, placement=x.placement)
mask[:, :len_keep] = 0
# unshuffle to get binary mask
mask = flow.gather(mask, dim=1, index=ids_restore)
return x_masked, mask, ids_restore
def forward_encoder(self, x, mask_ratio):
# embed patches
x = self.patch_embed(x)
# add pos embed w/o cls token
x = x + self.pos_embed[:, 1:, :]
# masking: length -> length * mask_ratio
x, mask, ids_restore = self.random_masking(x, mask_ratio)
# append cls token
cls_token = self.cls_token + self.pos_embed[:, :1, :]
# Directly expanding cls_token (with shape=(1, 1, D) and sbp=B)
# will produce a huge tensor with shape [B*N, 1, D]
# (while B = local batch size, N = total num devices),
# however we only need an expanded cls_token with shape [B, 1, D],
# meanwhile local to global tensor is not avaible in graph mode for now,
# we have to use a two stage expanding way to expand cls_token as below.
world_size = flow.env.get_world_size()
# repeat to (N, 1, D), sbp = B
cls_token = cls_token.expand(world_size, -1, -1)
# to_global(sbp=S(0)), local shape = (1, 1, D)
cls_token = cls_token.to_global(sbp=x.sbp)
# second expand from (N, 1, D) to (B*N, 1, D)
# (global shape, sbp=S(0)), local shape=(B, 1, D),
# by this way we wouldn't produce a (B*N, 1, D) tensor in local view.
cls_tokens = cls_token.repeat(x.shape[0] // world_size, 1, 1)
x = flow.cat((cls_tokens, x), dim=1)
# apply Transformer blocks
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x, mask, ids_restore
def forward_decoder(self, x, ids_restore):
# embed tokens
x = self.decoder_embed(x)
# append mask tokens to sequence
# mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1)
# The line above will produce a huge mask_tokens with shape [B*N, L, D]
# (while B = local batch size, N = total num devices),
# actually we only need a mask_tokens with shape [B, L, D] in local view,
# meanwhile local to global tensor is not avaible in graph mode for now,
# we have to use a two stage repeat way as below.
world_size = flow.env.get_world_size()
# repeat to (N, 1, D), sbp = B
mask_token = self.mask_token.repeat(world_size, 1, 1)
# to_global(sbp=S(0)), local shape = (1, 1, D)
mask_token = mask_token.to_global(sbp=x.sbp)
# second repeat from (N, 1, D) to (B*N, L, D)
# (global shape, sbp=S(0)), local shape = (B, L, D),
# and the originally huge mask_tokens with shape (B*N, L, D)
# wouldn't be produced in local view.
mask_tokens = mask_token.repeat(
x.shape[0] // world_size, ids_restore.shape[1] + 1 - x.shape[1], 1
)
x_ = flow.cat([x[:, 1:, :], mask_tokens], dim=1) # no cls token
x_ = flow.gather(
x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2])
) # unshuffle
x = flow.cat([x[:, :1, :], x_], dim=1) # append cls token
# add pos embed
x = x + self.decoder_pos_embed
# apply Transformer blocks
for blk in self.decoder_blocks:
x = blk(x)
x = self.decoder_norm(x)
# predictor projection
x = self.decoder_pred(x)
# remove cls token
x = x[:, 1:, :]
return x
def forward_loss(self, imgs, pred, mask):
"""
imgs: [N, 3, H, W]
pred: [N, L, p*p*3]
mask: [N, L], 0 is keep, 1 is remove,
"""
target = self.patchify(imgs)
if self.norm_pix_loss:
mean = target.mean(dim=-1, keepdim=True)
var = target.var(dim=-1, keepdim=True)
target = (target - mean) / (var + 1.0e-6) ** 0.5
loss = (pred - target) ** 2
# We want the prev loss to be calculated with float16,
# and mean/sum below to be calculated with float32.
# this amp_white_identity will affect preceding ops to be float16
loss = flow._C.amp_white_identity(loss)
# this amp_black_identity will affect succeeding ops to be float32
loss = flow._C.amp_black_identity(loss)
loss = loss.mean(dim=-1) # [N, L], mean loss per patch
loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches
return loss
def forward(self, images):
latent, mask, ids_restore = self.forward_encoder(images, self.mask_ratio)
pred = self.forward_decoder(latent, ids_restore) # [N, L, p*p*3]
loss = self.forward_loss(images, pred, mask)
if self.training:
return {"losses": loss}
else:
return {
"losses": loss,
"pred": pred,
"mask": mask,
}
| 14,102 | 35.44186 | 100 | py |
libai | libai-main/projects/MAE/modeling/pos_embed.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
# --------------------------------------------------------
# 2D sine-cosine position embedding
# References:
# MoCo v3: https://github.com/facebookresearch/moco-v3
# --------------------------------------------------------
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
"""
Arguments:
embed_dim: hidden_size of the input tokens
grid_size: int of the grid height and width
cls_token: with cls_token or not
Return:
pos_embed: [grid_size*grid_size, embed_dim]
or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
"""
grid_h = np.arange(grid_size, dtype=np.float32)
grid_w = np.arange(grid_size, dtype=np.float32)
grid = np.meshgrid(grid_w, grid_h) # here w goes first
grid = np.stack(grid, axis=0)
grid = grid.reshape([2, 1, grid_size, grid_size])
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
if cls_token:
pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
return pos_embed
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
assert embed_dim % 2 == 0
# use half of dimensions to encode grid_h
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
return emb
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
"""
embed_dim: output dimension for each position
pos: a list of positions to be encoded: size (M,)
out: (M, D)
"""
assert embed_dim % 2 == 0
omega = np.arange(embed_dim // 2, dtype=np.float)
omega /= embed_dim / 2.0
omega = 1.0 / 10000 ** omega # (D/2,)
pos = pos.reshape(-1) # (M,)
out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
emb_sin = np.sin(out) # (M, D/2)
emb_cos = np.cos(out) # (M, D/2)
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
return emb
| 2,675 | 33.307692 | 84 | py |
libai | libai-main/projects/MAE/modeling/cross_entropy.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
import oneflow.nn as nn
class SoftTargetCrossEntropy(nn.Module):
def __init__(self):
super(SoftTargetCrossEntropy, self).__init__()
def forward(self, x: flow.Tensor, target: flow.Tensor) -> flow.Tensor:
pred = flow.log_softmax(x, dim=-1)
loss = -target * pred
# sum and mean should be calculated with float32
# amp_white_identity ensure -target * pred using float16
# amp_black_identity ensure sum and mean using float32
loss = flow._C.amp_white_identity(loss)
loss = flow._C.amp_black_identity(loss)
loss = flow.sum(loss, dim=-1)
return loss.mean()
| 1,285 | 36.823529 | 74 | py |
libai | libai-main/projects/MAE/modeling/vit.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# ViT Model
# References:
# mae: https://github.com/facebookresearch/mae/blob/main/models_vit.py
# --------------------------------------------------------
import oneflow as flow
import libai.models.vision_transformer
class VisionTransformer(libai.models.vision_transformer.VisionTransformer):
"""Vision Transformer for MAE
LiBai impl of: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
- https://arxiv.org/abs/2010.11929
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
global_pool=False,
num_classes=1000,
loss_func=None,
):
super(VisionTransformer, self).__init__(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
depth=depth,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
drop_rate=drop_rate,
attn_drop_rate=attn_drop_rate,
drop_path_rate=drop_path_rate,
num_classes=num_classes,
loss_func=loss_func,
)
self.global_pool = global_pool
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def forward_head(self, x):
if self.global_pool:
x = x[:, 1:, :] # global pool without cls token
# we want mean to be calculated with float32
# the amp_white_identity pair make the calculation before and after mean using float16
# the amp_black_identity pair make mean using float32
x = flow._C.amp_white_identity(x)
x = flow._C.amp_black_identity(x)
x = x.mean(dim=1)
x = flow._C.amp_black_identity(x)
x = flow._C.amp_white_identity(x)
outcome = self.norm(x)
outcome = self.head(outcome)
else:
x = self.norm(x)
outcome = x[:, 0]
outcome = self.head(outcome)
return outcome
| 2,860 | 31.885057 | 98 | py |
libai | libai-main/projects/PaLM/palm_model.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import oneflow as flow
import oneflow.nn.functional as F
from oneflow import einsum, nn
from libai.config import configurable
from libai.layers import LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, VocabEmbedding
from libai.models.utils import init_method_normal
from libai.utils import distributed as dist
# rotary positional embedding
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(nn.Module):
def __init__(self, dim, *, layer_idx=0):
super().__init__()
inv_freq = flow.tensor(
1.0 / (10000 ** (np.arange(0, dim, 2, dtype=np.float32) / dim)),
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=dist.get_layer_placement(layer_idx),
)
self.register_buffer("inv_freq", inv_freq)
def forward(self, max_seq_len):
seq = flow.arange(
max_seq_len,
dtype=self.inv_freq.dtype,
sbp=self.inv_freq.sbp,
placement=self.inv_freq.placement,
)
freqs = einsum("i , j -> i j", seq, self.inv_freq)
return flow.cat((freqs, freqs), dim=-1)
def rotate_half(x):
# x = rearrange(x, "... (j d) -> ... j d", j=2)
x = x.reshape(*list(x.shape[:-1]), 2, -1)
x1 = x[..., 0, :]
x2 = x[..., 1, :]
return flow.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return (t * pos.cos()) + (rotate_half(t) * pos.sin())
# feedforward
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
def FeedForward(dim, mult=4, *, layer_idx=0):
inner_dim = int(dim * mult)
return nn.Sequential(
LayerNorm(dim, bias=False, layer_idx=layer_idx),
Linear(dim, inner_dim * 2, bias=False, parallel="col", layer_idx=layer_idx),
SwiGLU(),
Linear(inner_dim, dim, bias=False, parallel="row", layer_idx=layer_idx),
)
class PalmTransformerLayer(nn.Module):
def __init__(
self,
dim: int,
dim_head: int = 64,
num_heads: int = 8,
ffn_mult: int = 4,
layernorm_epsilon: float = 1e-5,
*,
layer_idx=0
):
"""PaLM transformer block with hybrid parallelism"""
super().__init__()
self.num_heads = num_heads
self.dim_head = dim_head
inner_dim = dim_head * num_heads
self.attn_inner_dim = num_heads * dim_head
self.ffn_inner_dim = int(ffn_mult * dim)
self.ffn_mult = ffn_mult
self.layer_idx = layer_idx
# only query has multi head
# key and value remain as single head
self.to_q = Linear(dim, inner_dim, bias=False, parallel="col", layer_idx=layer_idx)
self.to_kv = Linear(dim, dim_head * 2, bias=False, parallel="col", layer_idx=layer_idx)
self.to_out = Linear(inner_dim, dim, bias=False, parallel="row", layer_idx=layer_idx)
self.rotary_emb = RotaryEmbedding(self.dim_head, layer_idx=layer_idx)
self.ffwd = FeedForward(dim, ffn_mult, layer_idx=layer_idx)
self.norm = LayerNorm(dim, eps=layernorm_epsilon, bias=False, layer_idx=layer_idx)
self.scale = dim_head ** -0.5
def get_mask(self, seq):
if hasattr(self, "mask") and self.mask.shape[-1] >= seq:
return self.mask[:seq, :seq]
mask = (
1
- flow.ones(
(seq, seq),
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=dist.get_layer_placement(self.layer_idx),
dtype=flow.int8,
).triu(1)
)
self.register_buffer("mask", mask, persistent=False)
return mask
def get_rotary_embedding(self, seq):
if hasattr(self, "pos_emb") and self.pos_emb.shape[-2] >= seq:
return self.pos_emb[:seq]
pos_emb = self.rotary_emb(seq)
self.register_buffer("pos_emb", pos_emb, persistent=False)
return pos_emb
def forward(self, x):
# move x to the stage with right placement
x = x.to_global(placement=dist.get_layer_placement(self.layer_idx))
bsz, seq_length = x.size()[0:2]
# pre-layernorm
layernorm_output = self.norm(x)
# fused input linear layer
query = self.to_q(layernorm_output)
query = query.view(bsz, -1, self.num_heads, self.dim_head)
query = query.permute(0, 2, 1, 3)
key_value = self.to_kv(layernorm_output)
key, value = flow.chunk(key_value, chunks=2, dim=-1)
# apply position embedding
positions = self.get_rotary_embedding(seq_length)
query, key = map(lambda t: apply_rotary_pos_emb(positions, t), (query, key))
# apply scale
query = query * self.scale
# calculate similarity
attention_scores = einsum("b h s d, b j d -> b h s j", query, key)
# apply casual mask
attention_mask = self.get_mask(seq_length)
attention_scores = flow.mul(attention_scores, attention_mask)
attention_scores = attention_scores - 10000.0 * (1 - attention_mask)
attention_weights = flow.softmax(attention_scores, dim=-1)
# aggregate values
attn_out = einsum("b h i j, b j d -> b h i d", attention_weights, value)
# merge heads
attn_out = attn_out.transpose(1, 2)
attn_out = attn_out.view(bsz, seq_length, -1)
# attn_out = rearrange(attn_out, "b h s d -> b s (h d)")
attn_out = self.to_out(attn_out)
# feedforward
out = self.ffwd(x) + attn_out
return out + x
class PalmHead(nn.Module):
    """Language-model head.

    Projects hidden states to vocabulary logits using the (tied) word-embedding
    weight, and returns the mean LM loss when labels are given, otherwise the
    raw prediction scores.
    """

    def __init__(self, vocab_size, word_embedding_weight):
        super().__init__()
        self.lm_head = LMLogits(vocab_size, bias=False)
        self.loss_func = ParallelCrossEntropyLoss()
        # Tied with the input word embedding.
        self.word_embedding_weight = word_embedding_weight

    def forward(self, x, lm_labels):
        logits = self.lm_head(x, self.word_embedding_weight)
        if lm_labels is None:
            return {"prediction_scores": logits}
        lm_loss = self.loss_func(logits, lm_labels).mean()
        return {"lm_loss": lm_loss}
class PaLM(nn.Module):
    """PaLM language model: token embedding -> `depth` parallel transformer
    layers -> final LayerNorm, with an LM head tied to the word embedding."""
    @configurable
    def __init__(
        self,
        vocab_size,
        dim,
        depth,
        dim_head=64,
        num_heads=8,
        ffn_mult=4,
        initializer_range=0.02,
        layernorm_eps=1e-12,
        amp_enabled=False,
    ):
        super().__init__()
        init_method = init_method_normal(initializer_range)
        word_embedding = VocabEmbedding(
            vocab_size,
            dim,
            init_method=init_method,
            amp_enabled=amp_enabled,
        )
        # Embedding, transformer stack, and final norm run as one Sequential so
        # the whole trunk can be addressed as model.net (see stage helpers below).
        self.net = nn.Sequential(
            word_embedding,
            *[
                PalmTransformerLayer(
                    dim,
                    dim_head=dim_head,
                    num_heads=num_heads,
                    ffn_mult=ffn_mult,
                    layernorm_epsilon=layernorm_eps,
                    layer_idx=i,
                )
                for i in range(depth)
            ],
            LayerNorm(dim, bias=False, eps=layernorm_eps, layer_idx=-1),
        )
        # Head shares the word-embedding weight (weight tying).
        self.head = PalmHead(vocab_size, word_embedding.weight)
    def forward(self, input_ids, labels=None):
        """Return {"lm_loss": ...} when labels are given, else {"prediction_scores": ...}."""
        output = self.net(input_ids)
        return self.head(output, labels)
    @classmethod
    def from_config(cls, cfg):
        # Maps a LazyConfig node to the __init__ keyword arguments.
        return {
            "vocab_size": cfg.vocab_size,
            "dim": cfg.dim,
            "depth": cfg.depth,
            "dim_head": cfg.dim_head,
            "num_heads": cfg.num_heads,
            "ffn_mult": cfg.ffn_mult,
            "initializer_range": cfg.initializer_range,
            "layernorm_eps": cfg.layernorm_eps,
            "amp_enabled": cfg.amp_enabled,
        }
    @staticmethod
    def set_activation_checkpoint(model):
        """Enable activation recomputation on every transformer layer.

        Handles both graph-module APIs: OneFlow 0.8 exposes `.origin`/`.config`,
        newer versions use `.to(nn.Module)` / `.to(flow.nn.graph.GraphModule)`.
        """
        for module_block in model.modules():
            if hasattr(module_block, "origin"):
                if isinstance(module_block.origin, PalmTransformerLayer):
                    module_block.config.activation_checkpointing = True
            else:
                if isinstance(module_block.to(nn.Module), PalmTransformerLayer):
                    module_block.to(flow.nn.graph.GraphModule).activation_checkpointing = True
    @staticmethod
    def set_pipeline_stage_id(model: nn.Module):
        """Assign pipeline stages: embedding -> stage 0, each transformer layer
        -> its layer_idx's stage, head and final LayerNorm -> the last stage."""
        dist_utils = dist.get_dist_util()
        if hasattr(model.net[-1], "config"):
            # Old API in OneFlow 0.8
            for module_block in model.modules():
                if isinstance(module_block.origin, VocabEmbedding):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.origin, PalmTransformerLayer):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
                elif isinstance(module_block.origin, PalmHead):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
                    )
            # final layernorm
            model.net[-1].config.set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
        else:
            # Newer graph-module API.
            for module_block in model.modules():
                if isinstance(module_block.to(nn.Module), VocabEmbedding):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.to(nn.Module), PalmTransformerLayer):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
                elif isinstance(module_block.to(nn.Module), PalmHead):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
                    )
            # final layernorm
            model.net[-1].to(flow.nn.graph.GraphModule).set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
| 11,389 | 34.154321 | 95 | py |
libai | libai-main/projects/PaLM/tools/download_demo_dataset.py | import argparse
from libai.utils.file_utils import get_data_from_cache
VOCAB_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/gpt_dataset/gpt2-vocab.json" # noqa
MERGE_FILE_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/gpt_dataset/gpt2-merges.txt" # noqa
BIN_DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/loss_compara_content_sentence.bin" # noqa
IDX_DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/loss_compara_content_sentence.idx" # noqa
VOCAB_MD5 = "dffec25a898b1f5e569bec4dffd7e5c0"
MERGE_FILE_MD5 = "75a37753dd7a28a2c5df80c28bf06e4e"
BIN_DATA_MD5 = "b842467bd5ea7e52f7a612ea6b4faecc"
IDX_DATA_MD5 = "cf5963b8543f0a7a867361eb980f0372"
def main():
    """Download the demo GPT dataset (vocab, merges, bin/idx data) into --output."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-o", "--output", default="./gpt_dataset", type=str, help="The output path to store data"
    )
    cache_dir = parser.parse_args().output
    # Fetch each file once; get_data_from_cache validates it against its md5.
    downloads = (
        (VOCAB_URL, VOCAB_MD5),
        (MERGE_FILE_URL, MERGE_FILE_MD5),
        (BIN_DATA_URL, BIN_DATA_MD5),
        (IDX_DATA_URL, IDX_DATA_MD5),
    )
    for url, md5 in downloads:
        get_data_from_cache(url, cache_dir, md5=md5)
# Script entry point: download the demo dataset files.
if __name__ == "__main__":
    main()
| 1,366 | 41.71875 | 145 | py |
libai | libai-main/projects/PaLM/configs/palm_pretrain.py | from libai.config import LazyCall, get_config
from .models.palm_small import model
from libai.evaluation import PPLEvaluator
# Reuse LiBai's common graph / training / optimizer / GPT-data configurations.
graph = get_config("common/models/graph.py").graph
train = get_config("common/train.py").train
optim = get_config("common/optim.py").optim
data = get_config("common/data/gpt_dataset.py")
dataloader = data.dataloader
tokenization = data.tokenization
# Local paths for the GPT-2 BPE vocab/merges and the pre-built indexed dataset
# (produced by tools/download_demo_dataset.py).
vocab_file = "./projects/PaLM/gpt_dataset/gpt2-vocab.json"
merge_files = "./projects/PaLM/gpt_dataset/gpt2-merges.txt"
data_prefix = "./projects/PaLM/gpt_dataset/loss_compara_content_sentence"
tokenization.tokenizer.vocab_file = vocab_file
tokenization.tokenizer.merges_file = merge_files
dataloader.train.dataset[0].data_prefix = data_prefix
dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
train.train_micro_batch_size = 4
# Recompute activations in the backward pass to save memory.
train.activation_checkpoint.enabled = True
# Report perplexity during evaluation.
train.evaluation.evaluator = LazyCall(PPLEvaluator)()
train.output_dir = "./output/palm_output"
| 967 | 32.37931 | 73 | py |
libai | libai-main/projects/PaLM/configs/models/palm_small.py | from libai.config import LazyCall
from projects.PaLM.palm_model import PaLM
# Default (small) PaLM configuration; the larger variants (8B/16B/30B/62B)
# import this model and override individual cfg fields.
palm_cfg = dict(
    vocab_size=50304,
    dim=768,
    depth=12,
    dim_head=64,
    num_heads=12,
    ffn_mult=4,
    initializer_range=0.02,
    layernorm_eps=1e-12,
    amp_enabled=False,
)
model = LazyCall(PaLM)(cfg=palm_cfg)
| 310 | 17.294118 | 41 | py |
libai | libai-main/projects/PaLM/configs/models/palm_8b.py | from .palm_small import model
# PaLM-8B: scale the small config's width, depth, and head size.
model.cfg.dim = 4096
model.cfg.depth = 32
model.cfg.dim_head = 256
model.cfg.num_heads = 16
| 123 | 16.714286 | 29 | py |
libai | libai-main/projects/PaLM/configs/models/palm_30b.py | from .palm_small import model
# PaLM-30B: scale the small config's width, depth, and head size.
model.cfg.dim = 6144
model.cfg.depth = 48
model.cfg.dim_head = 256
model.cfg.num_heads = 24
| 123 | 16.714286 | 29 | py |
libai | libai-main/projects/PaLM/configs/models/palm_62b.py | from .palm_small import model
# PaLM-62B: scale the small config's width, depth, and head size.
model.cfg.dim = 8192
model.cfg.depth = 64
model.cfg.dim_head = 256
model.cfg.num_heads = 32
| 123 | 16.714286 | 29 | py |
libai | libai-main/projects/PaLM/configs/models/palm_16b.py | from .palm_small import model
# PaLM-16B: same width/heads as the 8B config with double the depth.
model.cfg.dim = 4096
model.cfg.depth = 64
model.cfg.dim_head = 256
model.cfg.num_heads = 16
| 123 | 16.714286 | 29 | py |
libai | libai-main/projects/SimCSE/evaluator.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.stats import spearmanr
from libai.evaluation import DatasetEvaluator
from libai.utils import distributed as dist
def spearman_target(cos_sim, labels):
    """Spearman rank correlation between predicted similarities and gold labels."""
    result = spearmanr(cos_sim, labels)
    return result.correlation
class SimcseEvaluator(DatasetEvaluator):
    """Accumulates (similarity, label) pairs per batch and reports the Spearman
    rank correlation over the whole evaluation set."""

    def __init__(self):
        self._predictions = []

    def reset(self):
        self._predictions = []

    def process(self, inputs, outputs):
        # Keep the raw tensors; conversion to numpy is deferred to evaluate().
        self._predictions.append({"sim": outputs["sim"], "labels": inputs["labels"]})

    def evaluate(self):
        # Only the main process computes the metric; others return an empty dict.
        if not dist.is_main_process():
            return {}
        sims = np.array([])
        gold = np.array([])
        for pred in self._predictions:
            sims = np.append(sims, dist.tton(pred["sim"]))
            gold = np.append(gold, dist.tton(pred["labels"]))
        self._results = spearman_target(sims, gold)
        return {"Spearman": self._results}
| 1,690 | 32.156863 | 81 | py |
libai | libai-main/projects/SimCSE/config/config_simcse_sup.py | from omegaconf import OmegaConf
from configs.common.data.bert_dataset import tokenization
from configs.common.models.bert import cfg as simcse_cfg
from configs.common.models.graph import graph
from configs.common.optim import optim
from configs.common.train import train
from libai.config import LazyCall
from libai.data.build import build_nlp_test_loader, build_nlp_train_loader
from libai.scheduler import WarmupExponentialLR
from libai.tokenizer import BertTokenizer
from projects.SimCSE.dataset.dataset import TestDataset_sup, TrainDataset_sup
from projects.SimCSE.evaluator import SimcseEvaluator
from projects.SimCSE.modeling.simcse_sup import Simcse_sup
# Supervised SimCSE fine-tuning: SNLI triplets for training, Chinese STS
# (cnsd-sts) test/dev splits for evaluation.
optim["lr"] = 1e-5
graph["enabled"] = True
tokenization.tokenizer = LazyCall(BertTokenizer)(
    vocab_file="./data/vocab.txt",
)
# Keep the vocab size exactly as in the checkpoint (no padding for parallelism).
tokenization.make_vocab_size_divisible_by = 1
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_loader)(
    dataset=[
        LazyCall(TrainDataset_sup)(
            name="snli-sup",
            path="./data/SNLI/train.txt",
            tokenizer=LazyCall(BertTokenizer)(vocab_file="./data/vocab.txt"),
            max_len=64,
        )
    ],
)
# Two eval loaders: STS test split first, dev split second.
dataloader.test = [
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(TestDataset_sup)(
            name="cnsd_sts",
            path="./data/STS/cnsd-sts-test.txt",
            tokenizer=LazyCall(BertTokenizer)(vocab_file="./data/vocab.txt"),
        ),
    ),
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(TestDataset_sup)(
            name="cnsd_sts",
            path="./data/STS/cnsd-sts-dev.txt",
            tokenizer=LazyCall(BertTokenizer)(vocab_file="./data/vocab.txt"),
        )
    ),
]
# BERT-base-chinese sized encoder, initialized from a HuggingFace checkpoint.
simcse_cfg.update(
    dict(
        vocab_size=21128,
        hidden_size=768,
        hidden_layers=12,
        layernorm_eps=1e-12,
        intermediate_size=3072,
        pretrained_model_weight="./data/pytorch_model.bin",
        temp=0.05,
        pooler_type="cls",
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        apply_query_key_layer_scaling=False,
        apply_residual_post_layernorm=True,
    )
)
model = LazyCall(Simcse_sup)(cfg=simcse_cfg)
train.update(
    dict(
        output_dir="./result",
        train_micro_batch_size=8,
        test_micro_batch_size=8,
        train_epoch=1,
        train_iter=1000,
        log_period=10,
        # Pure data parallelism over 8 ranks.
        dist=dict(
            data_parallel_size=8,
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
        ),
        # Select the best checkpoint by maximum Spearman correlation.
        evaluation=dict(
            enabled=True,
            evaluator=LazyCall(SimcseEvaluator)(),
            eval_period=10,
            eval_metric="Spearman",
            eval_mode="max",
            eval_iter=100,
        ),
        # Constant LR (gamma=1.0, no warmup).
        scheduler=LazyCall(WarmupExponentialLR)(
            warmup_factor=0.0,
            gamma=1.0,
            warmup_method="linear",
            warmup_iter=0.0,
        ),
    )
)
| 2,934 | 27.77451 | 77 | py |
libai | libai-main/projects/SimCSE/config/config_simcse_unsup.py | from omegaconf import OmegaConf
from configs.common.data.bert_dataset import tokenization
from configs.common.models.bert import cfg as simcse_cfg
from configs.common.models.graph import graph
from configs.common.optim import optim
from configs.common.train import train
from libai.config import LazyCall
from libai.data.build import build_nlp_test_loader, build_nlp_train_loader
from libai.scheduler import WarmupExponentialLR
from libai.tokenizer import BertTokenizer
from projects.SimCSE.dataset.dataset import TestDataset_unsup, TrainDataset_unsup
from projects.SimCSE.evaluator import SimcseEvaluator
from projects.SimCSE.modeling.simcse_unsup import Simcse_unsup
# Unsupervised SimCSE fine-tuning: SNLI origin sentences plus STS train
# sentences (path2) for training, Chinese STS test/dev splits for evaluation.
optim["lr"] = 3e-5
graph["enabled"] = True
tokenization.tokenizer = LazyCall(BertTokenizer)(
    vocab_file="./data/vocab.txt",
)
# Keep the vocab size exactly as in the checkpoint (no padding for parallelism).
tokenization.make_vocab_size_divisible_by = 1
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_loader)(
    dataset=[
        LazyCall(TrainDataset_unsup)(
            name="snli-unsup",
            path="./data/SNLI/train.txt",
            tokenizer=LazyCall(BertTokenizer)(vocab_file="./data/vocab.txt"),
            max_len=64,
            path2="./data/STS/cnsd-sts-train.txt",
        )
    ],
)
# Two eval loaders: STS test split first, dev split second.
dataloader.test = [
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(TestDataset_unsup)(
            name="cnsd_sts",
            path="./data/STS/cnsd-sts-test.txt",
            tokenizer=LazyCall(BertTokenizer)(vocab_file="./data/vocab.txt"),
        ),
    ),
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(TestDataset_unsup)(
            name="cnsd_sts",
            path="./data/STS/cnsd-sts-dev.txt",
            tokenizer=LazyCall(BertTokenizer)(vocab_file="./data/vocab.txt"),
        )
    ),
]
# BERT-base-chinese sized encoder, initialized from a HuggingFace checkpoint.
simcse_cfg.update(
    dict(
        vocab_size=21128,
        hidden_size=768,
        hidden_layers=12,
        layernorm_eps=1e-12,
        intermediate_size=3072,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        apply_query_key_layer_scaling=False,
        apply_residual_post_layernorm=True,
        pretrained_model_weight="./data/pytorch_model.bin",
        pooler_type="cls",
        temp=0.05,
    )
)
model = LazyCall(Simcse_unsup)(cfg=simcse_cfg)
train.update(
    dict(
        output_dir="./result",
        train_micro_batch_size=8,
        test_micro_batch_size=8,
        train_epoch=1,
        train_iter=2500,
        log_period=10,
        # Pure data parallelism over 8 ranks.
        dist=dict(
            data_parallel_size=8,
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
        ),
        # Select the best checkpoint by maximum Spearman correlation.
        evaluation=dict(
            enabled=True,
            evaluator=LazyCall(SimcseEvaluator)(),
            eval_period=10,
            eval_iter=1e5,
            eval_metric="Spearman",
            eval_mode="max",
        ),
        # Constant LR (gamma=1.0, no warmup).
        scheduler=LazyCall(WarmupExponentialLR)(
            warmup_factor=0.000, gamma=1.0, warmup_method="linear", warmup_iter=0
        ),
    )
)
| 2,966 | 28.67 | 81 | py |
libai | libai-main/projects/SimCSE/dataset/dataset.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import jsonlines
import oneflow as flow
from oneflow.utils.data import Dataset
from libai.data.structures import DistTensorData, Instance
def load_data(name, path):
    """Load SimCSE train/eval sentences from `path` according to dataset `name`.

    The element type depends on the format: plain sentences (snli-unsup, wiki,
    lqcmc, add), (origin, entailment, contradiction) triplets (snli-sup), or
    (sent1, sent2, score) rows (cnsd_sts) / field lists (eng_sts).
    """
    assert name in ["snli-sup", "snli-unsup", "lqcmc", "eng_sts", "cnsd_sts", "wiki", "add"]

    def load_snli_data_unsup(path):
        # jsonl: keep the "origin" sentence of each record.
        with jsonlines.open(path, "r") as f:
            return [line.get("origin") for line in f]

    def load_snli_data_sup(path):
        # jsonl: (anchor, positive, hard-negative) triplets.
        with jsonlines.open(path, "r") as f:
            return [(line["origin"], line["entailment"], line["contradiction"]) for line in f]

    def load_lqcmc_data(path):
        # tsv: keep only the first column.
        with open(path, "r", encoding="utf8") as f:
            return [line.strip().split("\t")[0] for line in f]

    def load_cnsd_sts_data(path):
        # "idx||sent1||sent2||score" rows; the score field keeps its newline.
        with open(path, "r", encoding="utf8") as f:
            return [(line.split("||")[1], line.split("||")[2], line.split("||")[3]) for line in f]

    def load_wiki_data(path):
        # Plain text: collapse internal whitespace runs to single spaces.
        with open(path, "r", encoding="utf8") as file:
            return [" ".join(line.strip().split()) for line in file]

    def load_eng_sts_data(path):
        # tsv rows kept as lists of fields.
        with open(path, "r", encoding="utf8") as file:
            return [line.strip().split("\t") for line in file]

    def load_sts_to_train(path):
        # Extra STS sentences mixed into unsupervised training; path is optional.
        if path is None:
            return []
        with open(path, "r", encoding="utf8") as f:
            return [line.split("||")[1] for line in f]

    loaders = {
        "snli-unsup": load_snli_data_unsup,
        "snli-sup": load_snli_data_sup,
        "wiki": load_wiki_data,
        "cnsd_sts": load_cnsd_sts_data,
        "eng_sts": load_eng_sts_data,
        "lqcmc": load_lqcmc_data,
    }
    # "add" (the only name not in the table after the assert) falls back to
    # load_sts_to_train, mirroring the original if/elif chain's else branch.
    return loaders.get(name, load_sts_to_train)(path)
def padding_for_ids(data, pad_id=0, max_len=64):
    """Pad one tokenized sentence to `max_len` and return it as an Instance.

    Each sentence is duplicated (two identical copies stacked on a new leading
    axis): unsupervised SimCSE encodes both copies with independent dropout
    masks to form the positive pair.
    NOTE: mutates the input `data` dict in place.
    """
    data["input_ids"] = data["input_ids"] + [pad_id] * (max_len - len(data["input_ids"]))
    data["attention_mask"] = data["attention_mask"] + [0] * (max_len - len(data["attention_mask"]))
    data["input_ids"] = [data["input_ids"], data["input_ids"]]
    data["attention_mask"] = [data["attention_mask"], data["attention_mask"]]
    return Instance(
        input_ids=DistTensorData(flow.tensor(data["input_ids"], dtype=flow.long)),
        attention_mask=DistTensorData(flow.tensor(data["attention_mask"], dtype=flow.long)),
    )
class TrainDataset_unsup(Dataset):
    """Unsupervised SimCSE training set.

    Concatenates the sentences loaded from `path` (dataset `name`) with
    optional extra STS train sentences from `path2`, shuffles them, and yields
    one padded, duplicated sentence per item (see `padding_for_ids`).
    """
    def __init__(self, name, path, tokenizer, max_len, path2=None):
        self.name = name
        # "add" loader returns [] when path2 is None.
        self.data = load_data(name, path) + load_data("add", path2)
        random.shuffle(self.data)
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.pad_id = self.tokenizer.pad_token_id
        self.cls_id = self.tokenizer.cls_token_id
        self.sep_id = self.tokenizer.sep_token_id
    def __len__(self):
        return len(self.data)
    def text2id(self, text):
        """Tokenize, truncate to max_len-2, wrap with [CLS]/[SEP], pad, duplicate."""
        tokens = self.tokenizer.tokenize(text)
        ids = self.tokenizer.convert_tokens_to_ids(tokens)
        ids = ids[: self.max_len - 2]
        ids = [self.cls_id] + ids + [self.sep_id]
        attention_mask = [1] * len(ids)
        return padding_for_ids(
            data={
                "input_ids": ids,
                "attention_mask": attention_mask,
            },
            pad_id=self.pad_id,
            max_len=self.max_len,
        )
    def __getitem__(self, index):
        return self.text2id(self.data[index])
class TestDataset_unsup(Dataset):
    """STS evaluation set for unsupervised SimCSE: each item is a
    (sent1, sent2) pair stacked on axis 0 plus an integer similarity label."""
    def __init__(self, name, path, tokenizer):
        self.data = load_data(name, path)
        self.tokenizer = tokenizer
        # Fixed evaluation sequence length.
        self.max_len = 64
        self.pad_id = self.tokenizer.pad_token_id
        self.cls_id = self.tokenizer.cls_token_id
        self.sep_id = self.tokenizer.sep_token_id
    def __len__(self):
        return len(self.data)
    def text2id(self, text):
        """Tokenize, truncate, wrap with [CLS]/[SEP], and right-pad one sentence."""
        tokens = self.tokenizer.tokenize(text)
        ids = self.tokenizer.convert_tokens_to_ids(tokens)
        ids = ids[: self.max_len - 2]
        ids = [self.cls_id] + ids + [self.sep_id]
        length = len(ids)
        ids = ids + [self.pad_id] * (self.max_len - length)
        attention_mask = [1] * length + [0] * (self.max_len - length)
        return {
            "input_ids": ids,
            "attention_mask": attention_mask,
        }
    def __getitem__(self, index):
        # Each row is (sent1, sent2, label).
        sample = self.data[index]
        sent1 = self.text2id(sample[0])
        sent2 = self.text2id(sample[1])
        score = int(sample[2])
        return Instance(
            input_ids=DistTensorData(
                flow.tensor([sent1["input_ids"], sent2["input_ids"]], dtype=flow.long)
            ),
            attention_mask=DistTensorData(
                flow.tensor([sent1["attention_mask"], sent2["attention_mask"]], dtype=flow.long)
            ),
            labels=DistTensorData(flow.tensor(score, dtype=flow.int)),
        )
class TrainDataset_sup(Dataset):
    """Supervised SimCSE training set: each item is an (origin, entailment,
    contradiction) triplet, tokenized and padded to `max_len`."""

    def __init__(self, name, path, tokenizer, max_len=64):
        self.data = load_data(name, path)
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.pad_id = self.tokenizer.pad_token_id
        self.cls_id = self.tokenizer.cls_token_id
        self.sep_id = self.tokenizer.sep_token_id

    def __len__(self):
        return len(self.data)

    def pad_text(self, ids):
        """Right-pad `ids` to max_len; return (padded_ids, attention_mask)."""
        mask = [1] * len(ids)
        padded = ids + [self.pad_id] * (self.max_len - len(ids))
        mask = mask + [0] * (self.max_len - len(mask))
        return padded, mask

    def text2id(self, text):
        """Tokenize, truncate to max_len-2, wrap with [CLS]/[SEP], and pad."""
        token_ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
        token_ids = [self.cls_id] + token_ids[: self.max_len - 2] + [self.sep_id]
        return self.pad_text(token_ids)

    def __getitem__(self, index):
        # Encode the three sentences of the triplet and stack them on axis 0.
        encoded = [self.text2id(sent) for sent in self.data[index]]
        id_rows = [ids for ids, _ in encoded]
        mask_rows = [mask for _, mask in encoded]
        return Instance(
            input_ids=DistTensorData(flow.tensor(id_rows, dtype=flow.long)),
            attention_mask=DistTensorData(flow.tensor(mask_rows, dtype=flow.long)),
        )
class TestDataset_sup(TrainDataset_sup):
    """Supervised SimCSE eval set: rows are (sent1, sent2, label); returns the
    encoded pair stacked on axis 0 plus the integer label."""

    def __getitem__(self, index):
        sent1, sent2, raw_label = self.data[index]
        ids1, mask1 = self.text2id(sent1)
        ids2, mask2 = self.text2id(sent2)
        return Instance(
            input_ids=DistTensorData(flow.tensor([ids1, ids2], dtype=flow.long)),
            attention_mask=DistTensorData(flow.tensor([mask1, mask2], dtype=flow.long)),
            labels=DistTensorData(flow.tensor(int(raw_label), dtype=flow.int)),
        )
| 7,829 | 32.896104 | 99 | py |
libai | libai-main/projects/SimCSE/utils/load_huggingface_weight.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import oneflow as flow
import torch
import libai.utils.distributed as dist
def convert_tensor(tensor):
    """Convert a torch tensor to a float32 oneflow tensor via a CPU numpy round-trip."""
    return flow.Tensor(tensor.float().cpu().numpy())
def conver_state(state, layers, hidden_size, num_heads, head_size):
    """Convert a HuggingFace BERT state dict to LiBai's parameter layout.

    Returns (save, not_saved): `save` maps LiBai parameter names to converted
    oneflow tensors, `not_saved` lists HF names that had no mapping.
    Key points of the mapping:
      * Q/K/V weights and biases are fused into one query_key_value tensor and
        permuted to LiBai's [num_heads, 3, head_size] interleaved layout.
      * HF's post-LN names are re-homed for LiBai's layout: the embedding
        LayerNorm becomes layer 0's input_layernorm, attention.output.LayerNorm
        becomes post_attention_layernorm, and layer i's output.LayerNorm
        becomes layer i+1's input_layernorm (the last one becomes
        final_layernorm).
    """
    save = OrderedDict()
    not_saved = []
    Layers = layers
    for name, tensor in state.items():
        if "embeddings" in name:
            if "word_embeddings" in name:
                save["embeddings.vocab_embeddings.weight"] = convert_tensor(tensor)
            elif "position_embeddings" in name:
                save["embeddings.position_embeddings.weight"] = convert_tensor(tensor)
            elif "token_type_embeddings" in name:
                save["embeddings.tokentype_embeddings.weight"] = convert_tensor(tensor)
            elif "LayerNorm.gamma" in name:
                # Embedding LayerNorm feeds layer 0 in the pre-LN layout.
                save["encoders.0.input_layernorm.weight"] = convert_tensor(tensor)
            elif "LayerNorm.beta" in name:
                save["encoders.0.input_layernorm.bias"] = convert_tensor(tensor)
        elif "attention" in name:
            if "self" in name:
                # name looks like bert.encoder.layer.<i>.attention.self.<q|k|v>.<weight|bias>
                index = name.split(".")[3]
                # Q/K/V are fused on the first q/k/v key seen; skip the rest.
                if "encoders." + index + ".self_attention.query_key_value.weight" in save.keys():
                    continue
                q_w = name.replace(name.split(".")[6], "query").replace(
                    name.split(".")[7], "weight"
                )
                k_w = name.replace(name.split(".")[6], "key").replace(name.split(".")[7], "weight")
                v_w = name.replace(name.split(".")[6], "value").replace(
                    name.split(".")[7], "weight"
                )
                q_b = name.replace(name.split(".")[6], "query").replace(name.split(".")[7], "bias")
                k_b = name.replace(name.split(".")[6], "key").replace(name.split(".")[7], "bias")
                v_b = name.replace(name.split(".")[6], "value").replace(name.split(".")[7], "bias")
                qkv_w = torch.cat((state[q_w], state[k_w], state[v_w]), dim=0)  # (3*hidden, hidden)
                # Interleave per head: (3, heads, head_size, hidden) -> (heads, 3, ...)
                qkv_w = qkv_w.view([3, num_heads, head_size, hidden_size])
                qkv_w = qkv_w.permute(1, 0, 2, 3).contiguous().view(3 * hidden_size, hidden_size)
                # ---------------------------------------------------------
                qkv_b = torch.cat((state[q_b], state[k_b], state[v_b]), dim=-1)
                # Same interleaving for the bias.
                qkv_b = qkv_b.view(3, num_heads, head_size)
                qkv_b = qkv_b.permute(1, 0, 2).contiguous().view(-1)
                # ---------------------------------------------------------
                target_w = "encoders." + index + ".self_attention.query_key_value.weight"
                save[target_w] = convert_tensor(qkv_w)
                target_b = "encoders." + index + ".self_attention.query_key_value.bias"
                save[target_b] = convert_tensor(qkv_b)
            elif "output" in name:
                # attention.output.*: projection dense + its LayerNorm.
                index = name.split(".")[3]
                if "dense" in name:
                    if "weight" in name:
                        target = "encoders." + index + ".self_attention.dense.weight"
                        save[target] = convert_tensor(tensor)
                    elif "bias" in name:
                        target = "encoders." + index + ".self_attention.dense.bias"
                        save[target] = convert_tensor(tensor)
                elif "LayerNorm" in name:
                    if "gamma" in name:
                        target = "encoders." + index + ".post_attention_layernorm.weight"
                        save[target] = convert_tensor(tensor)
                    elif "beta" in name:
                        target = "encoders." + index + ".post_attention_layernorm.bias"
                        save[target] = convert_tensor(tensor)
        elif "intermediate" in name:
            # FFN up-projection: weight and bias are copied together once.
            index = name.split(".")[3]
            if "encoders." + index + ".mlp.dense_h_to_4h.weight" in save.keys():
                continue
            w = "bert.encoder.layer." + index + ".intermediate.dense.weight"
            b = "bert.encoder.layer." + index + ".intermediate.dense.bias"
            t_w = "encoders." + index + ".mlp.dense_h_to_4h.weight"
            t_b = "encoders." + index + ".mlp.dense_h_to_4h.bias"
            save[t_w] = convert_tensor(state[w])
            save[t_b] = convert_tensor(state[b])
        elif "output" in name:
            # layer.<i>.output.*: FFN down-projection + the block's LayerNorm
            # (attention.output.* was already consumed by the branch above).
            index = name.split(".")[3]
            if "dense.weight" in name:
                target = "encoders." + index + ".mlp.dense_4h_to_h.weight"
                save[target] = convert_tensor(tensor)
            elif "dense.bias" in name:
                target = "encoders." + index + ".mlp.dense_4h_to_h.bias"
                save[target] = convert_tensor(tensor)
            elif "LayerNorm.gamma" in name:
                # Post-LN of the last layer maps to the final LayerNorm;
                # otherwise to the next layer's input_layernorm.
                if index == str(Layers - 1):
                    target = "final_layernorm.weight"
                    save[target] = convert_tensor(tensor)
                    continue
                target = "encoders." + str(int(index) + 1) + ".input_layernorm.weight"
                save[target] = convert_tensor(tensor)
            elif "LayerNorm.beta" in name:
                if index == str(Layers - 1):
                    target = "final_layernorm.bias"
                    save[target] = convert_tensor(tensor)
                    continue
                target = "encoders." + str(int(index) + 1) + ".input_layernorm.bias"
                save[target] = convert_tensor(tensor)
        elif "pooler" in name:
            if "weight" in name:
                save["pooler.dense.weight"] = convert_tensor(tensor)
            elif "bias" in name:
                save["pooler.dense.bias"] = convert_tensor(tensor)
        else:
            not_saved.append(name)
    return save, not_saved
def load_tensor(tensor_lhs, tensor_rhs):
    """Copy local tensor `tensor_rhs` into global tensor `tensor_lhs` in place,
    first broadcasting it to the destination's placement, then re-splitting to
    the destination's sbp."""
    broadcast_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
    tensor_rhs = flow.to_global(tensor_rhs, placement=tensor_lhs.placement, sbp=broadcast_sbp)
    tensor_lhs.copy_(tensor_rhs.to_global(sbp=tensor_lhs.sbp))
def load_huggingface_bert(model, path, hidden_size, num_heads, layers=12):
    """Load HuggingFace BERT weights from checkpoint `path` into LiBai `model`.

    Args:
        model: LiBai BERT-style module whose state_dict keys match conver_state's output.
        path: path to the HuggingFace ``pytorch_model.bin`` checkpoint.
        hidden_size: transformer hidden size.
        num_heads: number of attention heads.
        layers: number of transformer layers (default 12, BERT-base).
    """
    head_size = hidden_size // num_heads
    # map_location="cpu" so checkpoints saved on GPU load on any host (the
    # weights are converted via CPU numpy and re-placed by load_tensor anyway).
    huggingface_state_dict = torch.load(path, map_location="cpu")
    of_state_dict, _ = conver_state(
        huggingface_state_dict,
        layers=layers,
        hidden_size=hidden_size,
        num_heads=num_heads,
        head_size=head_size,
    )
    # Copy each converted tensor into the matching global parameter.
    for key, value in of_state_dict.items():
        load_tensor(model.state_dict()[key], value)
| 7,362 | 44.732919 | 99 | py |
libai | libai-main/projects/SimCSE/modeling/bert_for_simcse.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libai.models import BertModel
class BertForSimCSE(BertModel):
    """BertModel variant whose forward also returns the hidden states of every
    encoder layer (needed by the "first-last-avg" pooling mode)."""

    def __init__(self, cfg):
        super().__init__(cfg)

    def forward(self, input_ids, attention_mask, tokentype_ids=None):
        ext_mask = self.extended_attn_mask(attention_mask)
        hidden_states = self.embeddings(input_ids, tokentype_ids)
        all_hidden = []
        # Collect the output of each encoder layer (embedding output excluded).
        for layer in self.encoders:
            hidden_states = layer(hidden_states, ext_mask)
            all_hidden.append(hidden_states)
        encoder_output = self.final_layernorm(hidden_states)
        pooled_output = None if self.pooler is None else self.pooler(encoder_output)
        return encoder_output, pooled_output, all_hidden
| 1,396 | 40.088235 | 88 | py |
libai | libai-main/projects/SimCSE/modeling/model_utils.py | import oneflow as flow
from oneflow import nn
import libai
def cosine_similarity(x, y, dim=-1):
    """Cosine similarity of `x` and `y` along dimension `dim`."""
    dot = flow.sum(x * y, dim=dim)
    norms = flow.linalg.norm(x, dim=dim) * flow.linalg.norm(y, dim=dim)
    return dot / norms
class MLPLayer(nn.Module):
    """SimCSE projection head: Linear(hidden -> hidden) followed by tanh."""

    def __init__(self, cfg):
        super().__init__()
        self.dense = libai.layers.Linear(
            cfg.hidden_size, cfg.hidden_size, bias=True, parallel="row", layer_idx=-1
        )
        self.activation = libai.layers.build_activation("tanh")

    def forward(self, features):
        return self.activation(self.dense(features))
| 601 | 25.173913 | 99 | py |
libai | libai-main/projects/SimCSE/modeling/simcse_unsup.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.utils import distributed as dist
from projects.SimCSE.modeling.model_utils import MLPLayer, cosine_similarity
from projects.SimCSE.utils.load_huggingface_weight import load_huggingface_bert
from .bert_for_simcse import BertForSimCSE
class Simcse_unsup(nn.Module):
    """Unsupervised SimCSE: BERT encoder + MLP projection head, trained with an
    in-batch contrastive loss where the positive pair is the same sentence
    encoded twice under different dropout masks."""
    def __init__(self, cfg):
        super().__init__()
        self.bert = BertForSimCSE(cfg)
        self.mlp = MLPLayer(cfg)
        self.pooler_type = cfg.pooler_type
        if cfg.pretrained_model_weight is not None:
            load_huggingface_bert(
                self.bert,
                cfg.pretrained_model_weight,
                cfg["hidden_size"],
                cfg["num_attention_heads"],
                cfg["hidden_layers"],
            )
    def pooler(self, inputs, attention_mask):
        """Pool token representations into one sentence vector per pooler_type.

        `inputs` is BertForSimCSE's (last_hidden, pooled, all_layer_hiddens).
        """
        if self.pooler_type == "cls":
            return inputs[0][:, 0]
        elif self.pooler_type == "pooled":
            return inputs[1]
        elif self.pooler_type == "last-avg":
            # Mask-weighted mean over the last hidden layer.
            last_hidden = inputs[0]
            return (last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(
                -1
            ).unsqueeze(-1)
        elif self.pooler_type == "first-last-avg":
            # NOTE(review): inputs[2][1] is the *second* encoder layer's output;
            # "first-last-avg" usually averages the first layer with the last —
            # confirm the intended index.
            first_hidden = inputs[2][1]
            last_hidden = inputs[0]
            res = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(
                1
            ) / attention_mask.sum(-1).unsqueeze(-1)
            return res
    def forward(self, input_ids, attention_mask, token_type_ids=None, labels=None):
        """Training: return {"loss": contrastive_loss}.
        Evaluation: return {"sim": pairwise_cosine, "labels": gold_labels}."""
        if self.training:
            # Each sample holds two copies of the sentence; flatten to (2*bs, seq).
            bs, num_sent = input_ids.size(0), input_ids.size(1)
            input_ids = input_ids.view(bs * num_sent, -1)
            attention_mask = attention_mask.view(bs * num_sent, -1)
            out = self.bert(input_ids, attention_mask)
            out = self.pooler(out, attention_mask)
            out = self.mlp(out)
            labels = flow.arange(
                out.size(0),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                placement=out.placement,
            )
            # Pair up adjacent rows: row 2k's positive is 2k+1 and vice versa.
            labels = (labels - labels % 2 * 2) + 1
            sim = cosine_similarity(out.unsqueeze(1), out.unsqueeze(0))
            # Remove self-similarity from the candidates by pushing the
            # diagonal to -inf (approximated with -1e12).
            sim = (
                sim
                - flow.eye(
                    out.size(0),
                    sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                    placement=out.placement,
                )
                * 1e12
            )
            # NOTE(review): temperature is hard-coded as 0.05 here even though
            # the config carries cfg.temp — confirm they are meant to agree.
            sim = sim / 0.05
            loss = nn.CrossEntropyLoss()(sim, labels)
            return {"loss": loss}
        else:
            bs, num_sent = input_ids.size(0), input_ids.size(1)
            input_ids = input_ids.view(bs * num_sent, -1)
            attention_mask = attention_mask.view(bs * num_sent, -1)
            out = self.bert(input_ids, attention_mask)
            out = self.pooler(out, attention_mask)
            # NOTE(review): the result of this call is discarded, so the MLP is
            # effectively skipped at evaluation time (common in SimCSE eval) —
            # if that is intended, this line is dead code and could be removed.
            self.mlp(out)
            out = out.view(bs, num_sent, -1)
            sent1 = out[:, 0]
            sent2 = out[:, 1]
            sim = cosine_similarity(sent1, sent2)
            sim = sim.to_global(sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]))
            return {"sim": sim.unsqueeze(1), "labels": labels}
| 3,946 | 37.320388 | 94 | py |
libai | libai-main/projects/SimCSE/modeling/simcse_sup.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.utils import distributed as dist
from projects.SimCSE.modeling.model_utils import MLPLayer, cosine_similarity
from projects.SimCSE.utils.load_huggingface_weight import load_huggingface_bert
from .bert_for_simcse import BertForSimCSE
class Simcse_sup(nn.Module):
    """Supervised SimCSE: BERT encoder + pooling + MLP projection head.

    Training batches carry (sentence, positive, hard-negative) triplets;
    the loss is an InfoNCE-style cross entropy over in-batch cosine
    similarities with the anchor/positive rows as queries.
    """

    def __init__(self, cfg):
        super().__init__()
        self.bert = BertForSimCSE(cfg)
        self.mlp = MLPLayer(cfg)
        self.pooler_type = cfg.pooler_type
        # Optionally initialize from a HuggingFace BERT checkpoint.
        if cfg.pretrained_model_weight is not None:
            load_huggingface_bert(
                self.bert,
                cfg.pretrained_model_weight,
                cfg["hidden_size"],
                cfg["num_attention_heads"],
                cfg["hidden_layers"],
            )

    def pooler(self, inputs, attention_mask):
        """Reduce encoder outputs to one sentence vector per sequence.

        ``inputs`` is the tuple returned by the encoder. Supported modes:
        "cls" (first token of the last hidden state), "pooled" (pooled
        output), "last-avg" (mask-weighted mean of the last layer) and
        "first-last-avg" (mask-weighted mean of the first and last layers).
        """
        if self.pooler_type == "cls":
            return inputs[0][:, 0]
        elif self.pooler_type == "pooled":
            return inputs[1]
        elif self.pooler_type == "last-avg":
            last_hidden = inputs[0]
            return (last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(
                -1
            ).unsqueeze(-1)
        elif self.pooler_type == "first-last-avg":
            first_hidden = inputs[2][1]
            last_hidden = inputs[0]
            res = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(
                1
            ) / attention_mask.sum(-1).unsqueeze(-1)
            return res

    def create_use_row(self, labels):
        """Return the indices of anchor/positive rows.

        With rows laid out as flattened (sent, pos, neg) triplets, this keeps
        two rows and skips every third one (the hard negative).
        """
        count = 0
        use_row = []
        for row in range(labels.size(0)):
            if count % 2 == 0 and count != 0:
                count = 0
                continue
            use_row.append(row)
            count += 1
        return flow.tensor(use_row, sbp=labels.sbp, placement=labels.placement)

    def forward(self, input_ids, attention_mask, token_type_ids=None, labels=None):
        """Supervised SimCSE forward.

        Training: inputs are (bs, 3, seq_len) triplets flattened to 3*bs rows;
        returns ``{"loss": ...}``.  Evaluation: inputs are (bs, 2, seq_len)
        sentence pairs; returns their cosine similarity and the given labels.
        """
        if self.training:
            bs = input_ids.size(0)
            # Flatten the (sent, pos, neg) triplets into one batch of 3*bs rows.
            input_ids = input_ids.view(bs * 3, -1)
            attention_mask = attention_mask.view(bs * 3, -1)
            out = self.bert(input_ids, attention_mask)
            out = self.pooler(out, attention_mask)
            out = self.mlp(out)
            labels = flow.arange(
                out.size(0),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                placement=out.placement,
            )
            use_row = self.create_use_row(labels)
            # Map each kept row to the index of its positive within the
            # triplet: i % 3 == 0 -> i + 1, i % 3 == 1 -> i - 1.
            labels = (use_row - use_row % 3 * 2) + 1
            sim = cosine_similarity(out.unsqueeze(1), out.unsqueeze(0))
            # Mask self-similarity on the diagonal with a large negative offset.
            sim = (
                sim
                - flow.eye(
                    out.size(0),
                    sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                    placement=out.placement,
                )
                * 1e12
            )
            # Keep only anchor/positive query rows; temperature tau = 0.05.
            sim = flow.index_select(sim, dim=0, index=use_row)
            sim = sim / 0.05
            loss = nn.CrossEntropyLoss()(sim, labels)
            return {"loss": loss}
        else:
            bs = input_ids.size(0)
            input_ids = input_ids.view(bs * 2, -1)
            attention_mask = attention_mask.view(bs * 2, -1)
            out = self.bert(input_ids, attention_mask)
            out = self.pooler(out, attention_mask)
            # NOTE(review): the MLP result is discarded here, so evaluation
            # scores the raw pooled features -- confirm this is intentional.
            self.mlp(out)
            out = out.view(bs, 2, -1)
            sent1 = out[:, 0]
            sent2 = out[:, 1]
            sim = cosine_similarity(sent1, sent2)
            sim = sim.to_global(sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]))
            return {"sim": sim.unsqueeze(1), "labels": labels}
| 4,318 | 36.232759 | 94 | py |
libai | libai-main/projects/CLIP/clip/simple_tokenizer.py | # --------------------------------------------------------
# Borrow code from:
# https://github.com/openai/CLIP/tree/main/clip/simple_tokenizer.py
# --------------------------------------------------------
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
from libai.utils.file_utils import download_file
@lru_cache()
def default_bpe():
    """Return the local path of the bundled BPE vocab, downloading it if absent."""
    vocab_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz"
    )
    if not os.path.exists(vocab_path):
        download_file(
            vocab_path,
            "https://oneflow-static.oss-cn-beijing.aliyuncs.com/libai/clip/bpe_simple_vocab_16e6.txt.gz",  # noqa: E501
        )
    return vocab_path
@lru_cache()
def bytes_to_unicode():
    """Build the reversible byte -> printable-unicode-character table.

    BPE operates on unicode strings, so every possible byte value must map to
    a character.  Printable latin bytes map to themselves; the remaining
    (whitespace/control) bytes are remapped to code points starting at 256 so
    the BPE code never sees characters it chokes on.  The result covers all
    256 byte values with 256 distinct characters.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    # Printable bytes keep their own character ...
    table = {byte_val: chr(byte_val) for byte_val in printable}
    # ... everything else is shifted into the 256+ code-point range.
    shift = 0
    for byte_val in range(2 ** 8):
        if byte_val not in table:
            table[byte_val] = chr(2 ** 8 + shift)
            shift += 1
    return table
def get_pairs(word):
    """Return the set of adjacent symbol bigrams in ``word``.

    ``word`` is a tuple of variable-length string symbols.
    """
    previous = word[0]  # raises IndexError on an empty word, as before
    bigrams = set()
    for current in word[1:]:
        bigrams.add((previous, current))
        previous = current
    return bigrams
def basic_clean(text):
    """Fix mojibake via ftfy, undo (possibly double) HTML escaping, and strip."""
    text = ftfy.fix_text(text)
    # unescape twice to handle doubly-escaped entities like "&amp;amp;"
    text = html.unescape(html.unescape(text))
    return text.strip()
def whitespace_clean(text):
    """Collapse every run of whitespace to a single space and trim the ends."""
    return re.sub(r"\s+", " ", text).strip()
class SimpleTokenizer(object):
    """CLIP's byte-level BPE tokenizer.

    Text is cleaned and lower-cased, split by ``self.pat``, mapped to
    printable unicode byte characters, merged using the learned BPE ranks,
    and looked up in a 49152-entry vocabulary (256 byte chars, 256 byte+"</w>"
    forms, the merges, and 2 special tokens).
    """

    def __init__(self, bpe_path: str = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # First line of the merges file is a header; keep exactly the number
        # of merges that fits the 49152-token vocab (minus bytes and specials).
        merges = gzip.open(bpe_path).read().decode("utf-8").split("\n")
        merges = merges[1 : 49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + "</w>" for v in vocab]
        for merge in merges:
            vocab.append("".join(merge))
        vocab.extend(["<|startoftext|>", "<|endoftext|>"])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        # Lower rank = earlier (more frequent) merge.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Special tokens bypass the BPE merge loop entirely.
        self.cache = {"<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>"}
        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",  # noqa
            re.IGNORECASE,
        )

    def bpe(self, token):
        """Apply BPE merges to one pre-tokenized token.

        Returns the merged symbols joined by spaces, with "</w>" appended to
        the final symbol to mark the word boundary.  Results are memoized.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        pairs = get_pairs(word)
        if not pairs:
            return token + "</w>"
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    # Copy everything up to the next occurrence of `first`.
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:  # noqa
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Tokenize ``text`` into a list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map raw utf-8 bytes to the printable stand-in characters first.
            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def decode(self, tokens):
        """Invert ``encode``: map token ids back to a best-effort utf-8 string."""
        text = "".join([self.decoder[token] for token in tokens])
        text = (
            bytearray([self.byte_decoder[c] for c in text])
            .decode("utf-8", errors="replace")
            .replace("</w>", " ")
        )
        return text
| 5,324 | 32.074534 | 119 | py |
libai | libai-main/projects/CLIP/clip/clip.py | # --------------------------------------------------------
# Borrow code from:
# https://github.com/openai/CLIP/tree/main/clip/clip.py
# --------------------------------------------------------
import hashlib
import os
import urllib
import warnings
from typing import List, Union
import oneflow as flow
import torch
from flowvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from PIL import Image
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from flowvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
# noqa:
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", # noqa: E501
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", # noqa: E501
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt", # noqa: E501
"RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt", # noqa: E501
"RN50x64": "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt", # noqa: E501
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", # noqa: E501
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", # noqa: E501
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", # noqa: E501
"ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt", # noqa: E501
}
def _download(url: str, root: str):
    """Download ``url`` into directory ``root`` and return the local path.

    The expected SHA256 digest is taken from the second-to-last URL path
    segment.  An existing file with a matching digest is reused; otherwise the
    file is (re-)downloaded with a progress bar and verified afterwards.

    Raises:
        RuntimeError: if the target path exists but is not a regular file, or
            if the downloaded file fails checksum verification.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        # Reuse the cached file only when its checksum still matches.
        if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
            return download_target
        else:
            warnings.warn(
                f"{download_target} exists, but the SHA256 checksum does not match; "
                "re-downloading the file"
            )
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")),
            ncols=80,
            unit="iB",
            unit_scale=True,
            unit_divisor=1024,
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    # Bug fix: the original message read "does not not match".
    if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")
    return download_target
def _convert_image_to_rgb(image):
    """Convert a PIL image to 3-channel RGB mode."""
    return image.convert("RGB")
def _transform(n_px):
    """Return CLIP's image preprocessing pipeline for ``n_px``-pixel input:
    bicubic resize, center crop, RGB conversion, tensor conversion and
    normalization with CLIP's dataset statistics."""
    return Compose(
        [
            Resize(n_px, interpolation=BICUBIC),
            CenterCrop(n_px),
            _convert_image_to_rgb,
            ToTensor(),
            Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
def available_models() -> List[str]:
    """Return the names of the CLIP checkpoints known to this module."""
    return [model_name for model_name in _MODELS]
def load(
    name: str,
    device: Union[str, torch.device] = "cuda" if flow.cuda.is_available() else "cpu",
    download_root: str = None,
):
    """Load a CLIP model

    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a
        model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model
    download_root: str
        path to download the model files; by default, it uses "~/.cache/clip"

    Returns
    -------
    model : flow.nn.Module
        The CLIP model
    preprocess : Callable[[PIL.Image], flow.Tensor]
        A flowvision transform that converts a PIL image into a tensor that
        the returned model can take as its input
    """
    if name in _MODELS:
        model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
    with open(model_path, "rb") as opened_file:
        try:
            # loading JIT archive
            model = torch.jit.load(opened_file, map_location="cpu").eval()
            state_dict = None
        except RuntimeError:
            # loading saved state dict
            state_dict = torch.load(opened_file, map_location="cpu")
    model = build_model(state_dict or model.state_dict()).to(device)
    if str(device) == "cpu":
        model.float()
    # Bug fix: the ViT tower exposes the input size as ``img_size`` while
    # ModifiedResNet exposes it as ``input_resolution``; the original
    # unconditionally read ``img_size`` and crashed for the RN* checkpoints.
    input_resolution = getattr(model.visual, "img_size", None)
    if input_resolution is None:
        input_resolution = model.visual.input_resolution
    return model, _transform(input_resolution)
def tokenize(
    texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False
) -> Union[flow.IntTensor, flow.LongTensor]:
    """
    Returns the tokenized representation of given input string(s)

    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize
    context_length : int
        The context length to use; all CLIP models use 77 as the context length
    truncate: bool
        Whether to truncate the text in case its encoding is longer than the context length

    Returns
    -------
    A two-dimensional tensor containing the resulting tokens,
    shape = [number of input strings, context_length].
    """
    if isinstance(texts, str):
        texts = [texts]
    sot_token = _tokenizer.encoder["<|startoftext|>"]
    eot_token = _tokenizer.encoder["<|endoftext|>"]
    # Wrap every encoded text in start/end-of-text markers.
    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
    # Zero-padded output; unused positions stay 0.
    result = flow.zeros(len(all_tokens), context_length, dtype=flow.int)
    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            if truncate:
                # Drop the overflow but keep the end-of-text marker in the
                # final position.
                tokens = tokens[:context_length]
                tokens[-1] = eot_token
            else:
                raise RuntimeError(
                    f"Input {texts[i]} is too long for context length {context_length}"
                )
        result[i, : len(tokens)] = flow.tensor(tokens, dtype=flow.int)
    return result
| 7,197 | 34.99 | 168 | py |
libai | libai-main/projects/CLIP/clip/model.py | # --------------------------------------------------------
# Borrow code from:
# https://github.com/openai/CLIP/tree/main/clip/model.py
# --------------------------------------------------------
from collections import OrderedDict
from typing import Dict, Tuple, Union
import numpy as np
import oneflow as flow
import torch
from oneflow import nn
from libai.layers import MLP, Embedding, LayerNorm, Linear, MultiheadAttention, TransformerLayer
from libai.layers.activation import build_activation
from libai.layers.attention import AttnMaskType
from libai.models import VisionTransformer as ViT
from libai.utils import distributed as dist
from libai.utils.checkpoint import get_missing_parameters_message, get_unexpected_parameters_message
from .ops import multi_head_attention_forward
class Bottleneck(nn.Module):
    """Anti-aliased ResNet bottleneck block used by CLIP's ModifiedResNet.

    1x1 reduce -> 3x3 -> (avgpool when stride > 1) -> 1x1 expand, with a
    matching avgpool+1x1 downsample on the identity branch when needed.
    """

    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        # all conv layers have stride 1. an avgpool is performed
        # after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu3 = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride
        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool,
            # and the subsequent convolution has stride 1
            self.downsample = nn.Sequential(
                OrderedDict(
                    [
                        ("-1", nn.AvgPool2d(stride)),
                        (
                            "0",
                            nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False),
                        ),
                        ("1", nn.BatchNorm2d(planes * self.expansion)),
                    ]
                )
            )

    def forward(self, x: flow.Tensor):
        """Residual forward pass; applies downsample to the identity when set."""
        identity = x
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu3(out)
        return out
class AttentionPool2d(nn.Module):
    """QKV attention pooling over a spatial feature map.

    Flattens an NCHW map to (HW+1, N, C) with a mean token prepended, adds a
    learned positional embedding, runs one multi-head attention step, and
    returns the attended mean-token vector.
    """

    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # One position per spatial location plus one for the mean token.
        self.positional_embedding = nn.Parameter(
            flow.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5
        )
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
            2, 0, 1
        )  # NCHW -> (HW)NC
        x = flow.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        x, _ = multi_head_attention_forward(
            query=x,
            key=x,
            value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=flow.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False,
        )
        # First sequence position is the prepended mean token.
        return x[0]
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to flowvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1,
    with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is
    prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """

    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution
        # the 3-layer stem
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.relu3 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(2)
        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
        embed_dim = width * 32  # the ResNet feature dimension
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)

    def _make_layer(self, planes, blocks, stride=1):
        # First block may downsample; subsequent blocks keep stride 1.
        layers = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        def stem(x):
            x = self.relu1(self.bn1(self.conv1(x)))
            x = self.relu2(self.bn2(self.conv2(x)))
            x = self.relu3(self.bn3(self.conv3(x)))
            x = self.avgpool(x)
            return x

        # Match the input dtype to the stem weights (supports fp16 towers).
        x = x.to(dtype=self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)
        return x
class MLPClip(MLP):
    """libai MLP that swaps the activation to CLIP's "quick_gelu".

    NOTE(review): when ``bias_gelu_fusion`` is True, the fused path inherited
    from the base class is used unchanged (the activation is not replaced) --
    confirm that is intended.
    """

    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        output_dropout_prob=0,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        *,
        layer_idx=0,
    ):
        super().__init__(
            hidden_size,
            ffn_hidden_size,
            output_dropout_prob,
            init_method,
            output_layer_init_method,
            bias_gelu_fusion,
            bias_dropout_fusion,
            layer_idx=layer_idx,
        )
        # Replace the default activation with "quick_gelu" on the unfused path.
        if not bias_gelu_fusion:
            self.activation_func = build_activation("quick_gelu")
class TransformerLayerClip(TransformerLayer):
    """libai TransformerLayer whose feed-forward block is ``MLPClip``
    (quick_gelu activation), matching the CLIP reference architecture."""

    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        is_decoder=False,
        attention_dropout_prob=0,
        output_dropout_prob=0,
        drop_path_prob=0,
        layernorm_epsilon=0.00001,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        apply_residual_post_layernorm=False,
        attn_mask_type=AttnMaskType.padding,
        *,
        layer_idx=0,
    ):
        super().__init__(
            hidden_size,
            ffn_hidden_size,
            num_attention_heads,
            is_decoder,
            attention_dropout_prob,
            output_dropout_prob,
            drop_path_prob,
            layernorm_epsilon,
            init_method,
            output_layer_init_method,
            bias_gelu_fusion,
            bias_dropout_fusion,
            scale_mask_softmax_fusion,
            apply_query_key_layer_scaling,
            apply_residual_post_layernorm,
            attn_mask_type,
            layer_idx=layer_idx,
        )
        # Override the base-class MLP with the quick_gelu variant.
        self.mlp = MLPClip(
            self.hidden_size,
            self.ffn_hidden_size,
            self.output_dropout_prob,
            self.init_method,
            output_layer_init_method=self.output_layer_init_method,
            bias_gelu_fusion=self.bias_gelu_fusion,
            bias_dropout_fusion=self.bias_dropout_fusion,
            layer_idx=self.layer_idx,
        )
class Transformer(nn.Module):
    """A stack of ``layers`` CLIP transformer blocks sharing one (optional)
    attention mask."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask: flow.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        self.attn_mask = attn_mask
        self.resblocks = nn.ModuleList(
            [TransformerLayerClip(width, 4 * width, heads, layer_idx=i) for i in range(layers)]
        )

    def forward(self, x: flow.Tensor):
        """Apply every block in sequence, passing the shared mask each time."""
        for layer in self.resblocks:
            x = layer(x, self.attn_mask)
        return x
class VisionTransformer(ViT):
    """libai ViT adapted for CLIP: adds a pre-transformer LayerNorm
    (``ln_pre``) and replaces the head with a bias-free linear projection."""

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=192,
        depth=12,
        num_heads=3,
        mlp_ratio=4,
        drop_rate=0,
        attn_drop_rate=0,
        drop_path_rate=0,
        num_classes=1000,
        loss_func=None,
    ):
        super().__init__(
            img_size,
            patch_size,
            in_chans,
            embed_dim,
            depth,
            num_heads,
            mlp_ratio,
            drop_rate,
            attn_drop_rate,
            drop_path_rate,
            num_classes,
            loss_func,
        )
        self.ln_pre = LayerNorm(embed_dim, layer_idx=0)
        self.head = Linear(embed_dim, num_classes, bias=False, layer_idx=-1)

    def forward_features(self, x):
        """Patch-embed, prepend the CLS token, add positions, ``ln_pre`` and
        run the transformer blocks."""
        # patch embedding
        x = self.patch_embed(x)
        cls_token = self.cls_token.expand(
            x.shape[0], -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        cls_token = cls_token.to_global(sbp=x.sbp, placement=cls_token.placement)
        x = flow.cat((cls_token, x), dim=1)
        # position embedding
        pos_embed = self.pos_embed.expand(x.shape[0], -1, -1)
        pos_embed = pos_embed.to_global(sbp=x.sbp, placement=pos_embed.placement)
        x = self.pos_drop(x + pos_embed)
        # layernorm_pre
        x = self.ln_pre(x)
        # transformer block
        x = self.blocks(x)
        return x
class CLIP(nn.Module):
    """CLIP dual encoder: an image tower (ModifiedResNet or ViT) and a text
    transformer, both projected into a shared ``embed_dim`` space.

    ``forward`` returns the temperature-scaled image-text similarity logits.
    """

    def __init__(
        self,
        embed_dim: int,
        # vision
        image_resolution: int,
        vision_layers: Union[Tuple[int, int, int, int], int],
        vision_width: int,
        vision_patch_size: int,
        # text
        context_length: int,
        vocab_size: int,
        transformer_width: int,
        transformer_heads: int,
        transformer_layers: int,
    ):
        super().__init__()
        self.context_length = context_length
        # A tuple/list of layer counts selects the ResNet tower; an int, ViT.
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width,
            ).to_global(sbp=flow.sbp.broadcast, placement=dist.get_layer_placement(0))
        else:
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                img_size=image_resolution,
                patch_size=vision_patch_size,
                embed_dim=vision_width,
                depth=vision_layers,
                num_heads=vision_heads,
                num_classes=embed_dim,
            )
        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask(),
        )
        self.vocab_size = vocab_size
        self.token_embedding = Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(
            flow.empty(
                self.context_length,
                transformer_width,
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                placement=dist.get_layer_placement(0),
            )
        )
        self.ln_final = LayerNorm((transformer_width,), layer_idx=-1)
        self.text_projection = nn.Parameter(
            flow.empty(
                transformer_width,
                embed_dim,
                sbp=flow.sbp.broadcast,
                placement=dist.get_layer_placement(0),
            )
        )
        # Learnable logit temperature, initialized to log(1/0.07).
        self.logit_scale = nn.Parameter(
            flow.ones([], sbp=flow.sbp.broadcast, placement=dist.get_layer_placement(0))
            * np.log(1 / 0.07)
        )
        self.initialize_parameters()

    def initialize_parameters(self):
        """Initialize embeddings and projections (normal init with widths-
        and depth-dependent std for the text transformer)."""
        if hasattr(self.visual, "patch_embed"):
            nn.init.zeros_(self.visual.patch_embed.proj.bias)
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)
        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features ** -0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
            for resnet_block in [
                self.visual.layer1,
                self.visual.layer2,
                self.visual.layer3,
                self.visual.layer4,
            ]:
                for name, param in resnet_block.named_parameters():
                    # Zero-init the last BN gain of each bottleneck.
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)
        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.self_attention.query_key_value.weight, std=attn_std)
            nn.init.normal_(block.self_attention.dense.weight, std=proj_std)
            nn.init.normal_(block.mlp.dense_h_to_4h.weight, std=fc_std)
            nn.init.normal_(block.mlp.dense_4h_to_h.weight, std=proj_std)
        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    def build_attention_mask(self):
        """Build the causal mask for the text transformer.

        NOTE(review): unlike OpenAI's additive -inf mask, this produces a 0/1
        lower-triangular keep-mask (ones on and below the diagonal);
        presumably libai's attention consumes it as a keep-mask -- confirm.
        """
        mask = flow.ones(
            self.context_length,
            self.context_length,
            sbp=flow.sbp.broadcast,
            placement=dist.get_layer_placement(0),
        )
        mask = flow.tril(mask)  # keep the lower triangle; zero the strict upper triangle
        return mask

    @property
    def dtype(self):
        # NOTE(review): only valid for the ResNet visual tower; the ViT tower
        # has no ``conv1`` attribute.
        return self.visual.conv1.weight.dtype

    def encode_image(self, image):
        """Embed a batch of images; the visual head output is the feature."""
        return self.visual(image)["prediction_scores"]

    def encode_text(self, text):
        """Embed a batch of token-id sequences into the shared space."""
        x = self.token_embedding(text)  # [batch_size, n_ctx, d_model]
        x = x + self.positional_embedding
        # x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        # x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x)
        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = (
            x[flow.arange(x.shape[0], sbp=x.sbp, placement=x.placement), text.argmax(dim=-1)]
            @ self.text_projection
        )
        return x

    def forward(self, image, text):
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)
        # normalized features
        image_features = image_features / image_features.norm(dim=1, keepdim=True)
        text_features = text_features / text_features.norm(dim=1, keepdim=True)
        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()
        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16"""

    def _convert_weights_to_fp16(l):
        # Plain conv / linear layers: cast weight and optional bias.
        if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            l.weight.data = l.weight.data.to(dtype=flow.float16)
            if l.bias is not None:
                l.bias.data = l.bias.data.to(dtype=flow.float16)
        # libai fused attention: cast the fused QKV and output projections.
        if isinstance(l, MultiheadAttention):
            for attr in ["query_key_value", "dense"]:
                layer = getattr(l, attr)
                weight = getattr(layer, "weight")
                if weight is not None:
                    weight.data = weight.data.to(dtype=flow.float16)
                bias = getattr(layer, "bias")
                if bias is not None:
                    bias.data = bias.data.to(dtype=flow.float16)
        # Bare projection parameters hung off a module (e.g. CLIP itself).
        if hasattr(l, "text_projection"):
            attr = getattr(l, "text_projection")
            if attr is not None:
                attr.data = attr.data.to(dtype=flow.float16)
        if hasattr(l, "proj"):
            attr = getattr(l, "proj")
            if attr is not None:
                attr.weight.data = attr.weight.data.to(dtype=flow.float16)

    model.apply(_convert_weights_to_fp16)
def load_tensor(tensor_lhs: flow.Tensor, tensor_rhs: torch.Tensor):
    """Copy a torch tensor into a global oneflow tensor in place.

    The torch value is first materialized as a broadcast global tensor across
    all CUDA devices and then re-laid-out to match the destination's
    SBP/placement before the in-place copy.
    """
    tensor_rhs = flow.Tensor(
        tensor_rhs.cpu().numpy(),
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=flow.env.all_device_placement("cuda"),
    ).to_global(sbp=tensor_lhs.sbp, placement=tensor_lhs.placement)
    tensor_lhs.data.copy_(tensor_rhs.data)
def load_weights(model: nn.Module, state_dict: Dict):
    """Copy matching tensors from ``state_dict`` into ``model`` in place.

    Checkpoint entries whose shapes disagree with the model are skipped (and
    reported), unexpected checkpoint keys are reported, and model parameters
    never assigned are reported as missing.  Note: ``state_dict`` is mutated
    (shape-mismatched keys are popped), as in the original implementation.
    """
    model_state_dict = model.state_dict()
    # Drop checkpoint entries whose shapes do not match the model.
    incorrect_shapes = []
    for k in list(state_dict.keys()):
        if k in model_state_dict:
            shape_model = tuple(model_state_dict[k].shape)
            shape_checkpoint = tuple(state_dict[k].shape)
            if shape_model != shape_checkpoint:
                incorrect_shapes.append((k, shape_checkpoint, shape_model))
                state_dict.pop(k)
    unexpected_keys = []
    for key, value in state_dict.items():
        if key not in model_state_dict:
            unexpected_keys.append(key)
            # skip this key
            continue
        # state_dict() returns references to the live tensors, so popping the
        # entry and copying into it updates the model directly (the original
        # rebuilt the full state dict for every key).  Popping also leaves
        # model_state_dict holding exactly the never-assigned keys.
        load_tensor(model_state_dict.pop(key), value)
    missing_keys = list(model_state_dict.keys())
    for k, shape_checkpoint, shape_model in incorrect_shapes:
        print(
            "Skip loading parameter '{}' to the model due to incompatible "
            "shapes: {} in the checkpoint but {} in the "
            "model! You might want to double check if this is expected.".format(
                k, shape_checkpoint, shape_model
            )
        )
    if missing_keys:
        print(get_missing_parameters_message(missing_keys))
    if unexpected_keys:
        print(get_unexpected_parameters_message(unexpected_keys))
def convert_qkv_weight(qkv_weight, num_heads):
    """Reorder a fused QKV projection weight for libai's fused attention.

    The checkpoint stores rows grouped as [3, heads, 64, hidden]; libai wants
    them interleaved per head as [heads, 3, 64, hidden].  Per-head dim is 64.
    """
    hidden = num_heads * 64
    grouped = qkv_weight.view([3, num_heads, 64, hidden])
    per_head = grouped.permute(1, 0, 2, 3).contiguous()
    return per_head.view(3 * hidden, hidden)
def convert_qkv_bias(qkv_bias, num_heads):
    """Reorder a fused QKV bias from [3, heads, 64] to per-head-interleaved
    [heads, 3, 64] layout (flattened), matching ``convert_qkv_weight``."""
    grouped = qkv_bias.view(3, num_heads, 64)
    return grouped.permute(1, 0, 2).contiguous().view(-1)
def change_vit_state_dict(state_dict, visual_num_heads, text_num_heads):
    """Rename and reshape OpenAI-CLIP checkpoint keys to libai's layout.

    Fused QKV weights/biases are re-interleaved per head via
    ``convert_qkv_weight`` / ``convert_qkv_bias`` (per-head dim 64), and
    ViT-specific tensors (class token, position embedding, projection) are
    reshaped to match libai's modules.  Returns a new dict; the input is not
    modified.
    """
    new_state_dict = {}
    for key, value in state_dict.items():
        # change prefix
        if "visual.transformer.resblocks" in key:
            key = key.replace("visual.transformer.resblocks", "visual.blocks")
        # change "ln_1" to "input_layernorm"
        if "ln_1" in key:
            key = key.replace("ln_1", "input_layernorm")
        # change "ln_2" to "post_attention_layernorm"
        if "ln_2" in key:
            key = key.replace("ln_2", "post_attention_layernorm")
        # change "attn.out_proj" to "attention.dense"
        if "attn.out_proj" in key:
            key = key.replace("attn.out_proj", "attention.dense")
        # change "attn" to "attention.query_key_value"
        if "attn.in_proj_weight" in key:
            key = key.replace("attn.in_proj_weight", "attention.query_key_value.weight")
            if "visual" not in key:
                value = convert_qkv_weight(value, text_num_heads)
            else:
                value = convert_qkv_weight(value, visual_num_heads)
        if "attn.in_proj_bias" in key:
            key = key.replace("attn.in_proj_bias", "attention.query_key_value.bias")
            if "visual" not in key:
                value = convert_qkv_bias(value, text_num_heads)
            else:
                value = convert_qkv_bias(value, visual_num_heads)
        # change "mlp.c_fc" to "mlp.dense_h_to_4h"
        if "mlp.c_fc" in key:
            key = key.replace("mlp.c_fc", "mlp.dense_h_to_4h")
        # change "mlp.c_proj" to "mlp.dense_4h_to_h"
        if "mlp.c_proj" in key:
            key = key.replace("mlp.c_proj", "mlp.dense_4h_to_h")
        # change "class_embedding" to "cls_token"
        if "class_embedding" in key:
            key = key.replace("class_embedding", "cls_token")
            value = value.unsqueeze(0).unsqueeze(0)
        # change "pos_embed" to "positional_embedding"
        if "visual.positional_embedding" == key:
            key = "visual.pos_embed"
            value = value.unsqueeze(0)
        # change patch_embedding
        if key == "visual.conv1.weight":
            key = "visual.patch_embed.proj.weight"
        # change "ln_post"
        if "ln_post" in key:
            key = key.replace("ln_post", "norm")
        # change "proj"
        if "visual.proj" == key:
            key = "visual.head.weight"
            value = value.transpose(0, 1)
        # added by huangwei
        key = key.replace("attention.query_key_value", "self_attention.query_key_value").replace(
            "attention.dense", "self_attention.dense"
        )
        new_state_dict[key] = value
    return new_state_dict
def build_model(state_dict: dict):
    """Infer CLIP hyper-parameters from a checkpoint state dict and build the model.

    A ViT visual tower is recognized by the presence of the "visual.proj" key;
    otherwise the checkpoint is assumed to hold a ModifiedResNet tower. The
    (possibly re-keyed) weights are loaded and the model is returned in eval mode.
    """
    is_vit = "visual.proj" in state_dict

    if is_vit:
        patch_proj = state_dict["visual.conv1.weight"]
        vision_width = patch_proj.shape[0]
        # one ".attn.in_proj_weight" entry per transformer block of the visual tower
        vision_layers = len(
            [
                name
                for name in state_dict.keys()
                if name.startswith("visual.") and name.endswith(".attn.in_proj_weight")
            ]
        )
        vision_patch_size = patch_proj.shape[-1]
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_resolution = vision_patch_size * grid_size
    else:
        # ResNet backbone: count the residual blocks in each of the four stages.
        vision_layers = tuple(
            len(set(name.split(".")[2] for name in state_dict if name.startswith(f"visual.layer{stage}")))
            for stage in [1, 2, 3, 4]
        )
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        attnpool_pos = state_dict["visual.attnpool.positional_embedding"]
        output_width = round((attnpool_pos.shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == attnpool_pos.shape[0]
        image_resolution = output_width * 32

    # Text-branch hyper-parameters, all read off tensor shapes.
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(
        set(name.split(".")[2] for name in state_dict if name.startswith("transformer.resblocks"))
    )

    if is_vit:
        # Rename torch-CLIP keys to the libai layout (and re-pack qkv tensors).
        state_dict = change_vit_state_dict(state_dict, vision_width // 64, transformer_heads)

    model = CLIP(
        embed_dim,
        image_resolution,
        vision_layers,
        vision_width,
        vision_patch_size,
        context_length,
        vocab_size,
        transformer_width,
        transformer_heads,
        transformer_layers,
    )

    # Drop checkpoint metadata entries that are not model parameters.
    for meta_key in ("input_resolution", "context_length", "vocab_size"):
        if meta_key in state_dict:
            del state_dict[meta_key]
    load_weights(model, state_dict)
    return model.eval()
| 25,690 | 34.731572 | 100 | py |
libai | libai-main/projects/CLIP/clip/__init__.py | from .clip import load, tokenize
| 33 | 16 | 32 | py |
libai | libai-main/projects/CLIP/clip/ops.py | # --------------------------------------------------------
# Reference:
# https://github.com/pyflow/pyflow/blob/1.7/flow/nn/functional.py#L4041
# --------------------------------------------------------
import warnings
from typing import Optional, Tuple
import oneflow as flow
import oneflow.nn.functional as F
from oneflow import Tensor
def multi_head_attention_forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    embed_dim_to_check: int,
    num_heads: int,
    in_proj_weight: Tensor,
    in_proj_bias: Tensor,
    bias_k: Optional[Tensor],
    bias_v: Optional[Tensor],
    dropout_p: float,
    out_proj_weight: Tensor,
    out_proj_bias: Tensor,
    training: bool = True,
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    use_separate_proj_weight: bool = False,
    q_proj_weight: Optional[Tensor] = None,
    k_proj_weight: Optional[Tensor] = None,
    v_proj_weight: Optional[Tensor] = None,
    static_k: Optional[Tensor] = None,
    static_v: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
    r"""
    Args:
        query, key, value: map a query and a set of key-value pairs to an output.
            See "Attention Is All You Need" for more details.
        embed_dim_to_check: total dimension of the model.
        num_heads: parallel attention heads.
        in_proj_weight, in_proj_bias: input projection weight and bias.
        bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
        add_zero_attn: add a new batch of zeros to the key and
            value sequences at dim=1.
        dropout_p: probability of an element to be zeroed.
        out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if is ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is an binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
        need_weights: output attn_output_weights.
        attn_mask: 2D or 3D mask that prevents attention to certain positions.
            A 2D mask will be broadcasted for all the batches while a 3D mask allows
            to specify a different mask for the entries of each batch.
        use_separate_proj_weight: the function accept the proj. weights for query, key,
            and value in different forms. If false, in_proj_weight will be used, which is
            a combination of q_proj_weight, k_proj_weight, v_proj_weight.
        q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
        static_k, static_v: static key and value used for attention operators.
    Shape:
        Inputs:
        - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
            the embedding dimension.
        - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
            the embedding dimension.
        - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
            the embedding dimension.
        - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source
            sequence length.
            If a ByteTensor is provided, the non-zero positions will be ignored while
            the zero positions will be unchanged. If a BoolTensor is provided, the positions
            with the value of ``True`` will be ignored while the position with the value
            of ``False`` will be unchanged.
        - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length,
            S is the source sequence length.
            3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target
            sequence length, S is the source sequence length. attn_mask ensures that position
            i is allowed to attend the unmasked positions.
            If a ByteTensor is provided, the non-zero positions are not allowed to attend
            while the zero positions will be unchanged. If a BoolTensor is provided, positions
            with ``True`` are not allowed to attend while ``False`` values will be unchanged.
            If a FloatTensor is provided, it will be added to the attention weight.
        - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
            N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
            N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        Outputs:
        - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
            E is the embedding dimension.
        - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
            L is the target sequence length, S is the source sequence length.
    """
    # Shapes are (seq_len, batch, embed_dim) throughout, matching torch's
    # (non-batch-first) multi_head_attention_forward that this ports to oneflow.
    tgt_len, bsz, embed_dim = query.size()
    assert embed_dim == embed_dim_to_check
    # allow MHA to have different sizes for the feature dimension
    assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
    head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
    # 1/sqrt(head_dim) factor of scaled dot-product attention (applied to q below).
    scaling = float(head_dim) ** -0.5
    # --- Step 1: project query/key/value (packed in_proj vs. separate weights) ---
    if not use_separate_proj_weight:
        # NOTE(review): this relies on flow.equal performing a whole-tensor
        # equality test (torch.equal semantics) to detect self-attention — confirm.
        if flow.equal(query, key) and flow.equal(key, value):
            # self-attention
            q, k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
        elif flow.equal(key, value):
            # encoder-decoder attention
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)
            if key is None:
                assert value is None
                k = None
                v = None
            else:
                # This is inline in_proj function with in_proj_weight and in_proj_bias
                _b = in_proj_bias
                _start = embed_dim
                _end = None
                _w = in_proj_weight[_start:, :]
                if _b is not None:
                    _b = _b[_start:]
                k, v = F.linear(key, _w, _b).chunk(2, dim=-1)
        else:
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim
            _end = embed_dim * 2
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            k = F.linear(key, _w, _b)
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim * 2
            _end = None
            _w = in_proj_weight[_start:, :]
            if _b is not None:
                _b = _b[_start:]
            v = F.linear(value, _w, _b)
    else:
        # Separate q/k/v projection weights; in_proj_bias (if any) is still
        # packed as [q_bias; k_bias; v_bias] and sliced per projection.
        len1, len2 = q_proj_weight.size()
        assert len1 == embed_dim and len2 == query.size(-1)
        len1, len2 = k_proj_weight.size()
        assert len1 == embed_dim and len2 == key.size(-1)
        len1, len2 = v_proj_weight.size()
        assert len1 == embed_dim and len2 == value.size(-1)
        if in_proj_bias is not None:
            q = F.linear(query, q_proj_weight, in_proj_bias[0:embed_dim])
            k = F.linear(key, k_proj_weight, in_proj_bias[embed_dim : (embed_dim * 2)])
            v = F.linear(value, v_proj_weight, in_proj_bias[(embed_dim * 2) :])
        else:
            q = F.linear(query, q_proj_weight, in_proj_bias)
            k = F.linear(key, k_proj_weight, in_proj_bias)
            v = F.linear(value, v_proj_weight, in_proj_bias)
    q = q * scaling
    # --- Step 2: validate attn_mask and normalize it to a 3D bool/float mask ---
    if attn_mask is not None:
        assert (
            attn_mask.dtype == flow.float32
            or attn_mask.dtype == flow.float64
            or attn_mask.dtype == flow.float16
            or attn_mask.dtype == flow.uint8
            or attn_mask.dtype == flow.bool
        ), "Only float, byte, and bool types are supported for attn_mask, not {}".format(
            attn_mask.dtype
        )
        if attn_mask.dtype == flow.uint8:
            warnings.warn(
                "Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. "
                "Use bool tensor instead."
            )
            attn_mask = attn_mask.to(flow.bool)
        if attn_mask.dim() == 2:
            attn_mask = attn_mask.unsqueeze(0)
            if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
                raise RuntimeError("The size of the 2D attn_mask is not correct.")
        elif attn_mask.dim() == 3:
            if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
                raise RuntimeError("The size of the 3D attn_mask is not correct.")
        else:
            raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
        # attn_mask's dim is 3 now.
    # convert ByteTensor key_padding_mask to bool
    if key_padding_mask is not None and key_padding_mask.dtype == flow.uint8:
        warnings.warn(
            "Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. "
            "Use bool tensor instead."
        )
        key_padding_mask = key_padding_mask.to(flow.bool)
    # This port deliberately drops torch's bias_k/bias_v (and add_zero_attn) support.
    assert bias_k is None, "Only support bias_k is None"
    assert bias_v is None, "Only support bias_v is None"
    # --- Step 3: fold heads into the batch dim -> (bsz*num_heads, seq, head_dim) ---
    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if k is not None:
        k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    if v is not None:
        v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    # Pre-projected keys/values (e.g. from a cache) override the computed ones.
    if static_k is not None:
        assert static_k.size(0) == bsz * num_heads
        assert static_k.size(2) == head_dim
        k = static_k
    if static_v is not None:
        assert static_v.size(0) == bsz * num_heads
        assert static_v.size(2) == head_dim
        v = static_v
    src_len = k.size(1)
    if key_padding_mask is not None:
        assert key_padding_mask.size(0) == bsz
        assert key_padding_mask.size(1) == src_len
    # --- Step 4: attention scores, masking, softmax, dropout ---
    attn_output_weights = flow.bmm(q, k.transpose(1, 2))
    assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
    if attn_mask is not None:
        if attn_mask.dtype == flow.bool:
            attn_output_weights.masked_fill_(attn_mask, float("-inf"))
        else:
            attn_output_weights += attn_mask
    if key_padding_mask is not None:
        # Broadcast the (N, S) padding mask over heads and query positions.
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        attn_output_weights = attn_output_weights.masked_fill(
            key_padding_mask.unsqueeze(1).unsqueeze(2),
            float("-inf"),
        )
        attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
    attn_output_weights = F.softmax(attn_output_weights, dim=-1)
    attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training)
    # --- Step 5: weighted sum of values, unfold heads, output projection ---
    attn_output = flow.bmm(attn_output_weights, v)
    assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias)
    if need_weights:
        # average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None
| 12,264 | 43.6 | 100 | py |
libai | libai-main/projects/CLIP/tests/test_multi_head_attn.py | import os
import sys
import unittest
import numpy as np
import oneflow as flow
import torch
from torch.nn.functional import multi_head_attention_forward as multi_head_attention_forward_torch
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from clip.ops import multi_head_attention_forward as multi_head_attention_forward_flow # noqa: E402
class TestMultiHeadAttention(unittest.TestCase):
    """Numerically compares the oneflow multi-head attention port against torch."""

    def test_with_torch(self):
        # Draw the projection parameters in the same order as before:
        # k, q, v (weight then bias), then the output projection, then the input.
        params = {}
        for name in ("k", "q", "v"):
            params[name + "_w"] = np.random.normal(size=(32, 32))
            params[name + "_b"] = np.random.normal(size=(32))
        out_proj_w = np.random.normal(size=(64, 32))
        out_proj_b = np.random.normal(size=(64))
        x = np.random.normal(size=(65, 16, 32))

        # Reference result from torch's implementation.
        x_torch = torch.from_numpy(x)
        torch_out, _ = multi_head_attention_forward_torch(
            query=x_torch,
            key=x_torch,
            value=x_torch,
            embed_dim_to_check=x_torch.shape[-1],
            num_heads=8,
            q_proj_weight=torch.from_numpy(params["q_w"]),
            k_proj_weight=torch.from_numpy(params["k_w"]),
            v_proj_weight=torch.from_numpy(params["v_w"]),
            in_proj_weight=None,
            in_proj_bias=torch.cat(
                [torch.from_numpy(params[n + "_b"]) for n in ("q", "k", "v")]
            ),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=torch.from_numpy(out_proj_w),
            out_proj_bias=torch.from_numpy(out_proj_b),
            use_separate_proj_weight=True,
            training=True,
            need_weights=False,
        )

        # The same computation through the oneflow port, on GPU.
        x_flow = flow.from_numpy(x).cuda()
        flow_out, _ = multi_head_attention_forward_flow(
            query=x_flow,
            key=x_flow,
            value=x_flow,
            embed_dim_to_check=x_flow.shape[-1],
            num_heads=8,
            q_proj_weight=flow.from_numpy(params["q_w"]).cuda(),
            k_proj_weight=flow.from_numpy(params["k_w"]).cuda(),
            v_proj_weight=flow.from_numpy(params["v_w"]).cuda(),
            in_proj_weight=None,
            in_proj_bias=flow.cat(
                [flow.from_numpy(params[n + "_b"]).cuda() for n in ("q", "k", "v")]
            ),
            bias_k=None,
            bias_v=None,
            dropout_p=0,
            out_proj_weight=flow.from_numpy(out_proj_w).cuda(),
            out_proj_bias=flow.from_numpy(out_proj_b).cuda(),
            use_separate_proj_weight=True,
            training=True,
            need_weights=False,
        )

        assert np.allclose(torch_out.numpy(), flow_out.numpy())
if __name__ == "__main__":
unittest.main()
| 3,149 | 33.23913 | 100 | py |
libai | libai-main/projects/CLIP/tests/test_clip.py | import os
import sys
import unittest
import oneflow as flow
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from clip.model import CLIP, ModifiedResNet, Transformer # noqa: E402
class TestCLIP(unittest.TestCase):
    """Forward-pass smoke tests for the CLIP building blocks on global tensors."""

    def test_modified_resnet(self):
        placement = flow.placement("cuda", ranks=[0])
        backbone = ModifiedResNet([3, 4, 6, 3], 120, 16).to_global(
            sbp=flow.sbp.broadcast, placement=placement
        )
        images = flow.rand(32, 3, 224, 224, sbp=flow.sbp.split(0), placement=placement)
        features = backbone(images)
        assert isinstance(features, flow.Tensor)

    def test_transformer(self):
        placement = flow.placement("cuda", ranks=[0])
        # Causal mask: keep the lower triangle, then broadcast to [batch, 1, s, s].
        causal_mask = flow.tril(
            flow.ones(12, 12, sbp=flow.sbp.broadcast, placement=placement)
        )
        causal_mask = causal_mask.unsqueeze(0).unsqueeze(1).expand(16, 1, 12, 12)
        encoder = Transformer(128, 10, 16, causal_mask)
        tokens = flow.rand(16, 12, 128, sbp=flow.sbp.split(0), placement=placement)
        hidden = encoder(tokens)
        assert isinstance(hidden, flow.Tensor)

    def test_clip(self):
        placement = flow.placement("cuda", ranks=[0])
        model = CLIP(
            embed_dim=10,
            # vision tower
            image_resolution=224,
            vision_layers=6,
            vision_width=120,
            vision_patch_size=16,
            # text tower
            context_length=24,
            vocab_size=3000,
            transformer_width=128,
            transformer_heads=16,
            transformer_layers=10,
        )
        images = flow.rand(16, 3, 224, 224, sbp=flow.sbp.split(0), placement=placement)
        texts = flow.ones(
            16,
            24,
            dtype=flow.int,
            sbp=flow.sbp.split(0),
            placement=placement,
        )
        logits_img, logits_text = model(images, texts)
        print(logits_img)
        print(logits_text)
if __name__ == "__main__":
unittest.main()
| 2,094 | 27.69863 | 95 | py |
libai | libai-main/projects/GLM/modeling_glm.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
import oneflow.nn.functional as F
from oneflow import nn
import libai.utils.distributed as dist
from libai.config import configurable
from libai.inference.generator.generation_utils import Generator
from libai.layers import LayerNorm, LMLogits, ParallelCrossEntropyLoss
from libai.models.utils import init_method_normal, scaled_init_method_normal
from projects.GLM.layers.embedding_layer import GLMEmbedding
from projects.GLM.layers.transformer_layer import TransformerLayer
class Transformer(nn.Module):
    """Stack of GLM transformer layers followed by a final LayerNorm.

    ``forward`` additionally collects the detached input and per-layer hidden
    states so callers can cache them as segment memories.
    """

    def __init__(
        self,
        num_layers,
        hidden_size,
        num_attention_heads,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        layernorm_epsilon=1.0e-5,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        attention_scale=1.0,
    ):
        super().__init__()
        self.num_layers = num_layers
        # One TransformerLayer per depth index; layer_idx drives pipeline placement.
        self.layers = nn.ModuleList(
            [
                TransformerLayer(
                    hidden_size,
                    num_attention_heads,
                    attention_dropout_prob=attention_dropout_prob,
                    output_dropout_prob=output_dropout_prob,
                    layernorm_epsilon=layernorm_epsilon,
                    init_method=init_method,
                    output_layer_init_method=output_layer_init_method,
                    bias_gelu_fusion=bias_gelu_fusion,
                    bias_dropout_fusion=bias_dropout_fusion,
                    scale_mask_softmax_fusion=scale_mask_softmax_fusion,
                    apply_query_key_layer_scaling=apply_query_key_layer_scaling,
                    attention_scale=attention_scale,
                    layer_idx=idx,
                )
                for idx in range(num_layers)
            ]
        )
        self.final_layernorm = LayerNorm(hidden_size, eps=layernorm_epsilon, layer_idx=-1)

    def forward(self, hidden_states, attention_mask, memory_states=None):
        # Memories returned to the caller: detached input plus each layer's output.
        mem_layers = [hidden_states.detach()]
        for idx, layer in enumerate(self.layers):
            memory = None if memory_states is None else memory_states[idx]
            hidden_states = layer(hidden_states, attention_mask, mem=memory)
            mem_layers.append(hidden_states.detach())
        return self.final_layernorm(hidden_states), mem_layers
class GLMModel(nn.Module):
    """GLM transformer language model: embeddings + layer stack + tied LM head.

    Supports segment-recurrent memories: ``forward`` returns the per-layer
    detached hidden states so a following segment can attend to them.
    """

    @configurable
    def __init__(
        self,
        num_layers,
        vocab_size,
        hidden_size,
        num_attention_heads,
        max_sequence_length=1024,
        embedding_dropout_prob=0.0,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        layernorm_epsilon=1e-5,
        initializer_range=0.02,
        use_scaled_init_for_output_weights=True,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        amp_enabled=False,
        block_position_encoding=False,
        attention_scale=1.0,
        padding_idx=None,
    ):
        super().__init__()
        # Normal init for all weights; output projections optionally rescaled
        # by depth (Megatron-style) when use_scaled_init_for_output_weights.
        init_method = init_method_normal(sigma=initializer_range, mean=0)
        if use_scaled_init_for_output_weights:
            output_layer_init_method = scaled_init_method_normal(initializer_range, num_layers)
        else:
            output_layer_init_method = init_method
        self.embeddings = GLMEmbedding(
            vocab_size,
            hidden_size,
            max_sequence_length,
            padding_idx=padding_idx,
            init_method=init_method,
            embedding_dropout_prob=embedding_dropout_prob,
            amp_enabled=amp_enabled,
            block_position_encoding=block_position_encoding,
        )
        self.transformer = Transformer(
            num_layers,
            hidden_size,
            num_attention_heads,
            attention_dropout_prob=attention_dropout_prob,
            output_dropout_prob=output_dropout_prob,
            layernorm_epsilon=layernorm_epsilon,
            init_method=init_method,
            output_layer_init_method=output_layer_init_method,
            bias_gelu_fusion=bias_gelu_fusion,
            bias_dropout_fusion=bias_dropout_fusion,
            scale_mask_softmax_fusion=scale_mask_softmax_fusion,
            apply_query_key_layer_scaling=apply_query_key_layer_scaling,
            attention_scale=attention_scale,
        )
        # LM head shares the word-embedding matrix (passed at forward time).
        self.lm_head = LMLogits(vocab_size, bias=False)

    @classmethod
    def from_config(cls, cfg):
        """Map a config node to constructor kwargs (used by @configurable)."""
        return {
            "num_layers": cfg.num_layers,
            "vocab_size": cfg.vocab_size,
            "hidden_size": cfg.hidden_size,
            "num_attention_heads": cfg.num_attention_heads,
            "max_sequence_length": cfg.max_sequence_length,
            "embedding_dropout_prob": cfg.embedding_dropout_prob,
            "attention_dropout_prob": cfg.attention_dropout_prob,
            "output_dropout_prob": cfg.output_dropout_prob,
            "layernorm_epsilon": cfg.layernorm_epsilon,
            "initializer_range": cfg.initializer_range,
            "use_scaled_init_for_output_weights": cfg.use_scaled_init_for_output_weights,
            "bias_gelu_fusion": cfg.bias_gelu_fusion,
            "bias_dropout_fusion": cfg.bias_dropout_fusion,
            "scale_mask_softmax_fusion": cfg.scale_mask_softmax_fusion,
            "apply_query_key_layer_scaling": cfg.apply_query_key_layer_scaling,
            "amp_enabled": cfg.amp_enabled,
            "block_position_encoding": cfg.block_position_encoding,
            "attention_scale": cfg.attention_scale,
            "padding_idx": cfg.padding_idx,
        }

    def forward(
        self,
        input_ids,
        position_ids=None,
        attention_mask=None,
        memory_states=None,
        output_predict=True,
    ):
        """Run the model; returns ``(logits, mem_layers)``.

        ``attention_mask`` is interpreted three ways: a scalar separator, a
        per-sample separator vector (a causal-with-prefix matrix is then built
        on the fly), or an explicit 2D/4D mask that is sliced to the current
        query window.  When ``output_predict`` is False the raw transformer
        output is returned instead of vocabulary logits.
        """
        # Move inputs onto the first pipeline stage.
        input_ids = input_ids.to_global(placement=dist.get_layer_placement(0))
        position_ids = (
            position_ids.to_global(placement=dist.get_layer_placement(0))
            if position_ids is not None
            else None
        )
        attention_mask = (
            attention_mask.to_global(placement=dist.get_layer_placement(0))
            if attention_mask is not None
            else None
        )
        batch_size, query_length = input_ids.size()
        memory_length = memory_states[0].size(1) if memory_states is not None else 0
        # Separator-style masks: one scalar, or one position per sample.
        is_scalar = flow.numel(attention_mask) == 1
        is_sep = is_scalar or flow.numel(attention_mask) == batch_size
        if is_sep:
            sep = attention_mask.item() if is_scalar else attention_mask
            attention_mask = self.build_mask_matrix(
                batch_size, query_length, sep, memory_length=memory_length, is_scalar=is_scalar
            )
        else:
            if attention_mask.dim() == 2:
                attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)
            # Keep only the key columns covering the memories + current query.
            attention_mask = attention_mask[:, :, :, -query_length - memory_length :]
        input_embeds = self.embeddings(input_ids, position_ids)
        logits, mem_layers = self.transformer(
            input_embeds, attention_mask=attention_mask, memory_states=memory_states
        )
        mem_layers = self.update_mems(mem_layers, memory_states)
        if output_predict:
            # Tied output projection: reuse the word-embedding weights.
            logits = self.lm_head(logits, self.embeddings.word_embeddings.weight)
        return (logits, mem_layers)

    @staticmethod
    def set_activation_checkpoint(model):
        """Enable activation checkpointing on every TransformerLayer in a graph model."""
        for module_block in model.modules():
            # Old API in OneFlow 0.8
            if hasattr(module_block, "origin"):
                if isinstance(module_block.origin, TransformerLayer):
                    module_block.config.activation_checkpointing = True
            else:
                if isinstance(module_block.to(nn.Module), TransformerLayer):
                    module_block.to(nn.graph.GraphModule).activation_checkpointing = True

    def build_mask_matrix(self, batch_size, seq_length, sep, memory_length=0, is_scalar=False):
        """Build GLM's causal-with-bidirectional-prefix mask, shape (B, 1, S, S[+M]).

        Positions before ``sep`` attend bidirectionally (columns forced to 1);
        the rest is lower-triangular causal.  Memory columns are all visible.
        """
        m = flow.tril(
            flow.ones((1, seq_length, seq_length)),
        )
        if is_scalar:
            # Same prefix length for every sample in the batch.
            m[0, :, : int(sep)] = 1
        else:
            # Per-sample prefix: unmask columns whose index < sep[b].
            m = m.expand(batch_size, -1, -1)
            ids = flow.arange(seq_length, device=sep.device, dtype=sep.dtype).view(1, -1)
            mask = ids < sep.view(-1, 1)
            m = m.masked_fill(mask.unsqueeze(1).expand_as(m), 1)
        if memory_length > 0:
            # Cached memory positions are fully visible to every query.
            m = m.expand(batch_size, -1, -1)
            m = flow.cat((flow.ones((batch_size, seq_length, memory_length)), m), dim=2)
        m = m.unsqueeze(1)
        m = m.to_global(
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        return m

    def update_mems(self, hiddens, mems):
        """Concatenate old memories with the new hidden states per layer,
        keeping the most recent (memory_length + query_length) positions."""
        memory_length = mems[0].size(1) if mems is not None else 0
        query_length = hiddens[0].size(1)
        new_memory_length = memory_length + query_length
        new_mems = []
        for i in range(len(hiddens)):
            if new_memory_length <= query_length:
                new_mems.append(hiddens[i][:, -new_memory_length:])
            else:
                new_mems.append(
                    flow.cat((mems[i][:, -new_memory_length + query_length :], hiddens[i]), dim=1)
                )
        return new_mems
class GLMLoss(nn.Module):
    """Language-modeling loss for GLM: parallel cross entropy, token-averaged."""

    def __init__(self):
        super().__init__()
        self.loss_func = ParallelCrossEntropyLoss()

    def forward(self, logits, labels):
        # Reduce the per-token cross entropy to a single scalar mean.
        per_token_loss = self.loss_func(logits, labels)
        return {"lm_loss": per_token_loss.mean()}
class GLMForMultipleChoice(nn.Module):
    """GLM with multiple-choice scoring: ranks answer candidates by the sum of
    their token log-probabilities at the given target positions."""

    def __init__(self, cfg):
        super().__init__()
        # NOTE(review): GLMModel(cfg) relies on the @configurable wrapper to
        # expand cfg via GLMModel.from_config — verify against libai's configurable.
        self.glm = GLMModel(cfg)
        self.loss_func = GLMLoss()

    def forward(
        self,
        input_ids=None,
        position_ids=None,
        attention_mask=None,
        choice_ids=None,
        choice_indices=None,
        labels=None,
        mems=None,
        **kwargs,
    ):
        """Score each candidate; returns a dict with per-choice ``logits``.

        ``choice_ids``/``choice_indices`` give, per sample, the token ids of
        each choice and the positions at which those tokens are predicted.
        Note: ``loss`` is the dict produced by GLMLoss (``{"lm_loss": ...}``),
        not a bare scalar.
        """
        lm_logits, mem_layers = self.glm(
            input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            memory_states=mems,
            **kwargs,
        )
        outputs = F.log_softmax(lm_logits, dim=-1)
        log_probs = []
        # For every sample, sum the log-probs of each choice's tokens at their
        # target positions -> one scalar score per choice.
        for output, choices, choice_index in zip(outputs, choice_ids, choice_indices):
            log_probs_single = []
            for choice, choice_target_id in zip(choices, choice_index):
                tmp = output[choice_target_id, choice]
                log_probs_single.append(tmp.sum())
            log_probs.append(flow.stack(log_probs_single))
        log_probs = flow.stack(log_probs)
        loss = None
        if labels is not None:
            loss = self.loss_func(log_probs, labels)
        return {"loss": loss, "logits": log_probs, "lm_logits": lm_logits, "mems": mem_layers}
class GLMForConditionalGeneration(nn.Module, Generator):
    """GLM wrapped for autoregressive generation (libai Generator mixin).

    Adds LM-loss training, beam-search cache reordering, and per-step input
    preparation on top of GLMModel.
    """

    @configurable
    def __init__(
        self,
        num_layers,
        vocab_size,
        hidden_size,
        num_attention_heads,
        max_sequence_length=1024,
        embedding_dropout_prob=0.0,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        layernorm_epsilon=1e-5,
        initializer_range=0.02,
        use_scaled_init_for_output_weights=True,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        amp_enabled=False,
        block_position_encoding=False,
        attention_scale=1.0,
        padding_idx=None,
        cfg=None,
    ):
        super().__init__()
        # The Generator mixin reads generation settings from self.cfg.
        self.cfg = cfg
        self.glm = GLMModel(
            num_layers=num_layers,
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_attention_heads=num_attention_heads,
            max_sequence_length=max_sequence_length,
            embedding_dropout_prob=embedding_dropout_prob,
            attention_dropout_prob=attention_dropout_prob,
            output_dropout_prob=output_dropout_prob,
            layernorm_epsilon=layernorm_epsilon,
            initializer_range=initializer_range,
            use_scaled_init_for_output_weights=use_scaled_init_for_output_weights,
            bias_gelu_fusion=bias_gelu_fusion,
            bias_dropout_fusion=bias_dropout_fusion,
            scale_mask_softmax_fusion=scale_mask_softmax_fusion,
            apply_query_key_layer_scaling=apply_query_key_layer_scaling,
            amp_enabled=amp_enabled,
            block_position_encoding=block_position_encoding,
            attention_scale=attention_scale,
            padding_idx=padding_idx,
            # NOTE(review): GLMModel.__init__ declares no ``cfg`` parameter;
            # passing cfg=cfg relies on the @configurable wrapper to absorb it — verify.
            cfg=cfg,
        )
        self.loss_func = GLMLoss()

    @classmethod
    def from_config(cls, cfg):
        """Map a config node to constructor kwargs (used by @configurable)."""
        return {
            "num_layers": cfg.num_layers,
            "vocab_size": cfg.vocab_size,
            "hidden_size": cfg.hidden_size,
            "num_attention_heads": cfg.num_attention_heads,
            "max_sequence_length": cfg.max_sequence_length,
            "embedding_dropout_prob": cfg.embedding_dropout_prob,
            "attention_dropout_prob": cfg.attention_dropout_prob,
            "output_dropout_prob": cfg.output_dropout_prob,
            "layernorm_epsilon": cfg.layernorm_epsilon,
            "initializer_range": cfg.initializer_range,
            "use_scaled_init_for_output_weights": cfg.use_scaled_init_for_output_weights,
            "bias_gelu_fusion": cfg.bias_gelu_fusion,
            "bias_dropout_fusion": cfg.bias_dropout_fusion,
            "scale_mask_softmax_fusion": cfg.scale_mask_softmax_fusion,
            "apply_query_key_layer_scaling": cfg.apply_query_key_layer_scaling,
            "amp_enabled": cfg.amp_enabled,
            "block_position_encoding": cfg.block_position_encoding,
            "attention_scale": cfg.attention_scale,
            "padding_idx": cfg.padding_idx,
            "cfg": cfg,
        }

    def forward(
        self,
        input_ids=None,
        position_ids=None,
        attention_mask=None,
        labels=None,
        memory_states=None,
        **kwargs,
    ):
        """LM forward pass; ``loss`` is the GLMLoss dict (``{"lm_loss": ...}``)
        when labels are given, otherwise None."""
        lm_logits, mems = self.glm(
            input_ids, position_ids, attention_mask, memory_states=memory_states, **kwargs
        )
        loss = None
        if labels is not None:
            loss = self.loss_func(lm_logits, labels)
        return {"loss": loss, "logits": lm_logits, "mems": mems}

    def _reorder_cache(self, past, beam_idx):
        """Reorder cached memories along the batch dim after a beam-search step."""
        if past is None:
            return past
        reordered_decoder_past = ()
        for layer_past_states in past:
            # beam_idx must live on the same placement as the cached states.
            beam_idx = beam_idx.to_global(placement=layer_past_states.placement)
            reordered_decoder_past = reordered_decoder_past + (
                layer_past_states.index_select(0, beam_idx),
            )
        return reordered_decoder_past

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past=None,
        position_ids=None,
        generation_attention_mask=None,
        **kwargs,
    ):
        """Slice ids/positions/mask for the next decoding step.

        With a cache (``past``) only the last token and its mask row are fed;
        otherwise the full prefix up to ``seq_length`` is used.
        """
        attention_mask = generation_attention_mask
        # only last token for inputs_ids if past is defined in kwargs
        seq_length = input_ids.shape[1]
        if past:
            if position_ids is not None:
                position_ids = position_ids[:, :, seq_length - 1].unsqueeze(-1)
            if attention_mask is not None:
                attention_mask = attention_mask[:, :, seq_length - 1, :seq_length].unsqueeze(-2)
            input_ids = input_ids[:, -1].unsqueeze(-1)
        else:
            if position_ids is not None:
                position_ids = position_ids[:, :, :seq_length]
            if attention_mask is not None:
                attention_mask = attention_mask[:, :, :seq_length, :seq_length]
        return {
            "input_ids": input_ids,
            "position_ids": position_ids,
            "attention_mask": attention_mask,
            "memory_states": past,
        }

    @staticmethod
    def set_pipeline_stage_id(model: nn.Module):
        """Assign pipeline stages: embeddings -> stage 0, layers by layer_idx,
        head/loss/final LayerNorm -> last stage.  Handles both graph APIs."""
        dist_utils = dist.get_dist_util()
        if hasattr(model.glm.transformer.final_layernorm, "config"):
            # Old API in OneFlow 0.8
            for module_block in model.modules():
                if isinstance(module_block.origin, GLMEmbedding):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.origin, TransformerLayer):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
                elif isinstance(module_block.origin, (LMLogits, GLMLoss)):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
                    )
            model.glm.transformer.final_layernorm.config.set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
        else:
            for module_block in model.modules():
                if isinstance(module_block.to(nn.Module), GLMEmbedding):
                    module_block.to(nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.to(nn.Module), TransformerLayer):
                    module_block.to(nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
                elif isinstance(module_block.to(nn.Module), (LMLogits, GLMLoss)):
                    module_block.to(nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
                    )
            model.glm.transformer.final_layernorm.to(nn.graph.GraphModule).set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
| 19,130 | 38.202869 | 98 | py |
libai | libai-main/projects/GLM/infer_glm.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from libai.utils import distributed as dist
from projects.GLM.configs.glm_inference import cfg
from projects.GLM.modeling_glm import GLMForConditionalGeneration
from projects.GLM.tokenizer.glm_tokenizer import GLMChineseTokenzier
from projects.GLM.utils.glm_loader import GLMLoaderHuggerFace
# Demo / smoke-test script: GLM-10B-Chinese blank-filling inference.
# NOTE(review): the checkpoint paths below are machine-specific; parameterize
# them before reusing this script.
tokenizer = GLMChineseTokenzier.from_pretrained("/data/home/xiezipeng/glm-10b-chinese")
input_ids = tokenizer.encode(
    ["西游记的作者是[MASK]。"],
    return_tensors="of",
)
# Every context position is attendable; build_inputs_for_generation then appends
# the causal generation mask and 2D (position, block-position) ids.
inputs = {"input_ids": input_ids, "attention_mask": flow.ones(input_ids.size(), dtype=flow.bool)}
inputs = tokenizer.build_inputs_for_generation(inputs, max_gen_length=512)
# Broadcast SBP on the 2D device mesh; placement of the first pipeline stage.
sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
placement = dist.get_layer_placement(0)
# Load the HuggingFace checkpoint on CPU first (10B params), move to GPU after.
dist.set_device_type("cpu")
loader = GLMLoaderHuggerFace(
    GLMForConditionalGeneration,
    cfg,
    "/data/home/xiezipeng/glm-10b-chinese",
    # All dropout disabled for deterministic inference.
    embedding_dropout_prob=0,
    attention_dropout_prob=0,
    output_dropout_prob=0,
)
model = loader.load()
model = model.half().cuda()
model.eval()
dist.set_device_type("cuda")
# NOTE(review): infinite loop — presumably kept for latency/benchmark
# inspection; confirm this is intentional for a demo script.
while True:
    outputs = model.generate(
        inputs=inputs["input_ids"].to_global(sbp=sbp, placement=placement),
        position_ids=inputs["position_ids"].to_global(sbp=sbp, placement=placement),
        generation_attention_mask=inputs["generation_attention_mask"].to_global(
            sbp=sbp, placement=placement
        ),
        max_length=512,
    )
    res = tokenizer.decode(outputs[0])
    # Only rank 0 prints, to avoid duplicated output in distributed runs.
    if dist.is_main_process():
        print(res)
| 2,213 | 33.59375 | 97 | py |
libai | libai-main/projects/GLM/tokenizer/glm_tokenizer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import oneflow as flow
import sentencepiece as spm
from libai.tokenizer import BertTokenizer, GPT2Tokenizer, PreTrainedTokenizer, RobertaTokenizer
logger = logging.getLogger(__name__)
class GLMTokenizerMixin(PreTrainedTokenizer):
    """Mixin adding GLM special tokens (``<|startofpiece|>``, ``<|endofpiece|>``,
    ``[gMASK]``, ``[sMASK]``) and helpers that assemble GLM-style model inputs
    for multiple-choice scoring and autoregressive blank-filling generation."""

    @property
    def sop_token(self) -> Optional[str]:
        """Start-of-piece token that begins every generated blank."""
        return "<|startofpiece|>"

    @property
    def sop_token_id(self) -> Optional[int]:
        """
        `Optional[int]`: Id of the start token in the vocabulary, used when training a model with
        autoregressive blank filling.
        """
        return self.convert_tokens_to_ids(self.sop_token)

    @property
    def eop_token(self) -> Optional[str]:
        """End-of-piece token that terminates a generated blank."""
        return "<|endofpiece|>"

    @property
    def eop_token_id(self) -> Optional[int]:
        """
        `Optional[int]`: Id of the end token in the vocabulary, used when training a model with
        autoregressive blank filling.
        """
        return self.convert_tokens_to_ids(self.eop_token)

    @property
    def gmask_token_id(self) -> int:
        """Id of the ``[gMASK]`` token."""
        return self.convert_tokens_to_ids("[gMASK]")

    @property
    def smask_token_id(self) -> int:
        """Id of the ``[sMASK]`` token."""
        return self.convert_tokens_to_ids("[sMASK]")

    @property
    def mask_token_ids(self):
        """All mask-token ids scanned for when locating the blank position."""
        return [self.mask_token_id, self.smask_token_id, self.gmask_token_id]

    def _build_input_for_multiple_choice(self, context, choices):
        """Build one multiple-choice sample: the context followed by every
        encoded choice, each choice prefixed with ``sop`` and given block
        position ids 1..len(choice) anchored at the mask position."""
        context_id = context["input_ids"]
        if flow.is_tensor(context_id):
            context_id = context_id.tolist()
        # `division` marks the boundary between context and appended choices.
        division = len(context_id)
        mask_position = context_id.index(self.mask_token_id)
        token = flow.tensor(context_id, dtype=flow.long)
        attention_mask = [context["attention_mask"].expand(division, -1)]
        position_id = flow.arange(division, dtype=flow.long)
        # Context tokens carry block position 0.
        block_position_id = flow.zeros(division, dtype=flow.long)
        choice_ids, choice_indices = [], []
        for choice_str in choices:
            res = self.encode(choice_str)
            choice = flow.tensor(res, dtype=flow.long)
            choice_ids.append(choice)
            # Indices of this choice's tokens within the concatenated sequence.
            choice_indices.append(
                flow.arange(len(token), len(token) + len(choice), dtype=flow.long)
            )
            # Each choice attends causally within itself.
            attention_mask.append(flow.tril(flow.ones((len(choice), len(choice)), dtype=flow.long)))
            # Teacher-forcing layout: sop + choice[:-1] predicts choice.
            token = flow.cat(
                (token, flow.tensor([self.sop_token_id], dtype=flow.long), choice[:-1])
            )
            # Every choice token shares the absolute position of the mask.
            position_id = flow.cat(
                (position_id, flow.tensor([mask_position] * len(choice), dtype=flow.long))
            )
            block_position_id = flow.cat(
                (block_position_id, flow.arange(1, 1 + len(choice), dtype=flow.long))
            )
        # Block-diagonal mask: choices cannot see each other...
        attention_mask = flow.block_diag(*attention_mask)
        # ...but every choice token can see the whole context.
        attention_mask[division:, :division] = context["attention_mask"].unsqueeze(0)
        return {
            "input_ids": token,
            "position_ids": flow.stack((position_id, block_position_id)),
            "attention_mask": attention_mask,
            "choice_ids": choice_ids,
            "choice_indices": choice_indices,
        }

    def _pad_batch(self, tokens, position_ids, attention_mask, max_seq_length):
        """Right-pad one sample (tokens, 2D position ids, square attention
        mask) up to ``max_seq_length``."""
        pad_length = max_seq_length - len(tokens)
        # Grow the square mask on the right and bottom; padded rows/cols are 0.
        attention_mask = flow.nn.functional.pad(
            attention_mask,
            (0, pad_length, 0, pad_length),
            mode="constant",
            value=0,
        )
        tokens = flow.cat((tokens, flow.zeros(pad_length, dtype=flow.long)))
        # Repeat the last position id for the padded tail.
        position_ids = flow.cat(
            (position_ids, position_ids[..., -1:].expand(-1, pad_length)), dim=-1
        )
        return tokens, position_ids, attention_mask

    def _collate(self, samples):
        """Stack per-sample multiple-choice inputs into one padded batch."""
        TILE = 1
        # Round the longest sample length up to a multiple of TILE.
        length_to_pad = (
            (max(map(lambda spl: len(spl["input_ids"]), samples)) + TILE - 1) // TILE * TILE
        )
        token_batch, position_id_batch, attention_mask_batch = [], [], []
        choices_batch, choice_target_ids_batch = [], []
        for sample in samples:
            token, position_id, attention_mask = self._pad_batch(
                sample["input_ids"], sample["position_ids"], sample["attention_mask"], length_to_pad
            )
            token_batch.append(token)
            position_id_batch.append(position_id)
            attention_mask_batch.append(attention_mask)
            choices_batch.append(sample["choice_ids"])
            choice_target_ids_batch.append(sample["choice_indices"])
        return {
            "input_ids": flow.stack(token_batch),
            "position_ids": flow.stack(position_id_batch),
            # Insert a head/broadcast dimension for the attention mask.
            "attention_mask": flow.stack(attention_mask_batch).unsqueeze(1),
            "choice_ids": choices_batch,
            "choice_indices": choice_target_ids_batch,
        }

    def build_inputs_for_multiple_choice(self, model_input, choices, max_length=None):
        """Turn a batch of tokenized contexts plus per-sample choice lists into
        a padded multiple-choice batch (see ``_build_input_for_multiple_choice``)."""
        # Split the batched model_input back into per-sample dicts.
        samples = [
            {key: value[i] for key, value in model_input.items()}
            for i in range(len(model_input["input_ids"]))
        ]
        samples = [
            self._build_input_for_multiple_choice(sample, choice)
            for sample, choice in zip(samples, choices)
        ]
        inputs = self._collate(samples)
        return inputs

    def build_inputs_for_generation(
        self, model_input, max_gen_length=512, targets=None, padding=False
    ):
        """Append generation scaffolding to tokenized inputs.

        Adds the ``sop`` token (or teacher-forced ``targets`` when training),
        2D position ids anchored at the first mask token, and a combined
        context + causal attention mask.

        Returns a dict with ``input_ids`` and ``position_ids``, plus
        ``generation_attention_mask`` (inference, ``targets is None``) or
        ``attention_mask`` and ``labels`` (training).
        """
        mask_ids = self.mask_token_ids
        input_ids = model_input["input_ids"]
        batch_size, seq_length = input_ids.shape[:2]
        # Context part: absolute positions 0..seq_length-1, block position 0.
        position_id, block_position_id = list(range(seq_length)), [0 for _ in range(seq_length)]
        position_ids, block_position_ids = [], []
        labels = None
        if targets is not None:
            is_batched = isinstance(targets, (list, tuple))
            targets = self.encode(targets)
            if not is_batched:
                targets = [targets]
            assert len(targets) == len(input_ids)
            # Terminate each target with eop, truncate to the budget.
            targets = [(target + [self.eop_token_id])[:max_gen_length] for target in targets]
            if not padding:
                max_gen_length = max(map(len, targets))
            # Teacher forcing: inputs are sop + target, labels are shifted by 1.
            targets = [[self.sop_token_id] + target for target in targets]
            labels = [target[1:] for target in targets]
            targets = [
                target + [self.pad_token_id] * (max_gen_length + 1 - len(target))
                for target in targets
            ]
            # -100 marks positions ignored by the loss.
            labels = [label + [-100] * (max_gen_length - len(label)) for label in labels]
            targets = flow.tensor(targets, dtype=input_ids.dtype)
            labels = flow.tensor(labels, dtype=input_ids.dtype)
            labels = flow.cat((input_ids.new_full((batch_size, seq_length), -100), labels), dim=1)
        for i in range(batch_size):
            mask_positions = []
            for mask_id in mask_ids:
                mask_positions += (input_ids[i] == mask_id).nonzero(as_tuple=True)[0].tolist()
            if not mask_positions:
                raise ValueError("Cannot find mask token in the input")
            mask_positions.sort()
            # Generate at the first (leftmost) mask token.
            mask_pos = mask_positions[0]
            # Generated tokens all share the mask's absolute position and get
            # increasing block positions 1..max_gen_length.
            position_ids.append(position_id + [mask_pos] * max_gen_length)
            block_position_ids.append(block_position_id + list(range(1, max_gen_length + 1)))
        position_ids = flow.tensor(position_ids, dtype=input_ids.dtype)
        block_position_ids = flow.tensor(block_position_ids, dtype=input_ids.dtype)
        position_ids = flow.stack((position_ids, block_position_ids), dim=1)
        attention_mask = model_input["attention_mask"]
        # Expand the 1D context mask to every (context + generated) query row.
        attention_mask = attention_mask.unsqueeze(1).expand(-1, seq_length + max_gen_length, -1)
        # Generated part: cannot be seen by the context (zeros block), causal
        # among itself (lower-triangular block).
        generation_attention_mask = (
            flow.cat(
                [
                    attention_mask.new_zeros((seq_length, max_gen_length)),
                    flow.tril(attention_mask.new_ones((max_gen_length, max_gen_length))),
                ],
                dim=0,
            )
            .unsqueeze(0)
            .expand(batch_size, -1, -1)
        )
        attention_mask = flow.cat((attention_mask, generation_attention_mask), dim=2)
        attention_mask = attention_mask.unsqueeze(1)
        if targets is None:
            # Inference: seed generation with a single sop token.
            input_ids = flow.cat(
                (input_ids, input_ids.new_full((batch_size, 1), self.sop_token_id)), dim=-1
            )
        else:
            # Training: feed sop + target[:-1] (last token only predicted).
            input_ids = flow.cat((input_ids, targets[:, :-1]), dim=1)
        batch = {"input_ids": input_ids, "position_ids": position_ids}
        if labels is None:
            batch["generation_attention_mask"] = attention_mask
        else:
            batch["attention_mask"] = attention_mask
            batch["labels"] = labels
        return batch
class GLMRobertaTokenizer(RobertaTokenizer, GLMTokenizerMixin):
    """RoBERTa-based GLM tokenizer.

    This vocabulary only has the plain mask token; the generation-oriented
    ``[gMASK]``/``[sMASK]`` tokens are not supported.
    """

    model_input_names = ["input_ids", "position_ids", "attention_mask"]
    # Truncate from the left so the mask/answer region at the end survives.
    truncation_side: str = "left"

    @property
    def gmask_token_id(self) -> int:
        # Override the mixin: no [gMASK] entry in this vocabulary.
        raise NotImplementedError("The model doesn't support gMASK")

    @property
    def smask_token_id(self) -> int:
        # Override the mixin: no [sMASK] entry in this vocabulary.
        raise NotImplementedError("The model doesn't support sMASK")

    @property
    def mask_token_ids(self):
        # Only the ordinary mask token is available.
        return [self.mask_token_id]
class GLMChineseTokenzier(GLMTokenizerMixin):
    """SentencePiece-based tokenizer for Chinese GLM checkpoints.

    Note: the class name keeps the historical "Tokenzier" spelling for
    backward compatibility with existing imports.
    """

    vocab_files_names = {"vocab_file": "cog-pretrain.model"}
    # Truncate from the left so the mask/answer region at the end survives.
    truncation_side: str = "left"

    def __init__(
        self,
        vocab_file,
        eos_token="<|endoftext|>",
        unk_token="[UNK]",
        pad_token="<|endoftext|>",
        additional_special_tokens=None,
        add_bos_token=False,
        **kwargs,
    ):
        """
        Args:
            vocab_file (str): path to the SentencePiece model file.
            eos_token / unk_token / pad_token (str): special-token surfaces.
            additional_special_tokens (list, optional): extra special tokens.
                Defaults to the GLM generation tokens when ``None``.
            add_bos_token (bool): whether a BOS token should be prepended.
        """
        # Fix: use a None sentinel instead of a mutable list default argument,
        # which would be a single object shared across every instantiation.
        if additional_special_tokens is None:
            additional_special_tokens = [
                "<|startofpiece|>",
                "<|endofpiece|>",
                "[gMASK]",
                "[sMASK]",
            ]
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.add_bos_token = add_bos_token
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
        # Hard-wired special-token surfaces for this vocabulary.
        self._eos_token = "<|endoftext|>"
        self._unk_token = "[UNK]"
        self._pad_token = "<|endoftext|>"
        self._cls_token = "[CLS]"
        self._mask_token = "[MASK]"

    @property
    def vocab_size(self):
        """Size of the SentencePiece vocabulary (excluding added tokens)."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text, **kwargs):
        # Delegate subword segmentation to SentencePiece.
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Detokenize a list of SentencePiece pieces back into text."""
        return self.sp_model.decode(tokens)

    def save_vocabulary(
        self, save_directory: str, filename_prefix: Optional[str] = None
    ) -> Tuple[str]:
        """Save the SentencePiece model file into ``save_directory``.

        Returns:
            A 1-tuple with the written file path, or ``None`` (after logging
            an error) when ``save_directory`` is not a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "")
            + self.vocab_files_names["vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(
            self.vocab_file
        ):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Source file no longer exists: serialize the in-memory model.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks
        by concatenating and
        adding special tokens. A BERT sequence has the following format:
        - single sequence: ``[CLS] X [SEP]``
        - pair of sequences: ``[CLS] A [SEP] B [SEP]``
        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
        Returns:
            :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the
            appropriate special tokens.
        """
        # GLM only supports single sequences here.
        assert token_ids_1 is None
        cls = [self.cls_token_id]
        eos = [self.eos_token_id]
        return cls + token_ids_0 + eos
class GLMGPT2Tokenizer(GPT2Tokenizer, GLMTokenizerMixin):
    """GPT-2/BPE-based GLM tokenizer with the GLM special tokens mixed in."""

    model_input_names = ["input_ids", "position_ids", "attention_mask"]
    # Truncate from the left so the mask/answer region at the end survives.
    truncation_side: str = "left"

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_bos_token=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            errors,
            unk_token,
            bos_token,
            eos_token,
            add_bos_token,
            **kwargs,
        )
        # On top of the GPT-2 vocabulary, GLM relies on [CLS] and [MASK].
        self.cls_token = "[CLS]"
        self.mask_token = "[MASK]"

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Wrap a single tokenized sequence as ``[CLS] X <eos>``.

        Args:
            token_ids_0 (:obj:`List[int]`): ids the special tokens are added to.
            token_ids_1 (:obj:`List[int]`, `optional`): must be ``None`` —
                sequence pairs are not supported for GLM.

        Returns:
            :obj:`List[int]`: input ids with the special tokens attached.
        """
        assert token_ids_1 is None
        return [self.cls_token_id, *token_ids_0, self.eos_token_id]
class GLMBertTokenizer(BertTokenizer, GLMTokenizerMixin):
    """BERT-based GLM tokenizer.

    This vocabulary only has the plain mask token; the generation-oriented
    ``[gMASK]``/``[sMASK]`` tokens are not supported.
    """

    model_input_names = ["input_ids", "position_ids", "attention_mask"]
    # Truncate from the left so the mask/answer region at the end survives.
    truncation_side: str = "left"

    @property
    def gmask_token_id(self) -> int:
        # Override the mixin: no [gMASK] entry in this vocabulary.
        raise NotImplementedError("The model doesn't support gMASK")

    @property
    def smask_token_id(self) -> int:
        # Override the mixin: no [sMASK] entry in this vocabulary.
        raise NotImplementedError("The model doesn't support sMASK")

    @property
    def mask_token_ids(self):
        # Only the ordinary mask token is available.
        return [self.mask_token_id]
| 15,872 | 36.792857 | 100 | py |
libai | libai-main/projects/GLM/layers/position_embedding.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import oneflow as flow
from oneflow import nn
import libai.utils.distributed as dist
class SinePositionalEmbedding(nn.Module):
    """Fixed (non-trainable) sinusoidal positional-embedding table.

    The table is built once as a broadcast global tensor: the first half of
    each row holds ``sin(pos * div_term)`` and the second half
    ``cos(pos * div_term)`` (half-split layout rather than the interleaved
    even/odd layout used by some implementations).
    """

    def __init__(self, num_embeddings, embedding_dim):
        """
        Args:
            num_embeddings (int): maximum number of positions in the table.
            embedding_dim (int): size of each positional vector.
                NOTE(review): the half-split slicing below assumes an even
                ``embedding_dim`` — confirm odd widths never occur.
        """
        super().__init__()
        self.embedding_dim = embedding_dim
        self.num_embeddings = num_embeddings
        # Allocate the table as a broadcast global tensor on stage-0 placement.
        position_embedding = flow.zeros(
            num_embeddings,
            embedding_dim,
            dtype=flow.float32,
            placement=dist.get_layer_placement(0),
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        )
        # Column vector of positions, shape (num_embeddings, 1).
        position = flow._C.global_arange(
            start=0,
            end=num_embeddings,
            placement=dist.get_layer_placement(0),
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            dtype=flow.float32,
        ).unsqueeze(1)
        # Even indices 0, 2, 4, ... drive the per-dimension frequencies.
        position_range = flow._C.global_arange(
            start=0,
            end=embedding_dim,
            step=2,
            placement=dist.get_layer_placement(0),
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            dtype=flow.float32,
        )
        # Classic transformer frequency schedule: 10000^(-2i/d).
        div_term = flow.exp(position_range * (-math.log(10000.0) / embedding_dim))
        position_embedding[:, : embedding_dim // 2] = flow.sin(position * div_term)
        position_embedding[:, embedding_dim // 2 :] = flow.cos(position * div_term)
        # Buffer: persisted with the module's state dict but never trained.
        self.register_buffer("position_embedding", position_embedding)

    def forward(self, position_ids):
        """Gather table rows for the given position ids."""
        position_embeds = flow._C.gather(self.position_embedding, position_ids, axis=0)
        return position_embeds

    def extra_repr(self) -> str:
        # Shown in repr(module) for debugging.
        s = "num_embeddings={num_embeddings}, embedding_dim={embedding_dim}"
        return s.format(**self.__dict__)
| 2,439 | 35.969697 | 87 | py |
libai | libai-main/projects/GLM/layers/embedding_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
import libai.utils.distributed as dist
from libai.layers import Embedding, VocabEmbedding
from libai.models.utils import init_method_normal
class GLMEmbedding(nn.Module):
    """GLM input embedding: word embeddings plus learned positional embeddings
    (optionally GLM's 2D scheme), followed by dropout.

    With ``block_position_encoding=True`` each token carries two position
    ids — the absolute position and the position inside the generated
    block — each with its own embedding table.

    NOTE(review): with ``block_position_encoding=False`` no
    ``position_embeddings`` table is created, yet ``forward`` still calls it —
    only the ``True`` configuration appears supported; confirm.
    """

    def __init__(
        self,
        vocab_size,
        hidden_size,
        max_seq_length,
        padding_idx=None,
        init_method=init_method_normal(0.02, 0),
        embedding_dropout_prob=0.0,
        amp_enabled=False,
        block_position_encoding=False,
    ):
        super().__init__()
        self.block_position_encoding = block_position_encoding
        # Tensor-parallel-aware word embedding table.
        self.word_embeddings = VocabEmbedding(
            vocab_size,
            hidden_size,
            padding_idx=padding_idx,
            init_method=init_method,
            amp_enabled=amp_enabled,
        )
        if block_position_encoding:
            # +1 slot: block position ids run from 1 (0 marks the context).
            self.position_embeddings = Embedding(
                max_seq_length + 1, hidden_size, init_method=init_method, amp_enabled=amp_enabled
            )
            self.block_position_embeddings = Embedding(
                max_seq_length + 1, hidden_size, init_method=init_method, amp_enabled=amp_enabled
            )
        self.embedding_dropout = nn.Dropout(embedding_dropout_prob)
        # Default id buffers (broadcast, stage-0) used when the caller
        # supplies no position_ids.
        self.position_ids = flow.arange(
            max_seq_length,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        ).unsqueeze(0)
        self.block_position_ids = flow.zeros(
            (1, max_seq_length),
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )

    def forward(self, input_ids, position_ids=None):
        bsz, seq_len = input_ids.size()
        # 2D case: position_ids arrives stacked as (bsz, 2, seq_len); split
        # into absolute and block components.
        if self.block_position_encoding and position_ids is not None:
            position_ids, block_position_ids = position_ids[:, 0], position_ids[:, 1]
        if position_ids is None:
            # Fall back to the default 0..seq_len-1 / all-zero-block ids.
            position_ids = self.position_ids[:, :seq_len]
            position_ids = position_ids.expand_as(input_ids).to_global(sbp=input_ids.sbp)
            block_position_ids = self.block_position_ids[:, :seq_len]
            block_position_ids = block_position_ids.expand_as(input_ids).to_global(
                sbp=input_ids.sbp
            )
        word_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        input_embeddings = word_embeddings + position_embeddings
        if self.block_position_encoding:
            block_position_embeddings = self.block_position_embeddings(block_position_ids)
            input_embeddings = input_embeddings + block_position_embeddings
        input_embeddings = self.embedding_dropout(input_embeddings)
        return input_embeddings
| 3,541 | 35.895833 | 97 | py |
libai | libai-main/projects/GLM/layers/transformer_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow.nn as nn
from libai.layers.layer_norm import LayerNorm
from libai.layers.mlp import MLP
from libai.utils import distributed as dist
from projects.GLM.layers.attention_layer import MultiheadAttention
class TransformerLayer(nn.Module):
    """One pre-LayerNorm GLM transformer block:

        ``x = x + Attn(LN(x))``;  ``x = x + MLP(LN(x))``

    Supports an optional memory tensor ``mem`` (cached hidden states from a
    previous segment) that attention prepends to its keys/values.
    """

    def __init__(
        self,
        hidden_size,
        num_attention_heads,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        layernorm_epsilon=1e-5,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        attention_scale=1.0,
        *,
        layer_idx=0
    ):
        """
        Args:
            hidden_size (int): model width.
            num_attention_heads (int): number of attention heads.
            attention_dropout_prob (float): dropout rate on attention weights.
            output_dropout_prob (float): dropout rate on sub-layer outputs.
            layernorm_epsilon (float): LayerNorm epsilon.
            init_method: initializer for input projections.
            output_layer_init_method: initializer for output projections;
                falls back to ``init_method`` when ``None``.
            bias_gelu_fusion / bias_dropout_fusion / scale_mask_softmax_fusion
                (bool): toggles for fused kernels.
            apply_query_key_layer_scaling (bool): layer-index-dependent
                attention-score scaling.
            attention_scale (float): extra attention scaling factor.
            layer_idx (int, keyword-only): index used for pipeline-stage
                placement of this layer's tensors.
        """
        super().__init__()
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.attention_dropout_prob = attention_dropout_prob
        self.output_dropout_prob = output_dropout_prob
        self.layernorm_epsilon = layernorm_epsilon
        self.attention_scale = attention_scale
        self.layer_idx = layer_idx
        self.bias_gelu_fusion = bias_gelu_fusion
        self.bias_dropout_fusion = bias_dropout_fusion
        self.scale_mask_softmax_fusion = scale_mask_softmax_fusion
        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
        self.init_method = init_method
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        self.output_layer_init_method = output_layer_init_method
        self.input_layernorm = LayerNorm(
            self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
        )
        self.attention = self.build_attention()
        self.post_attention_layernorm = LayerNorm(
            self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
        )
        # Standard 4x-width feed-forward network.
        self.mlp = MLP(
            self.hidden_size,
            4 * self.hidden_size,
            self.output_dropout_prob,
            self.init_method,
            output_layer_init_method=self.output_layer_init_method,
            bias_gelu_fusion=self.bias_gelu_fusion,
            bias_dropout_fusion=self.bias_dropout_fusion,
            layer_idx=self.layer_idx,
        )

    def forward(
        self,
        hidden_states,
        attention_mask,
        mem=None,
    ):
        # Move all inputs onto this layer's pipeline-stage placement.
        hidden_states = hidden_states.to_global(placement=dist.get_layer_placement(self.layer_idx))
        attention_mask = (
            attention_mask.to_global(placement=dist.get_layer_placement(self.layer_idx))
            if attention_mask is not None
            else None
        )
        mem = (
            mem.to_global(placement=dist.get_layer_placement(self.layer_idx))
            if mem is not None
            else None
        )
        # Pre-LN attention sub-layer with residual connection. The cached
        # memory is normalized with the same input LayerNorm.
        layernorm_output = self.input_layernorm(hidden_states)
        mem = self.input_layernorm(mem) if mem is not None else None
        attention_output = self.attention(
            layernorm_output,
            attention_mask=attention_mask,
            mem=mem,
        )
        hidden_states = hidden_states + attention_output
        # Pre-LN MLP sub-layer with residual connection.
        layernorm_output = self.post_attention_layernorm(hidden_states)
        mlp_output = self.mlp(layernorm_output)
        output = hidden_states + mlp_output
        return output

    def build_attention(self):
        """Construct this layer's self-attention module."""
        return MultiheadAttention(
            self.hidden_size,
            self.num_attention_heads,
            attention_dropout_prob=self.attention_dropout_prob,
            output_dropout_prob=self.output_dropout_prob,
            init_method=self.init_method,
            output_layer_init_method=self.output_layer_init_method,
            bias_dropout_fusion=self.bias_dropout_fusion,
            scale_mask_softmax_fusion=self.scale_mask_softmax_fusion,
            apply_query_key_layer_scaling=self.apply_query_key_layer_scaling,
            attention_scale=self.attention_scale,
            layer_idx=self.layer_idx,
        )
| 4,684 | 34.225564 | 99 | py |
libai | libai-main/projects/GLM/layers/attention_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import oneflow as flow
from oneflow import nn
from libai.layers.linear import Linear
class MultiheadAttention(nn.Module):
    """GLM self-attention using a fused QKV projection and OneFlow's fused
    attention-inference kernel.

    NOTE(review): ``attention_mask`` is moved to the right placement in
    ``forward`` but never passed to the fused kernel — masking relies solely
    on ``causal=True`` (with a diagonal offset when ``mem`` is present).
    Likewise ``self.dropout``, ``self.norm_factor`` and
    ``self.attention_scale`` are initialized but not applied on this fused
    inference path. Confirm this is intentional (inference-only module).
    """

    def __init__(
        self,
        hidden_size,
        num_attention_heads,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        attention_scale=1.0,
        *,
        layer_idx=0
    ):
        """
        Args:
            hidden_size (int): model width; must divide evenly by
                ``num_attention_heads``.
            num_attention_heads (int): number of attention heads.
            attention_dropout_prob / output_dropout_prob (float): dropout rates.
            init_method: initializer for the QKV projection.
            output_layer_init_method: initializer for the output projection;
                falls back to ``init_method`` when ``None``.
            bias_dropout_fusion (bool): fuse bias-add with dropout on output.
            scale_mask_softmax_fusion / apply_query_key_layer_scaling (bool):
                softmax-scaling options.
            attention_scale (float): extra attention scaling factor.
            layer_idx (int, keyword-only): pipeline-stage index.
        """
        super().__init__()
        self.hidden_size = hidden_size
        self.attention_scale = attention_scale
        self.num_attention_heads = num_attention_heads
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        assert (
            hidden_size % num_attention_heads == 0
        ), "hidden_size must be divisible by num_attention_heads."
        self.num_heads = num_attention_heads
        self.head_size = hidden_size // num_attention_heads
        self.attention_dropout_prob = attention_dropout_prob
        self.dropout = nn.Dropout(p=attention_dropout_prob)
        # 1/sqrt(d_head) scaling, optionally divided further by (layer_idx+1).
        self.norm_factor = 1.0 / math.sqrt(float(self.head_size))
        self.coeff = None
        if apply_query_key_layer_scaling:
            self.coeff = layer_idx + 1
            self.norm_factor /= self.coeff
        self.scale_mask_softmax_fusion = scale_mask_softmax_fusion
        self.bias_dropout_fusion = bias_dropout_fusion
        if self.bias_dropout_fusion:
            self.output_dropout_prob = output_dropout_prob
        else:
            self.output_dropout = nn.Dropout(p=output_dropout_prob)
        # Single fused projection producing Q, K and V (column-parallel).
        self.query_key_value = Linear(
            self.hidden_size,
            self.hidden_size * 3,
            parallel="col",
            init_method=init_method,
            layer_idx=layer_idx,
        )
        # Output projection (row-parallel); bias deferred when fusing with dropout.
        self.dense = Linear(
            self.hidden_size,
            self.hidden_size,
            parallel="row",
            init_method=output_layer_init_method,
            skip_bias_add=self.bias_dropout_fusion,
            layer_idx=layer_idx,
        )

    def forward(
        self,
        hidden_states: flow.Tensor,
        attention_mask: flow.Tensor = None,
        mem=None,
    ):
        attention_mask = (
            attention_mask.to_global(placement=hidden_states.placement)
            if attention_mask is not None
            else None
        )
        # Capture the query length before memory is prepended.
        bsz, tgt_len = hidden_states.size()[:2]
        if mem is not None:
            # Prepend cached states so keys/values cover memory + current tokens.
            hidden_states = flow.cat((mem, hidden_states), dim=1)
        query_key_value = self.query_key_value(hidden_states)
        # (B, S, H*3h) -> (B, num_heads, S, 3*head_size) -> split into Q/K/V.
        query_key_value = query_key_value.view(bsz, -1, self.num_heads, 3 * self.head_size)
        query_key_value = query_key_value.permute(0, 2, 1, 3)
        query, key, value = flow.chunk(query_key_value, chunks=3, dim=-1)
        if mem is not None:
            # Queries come only from the current (non-memory) positions.
            query = query[:, :, -tgt_len:]
        # Fused causal attention; the diagonal offset aligns the causal mask
        # when keys are longer than queries (memory case).
        context = flow._C.fused_multi_head_attention_inference_v2(
            query=query,
            key=key,
            value=value,
            query_head_size=int(self.hidden_size // self.num_attention_heads),
            causal=True,
            causal_diagonal_offset=key.shape[2] - query.shape[2],
            query_layout="BHMK",
            key_layout="BHMK",
            value_layout="BHMK",
            output_layout="BM(HK)",
        )
        output = self.dense(context)
        if self.bias_dropout_fusion:
            # skip_bias_add returned (output, bias); fuse bias-add + dropout.
            output, bias = output
            output = flow._C.fused_bias_add_dropout(
                output, bias, p=self.output_dropout_prob, axis=output.ndim - 1
            )
        else:
            output = self.output_dropout(output)
        return output

    def extra_repr(self) -> str:
        # Shown in repr(module) for debugging.
        return "hidden_size={}, num_heads={}".format(
            self.hidden_size,
            self.num_heads,
        )
| 4,610 | 31.935714 | 91 | py |
libai | libai-main/projects/GLM/configs/glm_inference.py | from omegaconf import DictConfig
from libai.config import LazyCall
from projects.GLM.modeling_glm import GLMModel
# Default GLM architecture + generation configuration, wrapped into an
# OmegaConf DictConfig and used to lazily build `GLMModel` below.
# NOTE(review): the sizes (48 layers, hidden 4096, 64 heads, vocab 30592)
# presumably match the glm-10b-chinese checkpoint — confirm against its
# config.json.
cfg = dict(
    # --- Model architecture ---
    num_layers=48,
    vocab_size=30592,
    hidden_size=4096,
    num_attention_heads=64,
    max_sequence_length=1024,
    # Dropout disabled for inference.
    embedding_dropout_prob=0.0,
    attention_dropout_prob=0.0,
    output_dropout_prob=0.0,
    layernorm_epsilon=1e-5,
    initializer_range=0.02,
    use_scaled_init_for_output_weights=True,
    # Kernel-fusion / precision options.
    bias_gelu_fusion=True,
    bias_dropout_fusion=True,
    scale_mask_softmax_fusion=False,
    apply_query_key_layer_scaling=False,
    amp_enabled=True,
    # GLM's 2D (position, block-position) encoding.
    block_position_encoding=True,
    attention_scale=1.0,
    padding_idx=None,
    # Inference
    is_encoder_decoder=False,
    max_length=512,
    min_length=0,
    do_sample=False,
    early_stopping=False,
    num_beams=1,
    num_beam_groups=1,
    diversity_penalty=0.0,
    temperature=1.0,
    top_k=50,
    top_p=1.0,
    typical_p=1.0,
    repetition_penalty=1.0,
    length_penalty=1.0,
    no_repeat_ngram_size=0,
    encoder_no_repeat_ngram_size=0,
    num_return_sequences=1,
    chunk_size_feed_forward=0,
    output_scores=False,
    forced_bos_token_id=None,
    forced_eos_token_id=None,
    remove_invalid_values=False,
    exponential_decay_length_penalty=None,
    use_cache=False,
    # Tokenizer
    pad_token_id=50000,
    eos_token_id=50007,
    bos_token_id=None,
    sep_token_id=None,
    decoder_start_token_id=None,
)
cfg = DictConfig(cfg)

# Lazily-constructed model; instantiated by LazyConfig machinery at runtime.
glm_model = LazyCall(GLMModel)(cfg=cfg)
| 1,514 | 23.435484 | 46 | py |
libai | libai-main/projects/GLM/utils/glm_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from libai.models.utils import ModelLoaderHuggerFace, ModelLoaderLiBai
class GLMLoaderHuggerFace(ModelLoaderHuggerFace):
    """Load a HuggingFace-Transformers GLM checkpoint into a LiBai GLM model."""

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)
        # NOTE: base_model_prefix_1 is GLM's prefix in Transformers,
        # base_model_prefix_2 is GLM's prefix in LiBai.
        # (This used to be a misplaced no-op string literal; made a comment.)
        self.base_model_prefix_1 = "glm"
        self.base_model_prefix_2 = "glm"

    def _convert_state_dict(self, flow_state_dict, cfg):
        """Convert state_dict's keys to match model.

        Args:
            flow_state_dict (OrderedDict): model state dict.
            cfg (dict): model's default config dict in LiBai.

        Returns:
            OrderedDict: flow state dict.
        """
        # The converted checkpoint.
        oneflow_state_dict = flow_state_dict.copy()
        old_keys = list(oneflow_state_dict.keys())

        # Configs needed to re-order the fused QKV parameters.
        num_heads = cfg.get("num_attention_heads")
        hidden_size = cfg.get("hidden_size")
        head_size = int(hidden_size / num_heads)

        # Source (Transformers) / target (LiBai) key prefixes.
        has_prefix = any(s.startswith(self.base_model_prefix_1) for s in oneflow_state_dict)
        prefix1 = self.base_model_prefix_1 + "." if has_prefix else ""
        # Consistency fix: derive the target prefix from the declared
        # attribute instead of hard-coding "glm.".
        prefix2 = self.base_model_prefix_2 + "." if has_prefix else ""

        # Embedding tables move under "embeddings." in the LiBai layout.
        new_key = prefix2 + "embeddings.word_embeddings.weight"
        old_keys.remove(prefix1 + "word_embeddings.weight")
        oneflow_state_dict[new_key] = oneflow_state_dict.pop(prefix1 + "word_embeddings.weight")
        if cfg.get("block_position_encoding", False) is True:
            new_key = prefix2 + "embeddings.position_embeddings.weight"
            old_keys.remove(prefix1 + "transformer.position_embeddings.weight")
            oneflow_state_dict[new_key] = oneflow_state_dict.pop(
                prefix1 + "transformer.position_embeddings.weight"
            )
            new_key = prefix2 + "embeddings.block_position_embeddings.weight"
            old_keys.remove(prefix1 + "transformer.block_position_embeddings.weight")
            oneflow_state_dict[new_key] = oneflow_state_dict.pop(
                prefix1 + "transformer.block_position_embeddings.weight"
            )

        # Remaining keys: re-order fused QKV weights; otherwise just re-prefix.
        for key in old_keys:
            if "query_key_value" in key:
                qkv = oneflow_state_dict.pop(key)
                qkv = self._fix_qkv_ordering(qkv, head_size, num_heads)
                oneflow_state_dict[prefix2 + key] = qkv
            else:
                oneflow_state_dict[prefix2 + key] = oneflow_state_dict.pop(key)
        return oneflow_state_dict

    def _load_config_from_json(self, config_file):
        """load config from `config.json`, and update default config.

        Args:
            config_file (str): Path of config file.
        """
        with open(config_file, mode="r", encoding="utf-8") as f:
            cfg_dict = json.load(f)

        # Update libai_cfg from config.json first...
        for k, v in cfg_dict.items():
            self._update_cfg(k, v)

        # ...then let explicit constructor kwargs override both.
        for k, v in self.kwargs.items():
            self._update_cfg(k, v)
        self._update_cfg_log()
class GLMLoaderLiBai(ModelLoaderLiBai):
    """Load a LiBai-format GLM checkpoint into a LiBai GLM model."""

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)
        # Prefix used for the keys of the converted (LiBai) state dict.
        # NOTE(review): _convert_state_dict also reads
        # ``self.base_model_prefix_1`` which is never assigned in this class —
        # presumably provided by ModelLoaderLiBai; confirm.
        self.base_model_prefix_2 = "glm"

    def _convert_state_dict(self, flow_state_dict, cfg):
        """Convert state_dict's keys to match model.

        Args:
            flow_state_dict (OrderedDict): model state dict.
            cfg (dict): model's default config dict in LiBai.

        Returns:
            OrderedDict: flow state dict.
        """
        # The converted checkpoint.
        oneflow_state_dict = flow_state_dict.copy()
        old_keys = list(oneflow_state_dict.keys())

        # Configs needed to re-order the fused QKV parameters.
        num_heads = cfg.get("num_attention_heads")
        hidden_size = cfg.get("hidden_size")
        head_size = int(hidden_size / num_heads)

        # Source / target key prefixes.
        has_prefix = any(s.startswith(self.base_model_prefix_1) for s in oneflow_state_dict)
        prefix1 = self.base_model_prefix_1 + "." if has_prefix else ""
        # Consistency fix: derive the target prefix from the declared
        # attribute instead of hard-coding "glm.".
        prefix2 = self.base_model_prefix_2 + "." if has_prefix else ""

        # Embedding tables move under "embeddings." in the LiBai layout.
        new_key = prefix2 + "embeddings.word_embeddings.weight"
        old_keys.remove(prefix1 + "word_embeddings.weight")
        oneflow_state_dict[new_key] = oneflow_state_dict.pop(prefix1 + "word_embeddings.weight")
        if cfg.get("block_position_encoding", False) is True:
            new_key = prefix2 + "embeddings.position_embeddings.weight"
            old_keys.remove(prefix1 + "transformer.position_embeddings.weight")
            oneflow_state_dict[new_key] = oneflow_state_dict.pop(
                prefix1 + "transformer.position_embeddings.weight"
            )
            new_key = prefix2 + "embeddings.block_position_embeddings.weight"
            old_keys.remove(prefix1 + "transformer.block_position_embeddings.weight")
            oneflow_state_dict[new_key] = oneflow_state_dict.pop(
                prefix1 + "transformer.block_position_embeddings.weight"
            )

        # Remaining keys: re-order fused QKV weights; otherwise just re-prefix.
        for key in old_keys:
            if "query_key_value" in key:
                qkv = oneflow_state_dict.pop(key)
                qkv = self._fix_qkv_ordering(qkv, head_size, num_heads)
                oneflow_state_dict[prefix2 + key] = qkv
            else:
                oneflow_state_dict[prefix2 + key] = oneflow_state_dict.pop(key)
        return oneflow_state_dict
| 6,333 | 38.836478 | 96 | py |
libai | libai-main/projects/NeRF/evaluation/nerf_evaluator.py | import copy
import math
import os
from collections import OrderedDict
from datetime import datetime
import cv2
import flowvision.transforms as T
import imageio
import numpy as np
import oneflow as flow
from PIL import Image
from libai.evaluation.cls_evaluator import ClsEvaluator
from libai.utils import distributed as dist
class NerfEvaluator(ClsEvaluator):
    """Evaluator for the NeRF system: accumulates per-image loss and PSNR
    and saves rendered/ground-truth/depth images for the first sample."""

    def __init__(self, img_wh, image_save_path=None):
        """
        Args:
            img_wh (tuple(int)): the width and height of the images in the validation set
            image_save_path (str): location of image storage; defaults to
                ``<this file's directory>/../images``
        """
        super().__init__(topk=(1, 5))
        self.img_wh = img_wh
        self.image_save_path = (
            str(os.path.dirname(os.path.realpath(__file__))) + "/../images"
            if image_save_path is None
            else image_save_path
        )
        if not os.path.exists(self.image_save_path):
            os.makedirs(self.image_save_path)
        # Tensor (C, H, W) -> PIL image converter, reused for all saves.
        self.toimage = T.ToPILImage()
    def current_time(self):
        # Timestamp used to make saved image filenames unique (HH_MM_SS).
        currentDateAndTime = datetime.now()
        currentTime = currentDateAndTime.strftime("%H_%M_%S")
        return currentTime
    def process(self, inputs, outputs):
        """
        Inputs:
            inputs (dict): Inputs to NeRF System
            outputs (dict): Outputs to NeRF System
        Outputs:
            None
        """
        losses, rgbs = (
            outputs["losses"],
            outputs["rgbs"].squeeze(0),
        )
        # NOTE(review): assumes the second key identifies the model variant
        # (used as the "rgb_{typ}"/"depth_{typ}" suffix) — relies on dict
        # insertion order; confirm against the NeRF system's output layout.
        typ = list(outputs.keys())[1]
        outputs.pop(typ)
        outputs.pop("losses")
        outputs.pop("rgbs")
        # Drop the leading batch dim from the remaining prediction tensors.
        results = {k: v.squeeze(0) for k, v in outputs.items()}
        if len(self._predictions) == 0:
            # Save visualization images only for the first processed sample.
            W, H = self.img_wh
            img = results[f"rgb_{typ}"].view(H, W, 3).cpu()
            img = img.permute(2, 0, 1)  # (3, H, W)
            img_gt = rgbs.view(H, W, 3).permute(2, 0, 1).cpu()  # (3, H, W)
            depth = self.visualize_depth(results[f"depth_{typ}"].view(H, W))  # (3, H, W)
            img = self.toimage(img)
            img_gt = self.toimage(img_gt)
            depth = self.toimage(depth)
            # NOTE(review): `quality` has no effect for PNG saves in PIL — harmless.
            img.save(
                os.path.join(self.image_save_path, f"img_{self.current_time()}.png"), quality=100
            )
            img_gt.save(
                os.path.join(self.image_save_path, f"img_gt_{self.current_time()}.png"), quality=100
            )
            depth.save(
                os.path.join(self.image_save_path, f"depth_{self.current_time()}.png"), quality=100
            )
        psnr = self.psnr(results[f"rgb_{typ}"], rgbs)
        self._predictions.append({"losses": losses.item(), "psnr": psnr.item()})
    def evaluate(self):
        # Average the accumulated loss/PSNR; only rank 0 returns results.
        if not dist.is_main_process():
            return {}
        else:
            predictions = self._predictions
            total_correct_num = OrderedDict()
            total_correct_num["losses"] = 0
            total_correct_num["psnr"] = 0
            total_samples = 0
            for prediction in predictions:
                losses = prediction["losses"]
                psnr = prediction["psnr"]
                total_correct_num["losses"] += losses
                total_correct_num["psnr"] += psnr
                total_samples += 1
            self._results = OrderedDict()
            for key, value in total_correct_num.items():
                self._results[key] = value / total_samples
            return copy.deepcopy(self._results)
    def visualize_depth(self, depth, cmap=cv2.COLORMAP_JET):
        """Map a (H, W) depth tensor to a false-color (3, H, W) tensor."""
        x = depth.cpu().numpy()
        x = np.nan_to_num(x)  # change nan to 0
        mi = np.min(x)  # get minimum depth
        ma = np.max(x)
        x = (x - mi) / (ma - mi + 1e-8)  # normalize to 0~1 (epsilon guards flat depth)
        x = (255 * x).astype(np.uint8)
        x_ = Image.fromarray(cv2.applyColorMap(x, cmap))
        x_ = T.ToTensor()(x_)  # (3, H, W)
        return x_
    def mse(self, image_pred, image_gt, valid_mask=None, reduction="mean"):
        """Squared error, optionally masked; mean-reduced when requested."""
        value = (image_pred - image_gt) ** 2
        if valid_mask is not None:
            value = value[valid_mask]
        if reduction == "mean":
            return flow.mean(value)
        return value
    def psnr(self, image_pred, image_gt, valid_mask=None, reduction="mean"):
        # PSNR in dB: -10 * log10(MSE), assuming pixel values in [0, 1].
        return -10 * flow.log(self.mse(image_pred, image_gt, valid_mask, reduction)) / math.log(10)
class NerfVisEvaluator(NerfEvaluator):
    """Evaluator variant that collects every rendered frame and writes an
    MP4 once all poses of the rendering path have been processed."""

    def __init__(self, img_wh, pose_dir_len, name):
        """
        Args:
            img_wh (tuple(int)): the width and height of the images in the validation set
            pose_dir_len (int): number of poses in the rendering path; the
                video is written when this many frames have been collected
            name (str): basename (without extension) of the output MP4
        """
        super().__init__(img_wh=img_wh)
        self.image_list = []
        self.pose_dir_len = pose_dir_len
        self.name = name
        # Reuse the image directory created by the parent class.
        self.mp4_save_path = self.image_save_path
    def to8b(self, x):
        # Clamp to [0, 1] and convert to uint8 for video encoding.
        return (255 * np.clip(x, 0, 1)).astype(np.uint8)
    def process(self, inputs, outputs):
        """
        Inputs:
            inputs (dict): Inputs to NeRF System
            outputs (dict): Outputs to NeRF System
        Outputs:
            None
        """
        # NOTE(review): assumes the first key identifies the model variant
        # (suffix of "rgb_{typ}") — relies on dict insertion order; confirm.
        typ = list(outputs.keys())[0]
        outputs.pop(typ)
        results = {k: v.squeeze(0) for k, v in outputs.items()}
        W, H = self.img_wh
        img = results[f"rgb_{typ}"].view(H, W, 3).cpu().numpy()
        self.image_list.append(img)
        # Dummy metrics: this evaluator only produces the video.
        self._predictions.append({"losses": 0.0, "psnr": 0.0})
        if len(self._predictions) == self.pose_dir_len:
            # All frames collected: encode them into a 30 fps MP4.
            mp4_save_path = os.path.join(self.mp4_save_path, f"{self.name}.mp4")
            imageio.mimwrite(
                mp4_save_path, self.to8b(np.stack(self.image_list, 0)), fps=30, quality=8
            )
            print("successfully save mp4 file!")
| 5,641 | 33.613497 | 100 | py |
libai | libai-main/projects/NeRF/datasets/__init__.py | from .nerf_dataset import BlenderDataset, LLFFDataset
| 54 | 26.5 | 53 | py |
libai | libai-main/projects/NeRF/datasets/nerf_dataset.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import re
import sys
from collections import OrderedDict
from typing import Optional
import numpy as np
import oneflow as flow
from flowvision import transforms as T
from oneflow.utils.data import Dataset
from PIL import Image
from libai.data.structures import DistTensorData, Instance
def read_pfm(filename):
    """Read a PFM (Portable Float Map) image file.

    Args:
        filename: path to the ``.pfm`` file.

    Returns:
        (data, scale): ``data`` is a float32 numpy array of shape
        (H, W, 3) for color images or (H, W) for greyscale, flipped so
        row 0 is the top of the image (PFM stores rows bottom-to-top);
        ``scale`` is the absolute scale factor from the header.

    Raises:
        Exception: if the header magic or dimension line is malformed.
    """
    # Use a context manager so the handle is closed even when a malformed
    # header makes us raise mid-parse (the original leaked it).
    with open(filename, "rb") as file:
        header = file.readline().decode("utf-8").rstrip()
        if header == "PF":
            color = True  # 3-channel float image
        elif header == "Pf":
            color = False  # single-channel float image
        else:
            raise Exception("Not a PFM file.")
        dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("utf-8"))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception("Malformed PFM header.")
        # The sign of the scale line encodes endianness of the payload.
        scale = float(file.readline().rstrip())
        if scale < 0:  # little-endian
            endian = "<"
            scale = -scale
        else:
            endian = ">"  # big-endian
        data = np.fromfile(file, endian + "f")
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    # Flip to the conventional top-to-bottom row order.
    data = np.flipud(data)
    return data, scale
def save_pfm(filename, image, scale=1):
    """Write a float32 numpy image to a PFM (Portable Float Map) file.

    Args:
        filename: destination path.
        image: float32 array of shape (H, W, 3) for color, or (H, W) /
            (H, W, 1) for greyscale; row 0 is the top of the image.
        scale: positive scale factor stored in the header; its sign is
            flipped in the file to mark little-endian float data.

    Raises:
        Exception: if the dtype is not float32 or the shape is invalid.
    """
    # Validate BEFORE opening the file: the original opened (and truncated)
    # the destination first, clobbering it and leaking the handle on error.
    if image.dtype.name != "float32":
        raise Exception("Image dtype must be float32.")
    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
    # PFM stores rows bottom-to-top.
    image = np.flipud(image)
    # A negative scale in the header signals little-endian float payload.
    endian = image.dtype.byteorder
    if endian == "<" or endian == "=" and sys.byteorder == "little":
        scale = -scale
    # Context manager guarantees the handle is closed even if a write fails.
    with open(filename, "wb") as file:
        file.write("PF\n".encode("utf-8") if color else "Pf\n".encode("utf-8"))
        file.write("{} {}\n".format(image.shape[1], image.shape[0]).encode("utf-8"))
        file.write(("%f\n" % scale).encode("utf-8"))
        image.tofile(file)
# Preparatory conversion tools for 3D rendering
def create_meshgrid(
    height: int,
    width: int,
    normalized_coordinates: bool = True,
    device: Optional[flow.device] = flow.device("cpu"),
    dtype: flow.dtype = flow.float32,
):
    """Return a ``(1, H, W, 2)`` grid of per-pixel (x, y) coordinates.

    With ``normalized_coordinates=True`` the values span ``[-1, 1]``
    (the ``grid_sample`` convention); otherwise they are raw pixel
    indices ``0..W-1`` / ``0..H-1``.

    Args:
        height: the image height (rows).
        width: the image width (cols).
        normalized_coordinates: normalize the grid to ``[-1, 1]``.
        device: device on which the grid is generated.
        dtype: data type of the generated grid.
    """
    col_coords = flow.linspace(0, width - 1, width, device=device, dtype=dtype)
    row_coords = flow.linspace(0, height - 1, height, device=device, dtype=dtype)
    if normalized_coordinates:
        # Map [0, N-1] onto [-1, 1].
        col_coords = (col_coords / (width - 1) - 0.5) * 2
        row_coords = (row_coords / (height - 1) - 0.5) * 2
    # Stack into a (W, H, 2) grid, then reorder to (1, H, W, 2).
    grid = flow.stack(flow.meshgrid([col_coords, row_coords], indexing="ij"), dim=-1)
    return grid.permute(1, 0, 2).unsqueeze(0)
def get_rays(directions, c2w):
    """Lift per-pixel camera-space ray directions into world space.

    Inputs:
        directions: (H, W, 3) precomputed ray directions in camera coordinate
        c2w: (3, 4) transformation matrix from camera coordinate to world coordinate
    Outputs:
        rays_o: (H*W, 3), the origin of the rays in world coordinate
        rays_d: (H*W, 3), the direction of the rays in world coordinate
            (not re-normalized here)
    """
    # Apply only the rotation part of c2w to the direction vectors.
    rotation = c2w[:, :3]
    world_dirs = directions @ rotation.T  # (H, W, 3)
    # Every ray starts at the camera position (translation column of c2w).
    origins = c2w[:, 3].expand(world_dirs.shape)  # (H, W, 3)
    return origins.view(-1, 3), world_dirs.view(-1, 3)
def get_ray_directions(H, W, focal):
    """Compute camera-space ray directions for every pixel of an image.

    Inputs:
        H, W, focal: image height, width and focal length
    Outputs:
        directions: (H, W, 3), the direction of the rays in camera coordinate
    """
    pixel_grid = create_meshgrid(H, W, normalized_coordinates=False)[0]
    u, v = pixel_grid.unbind(-1)
    # Round-trip through numpy to obtain fresh standalone tensors
    # (detached from the meshgrid view).
    u = flow.tensor(u.numpy())
    v = flow.tensor(v.numpy())
    # Pinhole model: offset from the principal point divided by focal
    # length; all rays point towards negative z.
    x = (u - W / 2) / focal
    y = -(v - H / 2) / focal
    z = -flow.ones_like(u)
    return flow.stack([x, y, z], -1)  # (H, W, 3)
def get_ndc_rays(H, W, focal, near, rays_o, rays_d):
    """
    Transform rays from world coordinate to NDC.
    NDC: Space such that the canvas is a cube with sides [-1, 1] in each axis.
    For detailed derivation, please see:
    http://www.songho.ca/opengl/gl_projectionmatrix.html
    https://github.com/bmild/nerf/files/4451808/ndc_derivation.pdf
    https://pengfeixc.com/blogs/computer-graphics/3D-matrix-transformation-part-three
    In practice, use NDC "if and only if" the scene is unbounded (has a large depth).
    See https://github.com/bmild/nerf/issues/18
    Inputs:
        H, W, focal: image height, width and focal length
        near: (N_rays) or float, the depths of the near plane
        rays_o: (N_rays, 3), the origin of the rays in world coordinate
        rays_d: (N_rays, 3), the direction of the rays in world coordinate
    Outputs:
        rays_o: (N_rays, 3), the origin of the rays in NDC
        rays_d: (N_rays, 3), the direction of the rays in NDC
    """
    # Shift ray origins to near plane (solve o_z + t * d_z = -near for t).
    t = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t[..., None] * rays_d
    # Store some intermediate homogeneous results (x/z and y/z ratios).
    ox_oz = rays_o[..., 0] / rays_o[..., 2]
    oy_oz = rays_o[..., 1] / rays_o[..., 2]
    # Projection of the origin into NDC.
    o0 = -1.0 / (W / (2.0 * focal)) * ox_oz
    o1 = -1.0 / (H / (2.0 * focal)) * oy_oz
    o2 = 1.0 + 2.0 * near / rays_o[..., 2]
    # NDC direction: difference of projected points along the ray.
    d0 = -1.0 / (W / (2.0 * focal)) * (rays_d[..., 0] / rays_d[..., 2] - ox_oz)
    d1 = -1.0 / (H / (2.0 * focal)) * (rays_d[..., 1] / rays_d[..., 2] - oy_oz)
    # Chosen so that o2 + d2 == 1, i.e. o + d lands on the far plane (z = 1).
    d2 = 1 - o2
    rays_o = flow.stack([o0, o1, o2], -1)  # (B, 3)
    rays_d = flow.stack([d0, d1, d2], -1)  # (B, 3)
    return rays_o, rays_d
def normalize(v):
    """Return *v* scaled to unit Euclidean length."""
    length = np.linalg.norm(v)
    return v / length
def average_poses(poses):
    """
    Compute the "average" camera pose of a set of poses, later used by
    @center_poses to re-center the scene. Steps:
    1. Center: the mean of all pose centers.
    2. z axis: the normalized mean z axis.
    3. A provisional y axis: the mean y axis (not yet orthogonal to z).
    4. x axis: normalize(y' x z).
    5. Final y axis: z x x (unit length by construction).
    Inputs:
        poses: (N_images, 3, 4)
    Outputs:
        pose_avg: (3, 4) the average pose
    """
    center = poses[..., 3].mean(0)  # (3)
    z_axis = normalize(poses[..., 2].mean(0))  # (3)
    y_mean = poses[..., 1].mean(0)  # (3), provisional only
    x_axis = normalize(np.cross(y_mean, z_axis))  # (3)
    y_axis = np.cross(z_axis, x_axis)  # (3), orthonormal to z and x
    return np.stack([x_axis, y_axis, z_axis, center], 1)  # (3, 4)
def center_poses(poses):
    """
    Center the poses so that we can use NDC.
    See https://github.com/bmild/nerf/issues/34
    Inputs:
        poses: (N_images, 3, 4)
    Outputs:
        poses_centered: (N_images, 3, 4) the centered poses
        pose_avg_inv: (4, 4) inverse of the homogeneous average pose
            (the matrix that was applied to center the poses)
    """
    pose_avg = average_poses(poses)  # (3, 4)
    pose_avg_homo = np.eye(4)
    # Convert to homogeneous coordinate for faster computation
    # by simply keeping 0, 0, 0, 1 as the last row.
    pose_avg_homo[:3] = pose_avg
    last_row = np.tile(np.array([0, 0, 0, 1]), (len(poses), 1, 1))  # (N_images, 1, 4)
    poses_homo = np.concatenate([poses, last_row], 1)  # (N_images, 4, 4) homogeneous coordinate
    # Invert once and reuse it (the original inverted the same matrix twice);
    # the inverse both centers the poses and is returned so callers can undo
    # the centering later.
    pose_avg_inv = np.linalg.inv(pose_avg_homo)
    poses_centered = pose_avg_inv @ poses_homo  # (N_images, 4, 4)
    poses_centered = poses_centered[:, :3]  # (N_images, 3, 4)
    return poses_centered, pose_avg_inv
def create_spiral_poses(radii, focus_depth, n_poses=120):
    """
    Computes poses that follow a spiral path for rendering purpose.
    See https://github.com/Fyusion/LLFF/issues/19
    In particular, the path looks like:
    https://tinyurl.com/ybgtfns3
    Inputs:
        radii: (3) radii of the spiral for each axis
        focus_depth: float, the depth that the spiral poses look at
        n_poses: int, number of poses to create along the path
    Outputs:
        poses_spiral: (n_poses, 3, 4) the poses in the spiral path
    """
    up_hint = np.array([0, 1, 0])  # (3), world up used to build each frame
    focus_point = np.array([0, 0, -focus_depth])
    poses = []
    # Sweep 4*pi, i.e. two full turns around the spiral.
    for t in np.linspace(0, 4 * np.pi, n_poses + 1)[:-1]:
        # Parametric spiral position, scaled per-axis by the radii.
        center = np.array([np.cos(t), -np.sin(t), -np.sin(0.5 * t)]) * radii
        # Viewing z axis: from the focus plane towards the camera center.
        z = normalize(center - focus_point)
        # Remaining axes as in @average_poses.
        x = normalize(np.cross(up_hint, z))  # (3)
        y = np.cross(z, x)  # (3)
        poses.append(np.stack([x, y, z, center], 1))  # (3, 4)
    return np.stack(poses, 0)  # (n_poses, 3, 4)
def spheric_pose(theta, phi, radius):
    """Build one (3, 4) camera-to-world pose on a sphere of the given
    radius, parameterized by azimuth ``theta`` and elevation ``phi``
    (both in radians)."""
    translation = np.array(
        [
            [1, 0, 0, 0],
            [0, 1, 0, -0.9 * radius],
            [0, 0, 1, radius],
            [0, 0, 0, 1],
        ]
    )
    cos_p, sin_p = np.cos(phi), np.sin(phi)
    elevation = np.array(
        [
            [1, 0, 0, 0],
            [0, cos_p, -sin_p, 0],
            [0, sin_p, cos_p, 0],
            [0, 0, 0, 1],
        ]
    )
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    azimuth = np.array(
        [
            [cos_t, 0, -sin_t, 0],
            [0, 1, 0, 0],
            [sin_t, 0, cos_t, 0],
            [0, 0, 0, 1],
        ]
    )
    # Rotate around the sphere, then swap axes to the dataset's
    # world-coordinate convention.
    c2w = azimuth @ elevation @ translation
    axis_fix = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
    c2w = axis_fix @ c2w
    return c2w[:3]
def create_spheric_poses(radius, n_poses=120):
    """
    Create circular poses around z axis.
    Inputs:
        radius: the (negative) height and the radius of the circle.
    Outputs:
        spheric_poses: (n_poses, 3, 4) the poses in the circular path
    """
    # Evenly spaced azimuths over a full circle (endpoint dropped), each
    # looking 36 degrees downwards (-pi/5 elevation).
    angles = np.linspace(0, 2 * np.pi, n_poses + 1)[:-1]
    return np.stack([spheric_pose(angle, -np.pi / 5, radius) for angle in angles], 0)
def pose_spherical(theta, phi, radius):
    """Build a 4x4 camera-to-world matrix on a sphere of the given radius;
    ``theta`` and ``phi`` are given in degrees."""

    def _translate(t):
        return flow.Tensor(
            [
                [1, 0, 0, 0],
                [0, 1, 0, 0],
                [0, 0, 1, t],
                [0, 0, 0, 1],
            ]
        ).float()

    def _pitch(angle):
        c, s = np.cos(angle), np.sin(angle)
        return flow.Tensor(
            [
                [1, 0, 0, 0],
                [0, c, -s, 0],
                [0, s, c, 0],
                [0, 0, 0, 1],
            ]
        ).float()

    def _yaw(angle):
        c, s = np.cos(angle), np.sin(angle)
        return flow.Tensor(
            [
                [c, 0, -s, 0],
                [0, 1, 0, 0],
                [s, 0, c, 0],
                [0, 0, 0, 1],
            ]
        ).float()

    # Translate outwards, then rotate by elevation and azimuth
    # (degrees converted to radians here).
    c2w = _translate(radius)
    c2w = _pitch(phi / 180.0 * np.pi) @ c2w
    c2w = _yaw(theta / 180.0 * np.pi) @ c2w
    # Axis swap to the dataset's world-coordinate convention.
    axis_fix = flow.Tensor(np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
    return axis_fix @ c2w
def viewmatrix(z, up, pos):
    """Assemble a (3, 4) camera pose from a view direction ``z``, an up
    hint ``up`` and a camera position ``pos``, orthonormalizing the axes."""
    forward = normalize(z)
    right = normalize(np.cross(up, forward))
    true_up = normalize(np.cross(forward, right))
    # Columns: x (right), y (up), z (forward), position.
    return np.stack([right, true_up, forward, pos], 1)
def render_path_spiral(c2w, hwf, up, rads, focal, zdelta, zrate, rots, N):
    """Generate ``N`` camera poses along a spiral around the pose ``c2w``.

    ``zdelta`` is accepted for API compatibility but not used here.
    Returns a list of (3, 5) arrays: a (3, 4) pose with the hwf column
    appended.
    """
    hwf_col = hwf[:, None]
    radii = np.array(list(rads) + [1.0])  # homogeneous scaling vector
    # The point all spiral cameras look at (hoisted: constant per call).
    focus_point = np.dot(c2w, np.array([0, 0, -focal, 1.0]))
    poses = []
    for theta in np.linspace(0.0, 2.0 * np.pi * rots, N + 1)[:-1]:
        offset = np.array([np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.0]) * radii
        cam_center = np.dot(c2w, offset)
        view_dir = normalize(cam_center - focus_point)
        poses.append(np.concatenate([viewmatrix(view_dir, up, cam_center), hwf_col], 1))
    return poses
# Blender and LLFF Datasets
def trun_dict_to_instance(dict):
    """Wrap every value of *dict* as a DistTensorData tensor and build an
    Instance from the result. (Name kept for caller compatibility.)"""
    fields = {}
    for key, value in dict.items():
        fields[key] = DistTensorData(flow.tensor(value))
    return Instance(**fields)
class NerfBaseDataset(Dataset):
    """Common base for the NeRF datasets: stores split/root/image size and
    a shared ToTensor transform."""

    def __init__(self, root_dir, split, img_wh):
        super(NerfBaseDataset, self).__init__()
        self.root_dir = root_dir
        self.split = split
        self.img_wh = img_wh
        self.transform = T.Compose([T.ToTensor()])
        # Process-wide side effect. NOTE(review): presumably disables
        # oneflow's view ops — confirm why this is required here.
        os.environ["ONEFLOW_DISABLE_VIEW"] = "true"
    def load_meta(self):
        # Hook for subclasses: load split metadata / build ray buffers.
        pass
class BlenderDataset(NerfBaseDataset):
    """NeRF synthetic (Blender) dataset: reads camera poses and RGBA images
    from ``transforms_{split}.json`` and serves ray/RGB samples."""

    def __init__(self, root_dir, split="train", img_wh=(800, 800), batchsize=1024, **kwargs):
        """
        Args:
            root_dir: str, dataset root containing ``transforms_*.json``
            split: str, one of "train" / "val" / "test" / "vis"
            img_wh: tuple, target image (width, height)
            batchsize: int, number of rays sampled per training item
        """
        super(BlenderDataset, self).__init__(root_dir, split, img_wh)
        # Alpha is composited onto a white background below.
        self.white_back = True
        self.batchsize = batchsize
        self.load_meta()
        # 40 poses on a circle (elevation -30 deg, radius 4) for the "vis" split.
        self.render_poses = flow.stack(
            [pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180, 180, 40 + 1)[:-1]], 0
        )  # use for test
    def load_meta(self):
        # "vis" reuses the training transforms; other splits load their own.
        if self.split == "vis":
            with open(os.path.join(self.root_dir, "transforms_train.json"), "r") as f:
                self.meta = json.load(f)
        else:
            with open(os.path.join(self.root_dir, f"transforms_{self.split}.json"), "r") as f:
                self.meta = json.load(f)
        w, h = self.img_wh
        # Focal length from the horizontal field of view.
        camera_angle_x = float(self.meta["camera_angle_x"])
        self.focal = 0.5 * w / np.tan(0.5 * camera_angle_x)
        # Fixed near/far bounds for the synthetic scenes.
        self.near = 2.0
        self.far = 6.0
        self.bounds = np.array([self.near, self.far])
        self.directions = get_ray_directions(h, w, self.focal)  # (h, w, 3)
        if self.split == "train":  # create buffer of all rays and rgb data
            self.image_paths = []
            self.poses = []
            self.all_rays = []
            self.all_rgbs = []
            # Start offset of each frame inside the flattened ray buffers.
            self.indexs = [
                i * self.img_wh[0] * self.img_wh[1] for i in range(len(self.meta["frames"]))
            ]
            for frame in self.meta["frames"]:
                pose = np.array(frame["transform_matrix"])[:3, :4]
                self.poses += [pose]
                c2w = flow.Tensor(pose)
                image_path = os.path.join(self.root_dir, f"{frame['file_path']}.png")
                self.image_paths += [image_path]
                img = Image.open(image_path)
                img = img.resize(self.img_wh, Image.LANCZOS)
                img = self.transform(img)  # (4, h, w)
                img = img.view(4, -1).permute(1, 0)  # (h*w, 4) RGBA
                img = img[:, :3] * img[:, -1:] + (1 - img[:, -1:])  # blend A to RGB (white bg)
                self.all_rgbs += [img]
                rays_o, rays_d = get_rays(self.directions, c2w)  # both (h*w, 3)
                self.all_rays += [
                    flow.cat(
                        [
                            rays_o,
                            rays_d,
                            self.near * flow.ones_like(rays_o[:, :1]),
                            self.far * flow.ones_like(rays_o[:, :1]),
                        ],
                        1,
                    )
                ]  # (h*w, 8)
            self.all_rays = flow.cat(self.all_rays, 0)  # (len(self.meta['frames])*h*w, 8)
            self.all_rgbs = flow.cat(self.all_rgbs, 0)  # (len(self.meta['frames])*h*w, 3)
            self.num_iter = 0
            # Half-extent of the central crop sampled during early training.
            self.dH = int(self.img_wh[0] // 2 * 0.5)
            self.dW = int(self.img_wh[1] // 2 * 0.5)
    def __len__(self):
        # NOTE(review): returns None for splits other than the four below.
        if self.split == "train":
            return int(len(self.all_rays) / self.batchsize)
        elif self.split == "val":
            return 8  # only validate 8 images (to support <=8 gpus)
        elif self.split == "vis":
            return len(self.render_poses)
        elif self.split == "test":
            return len(self.meta["frames"])
    def __getitem__(self, idx):
        if self.split == "train":  # use data in the buffers
            idx = idx % len(self.indexs)
            # For the first 500 items, sample only from the central crop;
            # afterwards sample from the whole image.
            if self.num_iter < 500:
                coords = flow.stack(
                    flow.meshgrid(
                        flow.linspace(
                            self.img_wh[1] // 2 - self.dH,
                            self.img_wh[1] // 2 + self.dH - 1,
                            2 * self.dH,
                        ),
                        flow.linspace(
                            self.img_wh[0] // 2 - self.dW,
                            self.img_wh[0] // 2 + self.dW - 1,
                            2 * self.dW,
                        ),
                    ),
                    -1,
                )
            else:
                coords = flow.stack(
                    flow.meshgrid(
                        flow.linspace(0, self.img_wh[1] - 1, self.img_wh[1]),
                        flow.linspace(0, self.img_wh[0] - 1, self.img_wh[0]),
                    ),
                    -1,
                )  # (H, W, 2)
            coords = flow.reshape(coords, [-1, 2])  # (H * W, 2)
            # Randomly pick `batchsize` distinct pixels from this frame.
            select_inds = np.random.choice(
                coords.shape[0], size=[self.batchsize], replace=False
            )  # (N_rand,)
            select_coords = coords[select_inds].long()  # (N_rand, 2)
            rays = self.all_rays[
                self.indexs[idx] : self.indexs[idx] + self.img_wh[0] * self.img_wh[1]
            ]
            rgbs = self.all_rgbs[
                self.indexs[idx] : self.indexs[idx] + self.img_wh[0] * self.img_wh[1]
            ]
            rays = rays.view(self.img_wh[1], self.img_wh[0], -1)
            rgbs = rgbs.view(self.img_wh[1], self.img_wh[0], -1)
            rays = rays[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 3)
            rgbs = rgbs[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 3)
            self.num_iter += 1
            sample = OrderedDict(rays=rays, rgbs=rgbs)  # alignment point with nerf_pytorch
        elif self.split == "val" or self.split == "test":  # create data for each image separately
            frame = self.meta["frames"][idx]
            c2w = flow.Tensor(frame["transform_matrix"])[:3, :4]
            img = Image.open(os.path.join(self.root_dir, f"{frame['file_path']}.png"))
            img = img.resize(self.img_wh, Image.LANCZOS)
            img = self.transform(img)  # (4, H, W)
            valid_mask = (img[-1] > 0).flatten()  # (H*W) valid color area
            img = img.view(4, -1).permute(1, 0)  # (H*W, 4) RGBA
            img = img[:, :3] * img[:, -1:] + (1 - img[:, -1:])  # blend A to RGB
            rays_o, rays_d = get_rays(self.directions, c2w)
            rays = flow.concat(
                [
                    rays_o,
                    rays_d,
                    self.near * flow.ones_like(rays_o[:, :1]),
                    self.far * flow.ones_like(rays_o[:, :1]),
                ],
                1,
            )  # (H*W, 8)
            sample = OrderedDict(rays=rays, rgbs=img, c2w=c2w, valid_mask=valid_mask)
        else:
            # "vis": render along the precomputed spherical path; no ground truth.
            c2w = self.render_poses[idx][:3, :4]
            rays_o, rays_d = get_rays(self.directions, c2w)
            rays = flow.concat(
                [
                    rays_o,
                    rays_d,
                    self.near * flow.ones_like(rays_o[:, :1]),
                    self.far * flow.ones_like(rays_o[:, :1]),
                ],
                1,
            )  # (H*W, 8)
            sample = OrderedDict(rays=rays, c2w=c2w)
        return trun_dict_to_instance(sample)
class LLFFDataset(NerfBaseDataset):
    """LLFF (real forward-facing) dataset: reads COLMAP-derived
    ``poses_bounds.npy`` plus images, and serves ray/RGB samples
    (in NDC for forward-facing captures)."""

    def __init__(
        self,
        root_dir,
        split="train",
        img_wh=(504, 378),
        spheric_poses=False,
        val_num=1,
        batchsize=1024,
    ):
        """
        Args:
            root_dir: str,
            split: str,
            img_wh: tuple,
            spheric_poses: bool, whether the images are taken in a spheric inward-facing manner
                default: False (forward-facing)
            val_num: int, number of val images (used for multigpu training, validate same image
                for all gpus)
            batchsize: int, batchsize of rays
        """
        super(LLFFDataset, self).__init__(root_dir, split, img_wh)
        self.spheric_poses = spheric_poses
        self.val_num = max(1, val_num)  # at least 1
        self.batchsize = batchsize
        self.load_meta()
        # build render_poses for inference: a spiral path around the
        # average pose, sized from the pose spread and depth bounds.
        up = normalize(self.poses[:, :3, 1].sum(0))
        tt = self.poses[:, :3, 3]
        rads = np.percentile(np.abs(tt), 90, 0)  # per-axis spiral radii
        close_depth, inf_depth = self.bounds.min() * 0.9, self.bounds.max() * 5.0
        dt = 0.75
        # Harmonic blend of near/far depths gives the focus distance.
        focal = 1.0 / (((1.0 - dt) / close_depth + dt / inf_depth))
        zdelta = close_depth * 0.2
        N_views = 120
        N_rots = 2
        hwf = self.hwf
        center = self.poses[:, :3, 3].mean(0)
        vec2 = normalize(self.poses[:, :3, 2].sum(0))
        up = self.poses[:, :3, 1].sum(0)
        c2w = viewmatrix(vec2, up, center)  # average pose of the capture
        self.render_poses = flow.Tensor(
            render_path_spiral(
                c2w, hwf, normalize(up), rads, focal, zdelta, zrate=0.5, rots=N_rots, N=N_views
            )
        )  # use for test
        self.white_back = False
    def load_meta(self):
        poses_bounds = np.load(os.path.join(self.root_dir, "poses_bounds.npy"))  # (N_images, 17)
        self.image_paths = sorted(glob.glob(os.path.join(self.root_dir, "images/*")))
        if self.split in ["train", "val"]:
            assert len(poses_bounds) == len(
                self.image_paths
            ), "Mismatch between number of images and number of poses! Please rerun COLMAP!"
        poses = poses_bounds[:, :15].reshape(-1, 3, 5)  # (N_images, 3, 5)
        self.bounds = poses_bounds[:, -2:]  # (N_images, 2)
        H, W, self.focal = poses[0, :, -1]  # original intrinsics, same for all images
        H, W, self.focal = H.item(), W.item(), self.focal.item()
        assert (
            H * self.img_wh[0] == W * self.img_wh[1]
        ), f"You must set @img_wh to have the same aspect ratio as ({W}, {H}) !"
        # Rescale the focal length to the requested resolution.
        self.focal *= self.img_wh[0] / W
        # Re-order axes from LLFF's (down, right, back) convention.
        poses = np.concatenate([poses[..., 1:2], -poses[..., :1], poses[..., 2:4]], -1)
        # (N_images, 3, 4) exclude H, W, focal
        self.poses, self.pose_avg = center_poses(poses)
        distances_from_center = np.linalg.norm(self.poses[..., 3], axis=1)
        val_idx = np.argmin(distances_from_center)  # choose val image as the closest to center
        # Rescale so the nearest depth is ~1.33 (bounds and translations
        # must be scaled together to keep geometry consistent).
        near_original = self.bounds.min()
        scale_factor = near_original * 0.75  # 0.75 is the default parameter
        self.bounds /= scale_factor
        self.poses[..., 3] /= scale_factor
        self.directions = get_ray_directions(
            self.img_wh[1], self.img_wh[0], self.focal
        )  # (H, W, 3)
        self.hwf = np.array([self.img_wh[1], self.img_wh[0], self.focal])
        if self.split == "train":  # create buffer of all rays and rgb data
            # use first N_images-1 to train, the LAST is val
            self.all_rays = []
            self.all_rgbs = []
            # Start offset of each frame inside the flattened ray buffers.
            self.indexs = [
                i * self.img_wh[0] * self.img_wh[1] for i in range(len(self.image_paths) - 1)
            ]
            for i, image_path in enumerate(self.image_paths):
                if i == val_idx:  # exclude the val image
                    continue
                c2w = flow.Tensor(self.poses[i])
                img = Image.open(image_path).convert("RGB")
                assert (
                    img.size[1] * self.img_wh[0] == img.size[0] * self.img_wh[1]
                ), f"{image_path} has different aspect ratio than img_wh, please check your data!"
                img = img.resize(self.img_wh, Image.LANCZOS)
                img = self.transform(img)  # (3, h, w)
                img = img.view(3, -1).permute(1, 0)  # (h*w, 3) RGB
                self.all_rgbs += [img]
                rays_o, rays_d = get_rays(self.directions, c2w)  # both (h*w, 3)
                if not self.spheric_poses:
                    # Forward-facing: move rays into NDC, bounds become [0, 1].
                    near, far = 0, 1
                    rays_o, rays_d = get_ndc_rays(
                        self.img_wh[1], self.img_wh[0], self.focal, 1.0, rays_o, rays_d
                    )
                else:
                    near = self.bounds.min()
                    far = min(8 * near, self.bounds.max())  # focus on central object only
                self.all_rays += [
                    flow.concat(
                        [
                            rays_o,
                            rays_d,
                            near * flow.ones_like(rays_o[:, :1]),
                            far * flow.ones_like(rays_o[:, :1]),
                        ],
                        1,
                    )
                ]  # (h*w, 8)
            self.all_rays = flow.cat(self.all_rays, 0)  # ((N_images-1)*h*w, 8)
            self.all_rgbs = flow.cat(self.all_rgbs, 0)  # ((N_images-1)*h*w, 3)
            self.num_iter = 0
            # Half-extent of the central crop sampled during early training.
            self.dH = int(self.img_wh[0] // 2 * 0.5)
            self.dW = int(self.img_wh[1] // 2 * 0.5)
        elif self.split == "val":
            self.c2w_val = self.poses[val_idx]
            self.image_path_val = self.image_paths[val_idx]
        else:  # for testing, create a parametric rendering path
            if self.split.endswith("train"):  # test on training set
                self.poses_test = self.poses
            elif not self.spheric_poses:
                focus_depth = 3.5  # hardcoded, this is numerically close to the formula
                # given in the original repo. Mathematically if near=1
                # and far=infinity, then this number will converge to 4
                radii = np.percentile(np.abs(self.poses[..., 3]), 90, axis=0)
                self.poses_test = create_spiral_poses(radii, focus_depth)
            else:
                radius = 1.1 * self.bounds.min()
                self.poses_test = create_spheric_poses(radius)
    def __len__(self):
        # NOTE(review): returns None for splits other than the four below.
        if self.split == "train":
            return int(len(self.all_rays) / self.batchsize)
        elif self.split == "vis":
            return len(self.render_poses)
        elif self.split == "val":
            return self.val_num
        elif self.split == "test":
            return len(self.poses_test)
    def __getitem__(self, idx):
        if self.split == "train":  # use data in the buffers
            idx = idx % len(self.indexs)
            # For the first 500 items, sample only from the central crop;
            # afterwards sample from the whole image.
            if self.num_iter < 500:
                coords = flow.stack(
                    flow.meshgrid(
                        flow.linspace(
                            self.img_wh[1] // 2 - self.dH,
                            self.img_wh[1] // 2 + self.dH - 1,
                            2 * self.dH,
                        ),
                        flow.linspace(
                            self.img_wh[0] // 2 - self.dW,
                            self.img_wh[0] // 2 + self.dW - 1,
                            2 * self.dW,
                        ),
                    ),
                    -1,
                )
            else:
                coords = flow.stack(
                    flow.meshgrid(
                        flow.linspace(0, self.img_wh[1] - 1, self.img_wh[1]),
                        flow.linspace(0, self.img_wh[0] - 1, self.img_wh[0]),
                    ),
                    -1,
                )  # (H, W, 2)
            coords = flow.reshape(coords, [-1, 2])  # (H * W, 2)
            # Randomly pick `batchsize` distinct pixels from this frame.
            select_inds = np.random.choice(
                coords.shape[0], size=[self.batchsize], replace=False
            )  # (N_rand,)
            select_coords = coords[select_inds].long()  # (N_rand, 2)
            rays = self.all_rays[
                self.indexs[idx] : self.indexs[idx] + self.img_wh[0] * self.img_wh[1]
            ]
            rgbs = self.all_rgbs[
                self.indexs[idx] : self.indexs[idx] + self.img_wh[0] * self.img_wh[1]
            ]
            rays = rays.view(self.img_wh[1], self.img_wh[0], -1)
            rgbs = rgbs.view(self.img_wh[1], self.img_wh[0], -1)
            rays = rays[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 3)
            rgbs = rgbs[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 3)
            self.num_iter += 1
            sample = OrderedDict(rays=rays, rgbs=rgbs)  # alignment point with nerf_pytorch
        elif self.split in ["val", "test"]:
            if self.split == "val":
                c2w = flow.Tensor(self.c2w_val)
            else:
                c2w = flow.Tensor(self.poses_test[idx])
            rays_o, rays_d = get_rays(self.directions, c2w)
            if not self.spheric_poses:
                near, far = 0, 1
                rays_o, rays_d = get_ndc_rays(
                    self.img_wh[1], self.img_wh[0], self.focal, 1.0, rays_o, rays_d
                )
            else:
                near = self.bounds.min()
                far = min(8 * near, self.bounds.max())
            rays = flow.cat(
                [
                    rays_o,
                    rays_d,
                    near * flow.ones_like(rays_o[:, :1]),
                    far * flow.ones_like(rays_o[:, :1]),
                ],
                1,
            )  # (h*w, 8)
            sample = OrderedDict(rays=rays, c2w=c2w)
            if self.split == "val":
                img = Image.open(self.image_path_val).convert("RGB")
                img = img.resize(self.img_wh, Image.LANCZOS)
                img = self.transform(img)  # (3, h, w)
                img = img.view(3, -1).permute(1, 0)  # (h*w, 3)
                sample["rgbs"] = img
        else:
            # "vis": render along the precomputed spiral path; no ground truth.
            c2w = self.render_poses[idx][:3, :4]
            rays_o, rays_d = get_rays(self.directions, c2w)
            if not self.spheric_poses:
                near, far = 0, 1
                rays_o, rays_d = get_ndc_rays(
                    self.img_wh[1], self.img_wh[0], self.focal, 1.0, rays_o, rays_d
                )
            else:
                near = self.bounds.min()
                far = min(8 * near, self.bounds.max())
            rays = flow.concat(
                [
                    rays_o,
                    rays_d,
                    near * flow.ones_like(rays_o[:, :1]),
                    far * flow.ones_like(rays_o[:, :1]),
                ],
                1,
            )  # (H*W, 8)
            sample = OrderedDict(rays=rays, c2w=c2w)
        return trun_dict_to_instance(sample)
| 32,893 | 36.379545 | 99 | py |
libai | libai-main/projects/NeRF/configs/config_nerf_for_rendering.py | from projects.NeRF.configs.config_nerf import (
train,
dataset,
dataloader,
graph,
model,
LazyCall,
build_image_test_loader,
)
from projects.NeRF.evaluation.nerf_evaluator import NerfVisEvaluator
from libai.data.samplers import SingleRoundSampler
# NOTE: Used for generating MP4 format files.
# This config reuses everything from ``config_nerf`` and only swaps in the
# visualization evaluator plus a "vis"-split dataloader, so an already
# trained model renders a camera pose path instead of being scored on "val".
# Redefining evaluator
train.evaluation = dict(
    enabled=True,
    # evaluator for calculating psnr
    evaluator=LazyCall(NerfVisEvaluator)(
        # image resolution per dataset family (Blender vs LLFF)
        img_wh=(400, 400) if train.dataset_type == "Blender" else (504, 378),
        # number of poses along the rendered path
        pose_dir_len=40 if train.dataset_type == "Blender" else 120,
        name="blender_rendering_result"
        if train.dataset_type == "Blender"
        else "llff_rendering_result",
    ),
    eval_period=train.evaluation.eval_period,
    eval_iter=1e5,  # running steps for validation/test
    # Metrics to be used for best model checkpoint.
    eval_metric="psnr",
    eval_mode="max",
)
# "vis" split: iterates rendering poses once, in order (no shuffle/drop).
dataloader.test = [
    LazyCall(build_image_test_loader)(
        dataset=LazyCall(dataset)(
            split="vis",
            img_wh=(400, 400) if dataset.dataset_type == "Blender" else (504, 378),
            root_dir=train.blender_dataset_path
            if dataset.dataset_type == "Blender"
            else train.llff_dataset_path,
            spheric_poses=None if dataset.dataset_type == "Blender" else False,
            val_num=None if dataset.dataset_type == "Blender" else 1,  # Number of your GPUs
        ),
        sampler=LazyCall(SingleRoundSampler)(shuffle=False, drop_last=False),
        num_workers=0,
        test_batch_size=train.test_micro_batch_size,
    )
]
train.load_weight = "/path/to/ckpt"  # Already trained NeRF checkpoint location
| 1,712 | 33.26 | 92 | py |
libai | libai-main/projects/NeRF/configs/config_model.py | from omegaconf import DictConfig
from libai.config import LazyCall
from projects.NeRF.modeling.System import NerfSystem
# Hyper-parameters for NerfSystem (see projects/NeRF/modeling/System.py
# for the meaning of each key; they are forwarded via ``from_config``).
cfg = dict(
    D=8,
    W=256,
    in_channels_xyz=63,
    in_channels_dir=27,
    skips=[4],
    N_samples=64,
    use_disp=False,
    perturb=1.0,
    noise_std=0.0,
    N_importance=128,
    # NOTE(review): ``64 * 1204`` looks like a typo for ``64 * 1024``
    # (NerfSystem's own default uses ``32 * 1204`` with the same digits
    # transposed). Harmless either way — it is only a chunking size — but
    # worth confirming against the upstream implementation.
    chunk=64 * 1204,
    dataset_type="Blender",
)
cfg = DictConfig(cfg)
model = LazyCall(NerfSystem)(cfg=cfg)
| 427 | 16.12 | 52 | py |
libai | libai-main/projects/NeRF/configs/config_nerf.py | from omegaconf import OmegaConf
import oneflow as flow
import oneflow.nn as nn
from libai.data.build import build_image_train_loader, build_image_test_loader
from libai.config import LazyCall, get_config
from libai.optim import get_default_optimizer_params
from libai.scheduler.lr_scheduler import WarmupCosineAnnealingLR, WarmupMultiStepLR
from projects.NeRF.datasets import BlenderDataset, LLFFDataset
from projects.NeRF.optimizers import Ranger, RAdam
from projects.NeRF.evaluation.nerf_evaluator import NerfEvaluator
from projects.NeRF.configs.config_model import model
def get_nerf_dataset(dataset_type="Blender"):
    """Map a dataset-type name to the corresponding dataset class.

    Args:
        dataset_type: Blender or LLFF

    Returns:
        The ``BlenderDataset`` or ``LLFFDataset`` class itself (not an
        instance); it is instantiated lazily by the dataloader config.

    Raises:
        AssertionError: if ``dataset_type`` is neither "Blender" nor "LLFF".
    """
    assert dataset_type in ["Blender", "LLFF"], "The Nerf dataset must be one of Blender and LLFF"
    return BlenderDataset if dataset_type == "Blender" else LLFFDataset
# Eager mode only: nn.Graph static-graph execution is disabled for NeRF.
graph = get_config("common/models/graph.py").graph
graph.enabled = False
train = get_config("common/train.py").train
# Refine train cfg for Nerf System
train.train_micro_batch_size = 1024  # Verification by ray
train.test_micro_batch_size = 1  # Verification by picture
train.dataset_type = "Blender"  # Blender or LLFF
train.blender_dataset_path = "/path/to/blender"
train.llff_dataset_path = "/path/to/llff"
train.train_epoch = 16 if train.dataset_type == "Blender" else 30
# NOTE(review): int(1 / train_epoch) truncates to 0 for any epoch count > 1,
# so the warmup_ratio guard below always falls through to 0.0 — presumably a
# fractional ratio (1 / train_epoch) was intended; confirm against trainer.
train.warmup_ratio = int(1 / train.train_epoch)
train.evaluation.eval_period = 1000
train.log_period = 50
train.optim_type = "adam"
train.lr_scheduler_type = "cosine"
# Redefining model config
model.cfg.dataset_type = train.dataset_type
model.cfg.loss_func = nn.MSELoss()
model.cfg.noise_std = 0.0 if train.dataset_type == "Blender" else 1.0
# Redefining evaluator
train.evaluation = dict(
    enabled=True,
    # evaluator for calculating psnr
    evaluator=LazyCall(NerfEvaluator)(
        img_wh=(400, 400) if train.dataset_type == "Blender" else (504, 378)
    ),
    eval_period=train.evaluation.eval_period,
    eval_iter=1e5,  # running steps for validation/test
    # Metrics to be used for best model checkpoint.
    eval_metric="psnr",
    eval_mode="max",
)
# Refine optimizer cfg for Nerf System
# NOTE: In theory, both datasets used by Nerf are optimized using the Adam optimizer, but
# since the borrowed code base also implements three other optimizer configurations, libai
# also implements the corresponding optimizer.
if train.optim_type == "adam":
    optimizer = flow.optim.Adam
    lr = 5e-4
elif train.optim_type == "sgd":
    optimizer = flow.optim.SGD
    lr = 5e-2
elif train.optim_type == "radam":
    optimizer = RAdam
    lr = 5e-4
elif train.optim_type == "ranger":
    optimizer = Ranger
    lr = 5e-4
else:
    raise NotImplementedError("Nerf does not support this type of optimizer!")
optim = LazyCall(optimizer)(
    params=LazyCall(get_default_optimizer_params)(
        # params.model is meant to be set to the model object,
        # before instantiating the optimizer.
        clip_grad_max_norm=None,
        clip_grad_norm_type=None,
        weight_decay_norm=None,
        weight_decay_bias=None,
    ),
    lr=lr,
    weight_decay=0,
)
if train.optim_type == "sgd":
    optim.momentum = 0.9
if train.lr_scheduler_type == "steplr":
    scheduler = WarmupMultiStepLR
elif train.lr_scheduler_type == "cosine":
    scheduler = WarmupCosineAnnealingLR
else:
    raise NotImplementedError("Nerf does not support this type of scheduler!")
train.scheduler = LazyCall(scheduler)(
    warmup_factor=0.001,
    warmup_method="linear",
)
# Milestones are expressed as fractions of total epochs (2/16 etc.).
if train.lr_scheduler_type == "steplr":
    if train.dataset_type == "Blender":
        milestones = [2 / 16, 4 / 16, 8 / 16]
    else:
        milestones = [10 / 30, 20 / 30]
    train.scheduler.milestones = milestones
    train.scheduler.gamma = 0.5
elif train.lr_scheduler_type == "cosine":
    train.scheduler.eta_min = 1e-8
# RAdam/Ranger handle their own warmup internally, so disable warmup there.
train.warmup_ratio = (
    train.warmup_ratio
    if train.warmup_ratio > 0 and train.optim_type not in ["radam", "ranger"]
    else 0.0
)
# Set fp16 ON
train.amp.enabled = True
dataset = LazyCall(get_nerf_dataset)(dataset_type=train.dataset_type)
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_image_train_loader)(
    dataset=[
        LazyCall(dataset)(
            split="train",
            img_wh=(400, 400) if dataset.dataset_type == "Blender" else (504, 378),
            root_dir=train.blender_dataset_path
            if dataset.dataset_type == "Blender"
            else train.llff_dataset_path,
            spheric_poses=None if dataset.dataset_type == "Blender" else False,
            val_num=None if dataset.dataset_type == "Blender" else 1,  # Number of your GPUs
            # the dataset itself packs 1024 rays per item (captured here,
            # before the trainer-level batch size is reset to 1 below)
            batchsize=train.train_micro_batch_size,
        )
    ],
    num_workers=4,
    train_batch_size=1,
    test_batch_size=train.test_micro_batch_size,
)
dataloader.test = [
    LazyCall(build_image_test_loader)(
        dataset=LazyCall(dataset)(
            split="val",
            img_wh=(400, 400) if dataset.dataset_type == "Blender" else (504, 378),
            root_dir=train.blender_dataset_path
            if dataset.dataset_type == "Blender"
            else train.llff_dataset_path,
            spheric_poses=None if dataset.dataset_type == "Blender" else False,
            val_num=None if dataset.dataset_type == "Blender" else 1,  # Number of your GPUs
        ),
        num_workers=0,
        test_batch_size=train.test_micro_batch_size,
    )
]
# Distributed Settings
depth = None
# Trainer-level micro batch is 1: each dataset item is already a 1024-ray
# batch (see ``batchsize`` above), so the loader yields one item per step.
train.train_micro_batch_size = 1
train.dist.pipeline_num_layers = depth
train.dist.data_parallel_size = 1
train.dist.tensor_parallel_size = 1
train.dist.pipeline_parallel_size = 1
| 5,666 | 31.382857 | 98 | py |
libai | libai-main/projects/NeRF/optimizers/Radam.py | import math
import oneflow as flow
from oneflow.optim import Optimizer
class RAdam(Optimizer):
    """Rectified Adam optimizer (OneFlow port).

    Uses a per-step rectification term ``N_sma`` (length of the approximated
    SMA) to decide between an Adam-style update (variance estimate reliable,
    ``N_sma >= 5``) and a plain SGD-style momentum update (early steps).
    ``degenerated_to_sgd=False`` skips the update entirely during the
    un-rectified phase (``step_size`` stays -1).

    NOTE(review): the in-place calls use the legacy positional overloads
    ``add_(alpha, tensor)`` / ``addcmul_(value, t1, t2)`` /
    ``addcdiv_(value, t1, t2)`` where the first argument is the scaling
    factor — do not reorder them to the modern keyword forms blindly.
    """

    def __init__(
        self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True
    ):
        # Validate hyper-parameters up front with the standard error messages.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        self.degenerated_to_sgd = degenerated_to_sgd
        # Param groups with their own betas need their own rectification
        # cache, since the cached N_sma/step_size depend on beta2.
        if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
            for param in params:
                if "betas" in param and (
                    param["betas"][0] != betas[0] or param["betas"][1] != betas[1]
                ):
                    param["buffer"] = [[None, None, None] for _ in range(10)]
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            # ring buffer of [step, N_sma, step_size], keyed by step % 10,
            # so the scalar rectification math is computed once per step
            # instead of once per parameter
            buffer=[[None, None, None] for _ in range(10)],
        )
        super(RAdam, self).__init__(params, defaults)
    def __setstate__(self, state):
        # No extra state beyond the base Optimizer; kept for pickling parity.
        super(RAdam, self).__setstate__(state)
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (callable, optional): re-evaluates the model and returns
                the loss; its return value is passed through.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                # Work in fp32 copies regardless of the parameter dtype.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError("RAdam does not support sparse gradients")
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    # Lazy state init on first step for this parameter.
                    state["step"] = 0
                    state["exp_avg"] = flow.zeros_like(p_data_fp32)
                    state["exp_avg_sq"] = flow.zeros_like(p_data_fp32)
                else:
                    state["exp_avg"] = state["exp_avg"].type_as(p_data_fp32)
                    state["exp_avg_sq"] = state["exp_avg_sq"].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]
                # First/second moment EMAs (legacy alpha-first overloads).
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                state["step"] += 1
                # Reuse the per-step scalar math cached in the ring buffer.
                buffered = group["buffer"][int(state["step"] % 10)]
                if state["step"] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state["step"]
                    beta2_t = beta2 ** state["step"]
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state["step"] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        # Rectified step size (includes bias correction).
                        step_size = math.sqrt(
                            (1 - beta2_t)
                            * (N_sma - 4)
                            / (N_sma_max - 4)
                            * (N_sma - 2)
                            / N_sma
                            * N_sma_max
                            / (N_sma_max - 2)
                        ) / (1 - beta1 ** state["step"])
                    elif self.degenerated_to_sgd:
                        step_size = 1.0 / (1 - beta1 ** state["step"])
                    else:
                        step_size = -1
                    buffered[2] = step_size
                # more conservative since it's an approximated value
                if N_sma >= 5:
                    # Adam-style update with the rectified step size.
                    if group["weight_decay"] != 0:
                        p_data_fp32.add_(-group["weight_decay"] * group["lr"], p_data_fp32)
                    denom = exp_avg_sq.sqrt().add_(group["eps"])
                    p_data_fp32.addcdiv_(-step_size * group["lr"], exp_avg, denom)
                    p.data.copy_(p_data_fp32)
                elif step_size > 0:
                    # SGD-with-momentum fallback while variance is unreliable.
                    if group["weight_decay"] != 0:
                        p_data_fp32.add_(-group["weight_decay"] * group["lr"], p_data_fp32)
                    p_data_fp32.add_(-step_size * group["lr"], exp_avg)
                    p.data.copy_(p_data_fp32)
        return loss
| 4,573 | 39.122807 | 100 | py |
libai | libai-main/projects/NeRF/optimizers/Ranger.py | import math
import oneflow as flow
from oneflow.optim import Optimizer
class Ranger(Optimizer):
    """Ranger optimizer: RAdam combined with LookAhead (OneFlow port).

    Each parameter keeps a slow-weights copy (``slow_buffer``); every ``k``
    steps the fast (RAdam-updated) weights are pulled toward the slow
    weights by factor ``alpha`` and copied back.

    NOTE(review): in-place math uses the legacy positional overloads
    ``add_(alpha, tensor)`` / ``addcmul_`` / ``addcdiv_`` with the scaling
    factor first — keep the argument order as-is.
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        alpha=0.5,
        k=6,
        N_sma_threshhold=5,
        betas=(0.95, 0.999),
        eps=1e-5,
        weight_decay=0,
    ):
        # parameter checks
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f"Invalid slow update rate: {alpha}")
        if not 1 <= k:
            raise ValueError(f"Invalid lookahead steps: {k}")
        if not lr > 0:
            raise ValueError(f"Invalid Learning Rate: {lr}")
        if not eps > 0:
            raise ValueError(f"Invalid eps: {eps}")
        defaults = dict(
            lr=lr,
            alpha=alpha,
            k=k,
            step_counter=0,
            betas=betas,
            N_sma_threshhold=N_sma_threshhold,
            eps=eps,
            weight_decay=weight_decay,
        )
        super().__init__(params, defaults)
        # adjustable threshold for switching to the rectified (Adam) update
        self.N_sma_threshhold = N_sma_threshhold
        # No separate LookAhead step counter is needed: RAdam's per-param
        # ``state["step"]`` is reused for the every-k-steps slow update.
        # look ahead params
        self.alpha = alpha
        self.k = k
        # radam buffer: ring of [step, N_sma, step_size] keyed by step % 10,
        # shared across parameters so scalar math runs once per step
        self.radam_buffer = [[None, None, None] for ind in range(10)]
    def __setstate__(self, state):
        print("set state called")
        super(Ranger, self).__setstate__(state)
    def step(self, closure=None):
        """Perform one RAdam step plus (every ``k`` steps) a LookAhead sync.

        Note: ``closure`` is accepted for Optimizer API compatibility but is
        never evaluated here — the return value is always ``None``.
        """
        loss = None
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                # Work on fp32 copies regardless of parameter dtype.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError("Ranger optimizer does not support sparse gradients")
                p_data_fp32 = p.data.float()
                state = self.state[p]  # get state dict for this param
                if len(state) == 0:
                    # Lazy per-parameter state init on first step.
                    state["step"] = 0
                    state["exp_avg"] = flow.zeros_like(p_data_fp32)
                    state["exp_avg_sq"] = flow.zeros_like(p_data_fp32)
                    # look ahead weight storage now in state dict
                    state["slow_buffer"] = p.data.clone()
                else:
                    state["exp_avg"] = state["exp_avg"].type_as(p_data_fp32)
                    state["exp_avg_sq"] = state["exp_avg_sq"].type_as(p_data_fp32)
                # begin computations
                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]
                # compute variance mov avg
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                # compute mean moving avg
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                state["step"] += 1
                # RAdam rectification, cached per step in the ring buffer.
                buffered = self.radam_buffer[int(state["step"] % 10)]
                if state["step"] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state["step"]
                    beta2_t = beta2 ** state["step"]
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state["step"] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    if N_sma > self.N_sma_threshhold:
                        # Rectified step size (includes beta1 bias correction).
                        step_size = math.sqrt(
                            (1 - beta2_t)
                            * (N_sma - 4)
                            / (N_sma_max - 4)
                            * (N_sma - 2)
                            / N_sma
                            * N_sma_max
                            / (N_sma_max - 2)
                        ) / (1 - beta1 ** state["step"])
                    else:
                        step_size = 1.0 / (1 - beta1 ** state["step"])
                    buffered[2] = step_size
                if group["weight_decay"] != 0:
                    p_data_fp32.add_(-group["weight_decay"] * group["lr"], p_data_fp32)
                if N_sma > self.N_sma_threshhold:
                    # Adam-style update once the variance estimate is reliable.
                    denom = exp_avg_sq.sqrt().add_(group["eps"])
                    p_data_fp32.addcdiv_(-step_size * group["lr"], exp_avg, denom)
                else:
                    # SGD-with-momentum fallback during early steps.
                    p_data_fp32.add_(-step_size * group["lr"], exp_avg)
                p.data.copy_(p_data_fp32)
                # integrated look ahead...
                # we do it at the param level instead of group level
                if state["step"] % group["k"] == 0:
                    slow_p = state["slow_buffer"]  # get access to slow param tensor
                    slow_p.add_(
                        self.alpha, p.data - slow_p
                    )  # (fast weights - slow weights) * alpha
                    p.data.copy_(slow_p)  # copy interpolated weights to RAdam param tensor
        return loss
| 5,082 | 34.298611 | 92 | py |
libai | libai-main/projects/NeRF/optimizers/__init__.py | from .Radam import RAdam
from .Ranger import Ranger
| 52 | 16.666667 | 26 | py |
libai | libai-main/projects/NeRF/modeling/NeRF.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
from libai.utils import distributed as dist
class Embedding(nn.Module):
    """Positional encoding: embeds x to (x, sin(2^k x), cos(2^k x), ...)."""

    def __init__(self, in_channels, N_freqs, logscale=True):
        """
        in_channels: number of input channels (3 for both xyz and direction)
        N_freqs: number of frequency bands
        logscale: if True, frequencies are 2^0 .. 2^(N_freqs-1); otherwise
            linearly spaced on [1, 2^(N_freqs-1)]
        """
        super(Embedding, self).__init__()
        self.N_freqs = N_freqs
        self.in_channels = in_channels
        self.funcs = [flow.sin, flow.cos]
        # identity term plus one sin and one cos channel per frequency
        self.out_channels = in_channels * (len(self.funcs) * N_freqs + 1)
        if logscale:
            bands = 2 ** flow.linspace(0, N_freqs - 1, N_freqs)
        else:
            # NOTE(review): only this branch moves to CUDA explicitly; the
            # to_global() below handles placement either way — confirm the
            # .cuda() call is actually needed.
            bands = flow.linspace(1, 2 ** (N_freqs - 1), N_freqs).cuda()
        # Broadcast the frequency table to every rank; non-persistent so it
        # is rebuilt on load rather than stored in checkpoints.
        global_bands = bands.to_global(
            placement=dist.get_layer_placement(0),
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        )
        self.register_buffer("freq_bands", global_bands, persistent=False)

    def forward(self, x):
        """
        Embed x to (x, sin(2^k x), cos(2^k x), ...).
        Different from the paper, the raw "x" is also kept in the output
        (see https://github.com/bmild/nerf/issues/12).
        Inputs:
            x (Tensor): (B, self.in_channels)
        Outputs:
            out (Tensor): (B, self.out_channels)
        """
        pieces = [x]
        for band in self.freq_bands:
            pieces.extend(fn(band * x) for fn in self.funcs)
        return flow.cat(pieces, -1)
class NeRF(nn.Module):  # a alignment point with nerf_pytorch
    """MLP mapping embedded position (and optionally view direction) to
    (rgb, sigma), mirroring the nerf_pytorch reference architecture."""

    def __init__(
        self, D=8, W=256, input_ch=63, input_ch_views=27, output_ch=5, skips=(4,), use_viewdirs=True
    ):
        """
        D: number of layers for density (sigma) encoder
        W: number of hidden units in each layer
        input_ch: number of input channels for xyz (3+3*10*2=63 by default)
        input_ch_views: number of input channels for direction (3+3*4*2=27 by default)
        output_ch: number of output channels (only used when use_viewdirs is False)
        skips: layer indices after which the xyz embedding is re-concatenated
            (fix: default was a mutable list ``[4]``; an immutable tuple is
            equivalent here since membership is the only operation used)
        use_viewdirs: if True, predict rgb from (features, direction) and
            sigma from the features alone
        """
        super(NeRF, self).__init__()
        self.D = D
        self.W = W
        self.input_ch = input_ch
        self.input_ch_views = input_ch_views
        self.skips = skips
        self.use_viewdirs = use_viewdirs

        # xyz encoder: D linear layers; the layer following a skip index
        # takes the original embedding concatenated back in.
        layers = [nn.Linear(input_ch, W)]
        for i in range(D - 1):
            in_features = W + input_ch if i in self.skips else W
            layers.append(nn.Linear(in_features, W))
        self.pts_linears = nn.ModuleList(layers)

        # direction branch: one hidden layer over (features, dir embedding)
        self.views_linears = nn.ModuleList([nn.Linear(input_ch_views + W, W // 2)])

        if use_viewdirs:
            self.feature_linear = nn.Linear(W, W)
            self.alpha_linear = nn.Linear(W, 1)
            self.rgb_linear = nn.Linear(W // 2, 3)
        else:
            self.output_linear = nn.Linear(W, output_ch)

    def forward(self, x, sigma_only=False):
        """
        Encodes input (xyz+dir) to rgb+sigma (not ready to render yet).
        For rendering this ray, please see rendering.py
        Inputs:
            x (Tensor): (B, self.in_channels_xyz+self.in_channels_dir)
                the embedded vector of position and direction
            sigma_only (bool): whether to infer sigma only. If True,
                x is of shape (B, self.in_channels_xyz)
        Outputs:
            if sigma_only:
                sigma (Tensor): (B, 1) sigma
            else:
                out (Tensor): (B, 4), rgb and sigma
        """
        if not sigma_only:
            input_pts, input_views = flow.split(x, [self.input_ch, self.input_ch_views], dim=-1)
        else:
            input_pts = x

        h = input_pts
        for i, layer in enumerate(self.pts_linears):
            h = F.relu(layer(h))
            if i in self.skips:
                # skip connection: feed the raw embedding into the next layer
                h = flow.cat([input_pts, h], -1)

        if self.use_viewdirs:
            alpha = self.alpha_linear(h)  # density does not depend on direction
            if sigma_only:
                return alpha
            feature = self.feature_linear(h)
            h = flow.cat([feature, input_views], -1)
            for layer in self.views_linears:
                h = F.relu(layer(h))
            rgb = self.rgb_linear(h).sigmoid()  # sigmoid
            outputs = flow.cat([rgb, alpha], -1)
        else:
            outputs = self.output_linear(h)
        return outputs
| 5,175 | 34.210884 | 99 | py |
libai | libai-main/projects/NeRF/modeling/System.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from collections import defaultdict
import oneflow as flow
import oneflow.nn as nn
from libai.config.config import configurable
from projects.NeRF.modeling.NeRF import Embedding, NeRF
class NerfSystem(nn.Module):
    """End-to-end NeRF pipeline: embeds rays, runs coarse (and optionally
    fine) NeRF MLPs, volume-renders the samples, and computes the MSE loss.

    ``forward`` has three modes selected by which optional inputs are given:
    training (``c2w is None``), rendering-only (``c2w`` given, no ``rgbs``),
    and validation (``c2w`` and ``rgbs`` both given).
    """

    @configurable
    def __init__(
        self,
        D=8,
        W=256,
        in_channels_xyz=63,
        in_channels_dir=27,
        # NOTE(review): mutable default list is shared across calls; it is
        # only read here (membership tests), so this is benign.
        skips=[4],
        N_samples=64,
        use_disp=False,
        perturb=1.0,
        noise_std=1.0,
        N_importance=128,
        # NOTE(review): ``32 * 1204`` looks like a typo for ``32 * 1024``;
        # harmless (chunking size only) but worth confirming upstream.
        chunk=32 * 1204,
        dataset_type="Blender",
        loss_func=None,
    ):
        """
        Args:
            D (int): number of layers for density (sigma) encoder
            W (int): number of hidden units in each layer
            in_channels_xyz (int): number of input channels for xyz (3+3*10*2=63 by default)
            in_channels_dir (int): number of input channels for direction (3+3*4*2=27 by default)
            skips (list(int)): add skip connection in the Dth layer
            N_samples (int): number of coarse samples
            use_disp (bool): use disparity depth sampling
            perturb (float): factor to perturb depth sampling points
            noise_std (float): std dev of noise added to regularize sigma
            N_importance (int): number of additional fine samples
            chunk (int): chunk size to split the input to avoid OOM
            dataset_type (str): the dataset applied for training and evaluating
            loss_func (callable): type of loss function
        """
        super(NerfSystem, self).__init__()
        self.N_samples = N_samples
        self.use_disp = use_disp
        self.perturb = perturb
        self.noise_std = noise_std
        self.N_importance = N_importance
        self.chunk = chunk
        # Blender scenes are composited over a white background.
        self.white_back = True if dataset_type == "Blender" else False
        self.loss_func = nn.MSELoss() if loss_func is None else loss_func
        self.embedding_xyz = Embedding(3, 10)  # 10 is the default number
        self.embedding_dir = Embedding(3, 4)  # 4 is the default number
        self.nerf_coarse = NeRF(
            D=D,
            W=W,
            input_ch=in_channels_xyz,
            input_ch_views=in_channels_dir,
            output_ch=5,
            skips=skips,
        )
        self.models = [self.nerf_coarse]
        # A second, independently-weighted NeRF for hierarchical sampling.
        if N_importance > 0:
            self.nerf_fine = NeRF(
                D=D,
                W=W,
                input_ch=in_channels_xyz,
                input_ch_views=in_channels_dir,
                output_ch=5,
                skips=skips,
            )
            self.models += [self.nerf_fine]
    @classmethod
    def from_config(cls, cfg):
        # Adapter from the lazy config object to keyword arguments.
        return {
            "D": cfg.D,
            "W": cfg.W,
            "in_channels_xyz": cfg.in_channels_xyz,
            "in_channels_dir": cfg.in_channels_dir,
            "skips": cfg.skips,
            "N_samples": cfg.N_samples,
            "use_disp": cfg.use_disp,
            "perturb": cfg.perturb,
            "noise_std": cfg.noise_std,
            "N_importance": cfg.N_importance,
            "chunk": cfg.chunk,
            "dataset_type": cfg.dataset_type,
            "loss_func": cfg.loss_func,
        }
    def sample_pdf(self, bins, weights, N_importance, det=False, eps=1e-5):
        """
        Sample @N_importance samples from @bins with distribution defined by @weights
        (inverse-transform sampling along each ray).
        Inputs:
            bins (tensor): (N_rays, N_samples_+1) where N_samples_ is "the number of
                coarse samples per ray - 2"
            weights (tensor): (N_rays, N_samples_)
            N_importance (int): the number of samples to draw from the distribution
            det (bool): deterministic or not
            eps (float): a small number to prevent division by zero
        Outputs:
            samples: the sampled samples
        """
        N_rays, N_samples_ = weights.shape
        weights = weights + eps  # prevent division by zero (don't do inplace op!)
        pdf = weights / flow.sum(weights, -1, keepdim=True)  # (N_rays, N_samples_)
        cdf = flow.cumsum(pdf, -1)  # (N_rays, N_samples), cumulative distribution function
        cdf = flow.cat([flow.zeros_like(cdf[:, :1]), cdf], -1)  # (N_rays, N_samples_+1)
        # padded to 0~1 inclusive
        if det:
            # evenly spaced quantiles for deterministic (test-time) sampling
            u = flow.linspace(0, 1, N_importance).to_global(placement=bins.placement, sbp=bins.sbp)
            u = u.expand(N_rays, N_importance)
        else:
            u = flow.rand(N_rays, N_importance).to_global(placement=bins.placement, sbp=bins.sbp)
        u = u.contiguous()
        # Locate each quantile u within the CDF, clamped to valid indices.
        inds = flow.searchsorted(cdf, u, right=True)
        below = flow.max(flow.zeros_like(inds - 1), inds - 1)
        above = flow.min((cdf.shape[-1] - 1) * flow.ones_like(inds), inds)
        inds_g = flow.stack([below, above], -1)  # (batch, N_samples, 2)
        # cdf_g = tf.gather(cdf, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)
        # bins_g = tf.gather(bins, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)
        matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]]
        cdf_g = flow.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g)
        bins_g = flow.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g)
        denom = cdf_g[..., 1] - cdf_g[..., 0]
        # denom ~ 0 means the bin has ~zero weight; sampling there is fine
        denom = flow.where(denom < 1e-5, flow.ones_like(denom), denom)
        t = (u - cdf_g[..., 0]) / denom
        # linear interpolation within the chosen bin
        samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
        return samples
    def inference(
        self,
        N_rays,
        model,
        embedding_xyz,
        xyz_,
        no_norm_dir_,
        dir_,
        dir_embedded,
        z_vals,
        noise_std=1,
        chunk=1024 * 32,
        white_back=False,
        weights_only=False,
    ):
        """
        Helper function that performs model inference and volume rendering.
        Inputs:
            N_rays (int): number of rays in this batch
            model (nn.Module): NeRF model (coarse or fine)
            embedding_xyz (nn.Module): embedding module for xyz
            xyz_ (tensor): (N_rays, N_samples_, 3) sampled positions
                N_samples_ is the number of sampled points in each ray;
                = N_samples for coarse model
                = N_samples+N_importance for fine model
            no_norm_dir_ (tensor): (N_rays, 3) ray directions without norm
            dir_ (tensor): (N_rays, 3) ray directions with norm
            dir_embedded (tensor): (N_rays, embed_dir_channels) embedded directions
            z_vals (tensor): (N_rays, N_samples_) depths of the sampled positions
            weights_only (bool): do inference on sigma only or not
        Outputs:
            if weights_only:
                weights (tensor): (N_rays, N_samples_) weights of each sample
            else:
                rgb_final (tensor): (N_rays, 3) the final rgb image
                depth_final (tensor): (N_rays) depth map
                weights (tensor): (N_rays, N_samples_): weights of each sample
        """
        N_samples_ = xyz_.shape[1]
        # Embed directions
        xyz_ = xyz_.view(-1, 3)  # (N_rays*N_samples_, 3)
        if not weights_only:
            # Repeat each ray's direction embedding for all of its samples.
            dir_embedded = dir_embedded[:, None].expand(
                dir_embedded.shape[0], N_samples_, dir_embedded.shape[1]
            )
            dir_embedded = dir_embedded.reshape(-1, dir_embedded.shape[-1])
        # Perform model inference to get rgb and raw sigma
        B = xyz_.shape[0]
        out_chunks = []
        for i in range(0, B, chunk):
            # Embed positions by chunk
            xyz_embedded = embedding_xyz(xyz_[i : i + chunk])
            if not weights_only:
                xyzdir_embedded = flow.cat([xyz_embedded, dir_embedded[i : i + chunk]], 1)
            else:
                xyzdir_embedded = xyz_embedded
            out_chunk = model(xyzdir_embedded)
            out_chunks = out_chunks + [out_chunk]
        out = flow.cat(out_chunks, 0)
        if weights_only:
            sigmas = out.view(N_rays, N_samples_)
        else:
            rgbsigma = out.view(N_rays, N_samples_, 4)
            rgbs = rgbsigma[..., :3]  # (N_rays, N_samples_, 3)
            sigmas = rgbsigma[..., 3]  # (N_rays, N_samples_)
        # Convert these values using volume rendering (Section 4)
        deltas = z_vals[:, 1:].clone() - z_vals[:, :-1].clone()  # (N_rays, N_samples_-1)
        delta_inf = 1e10 * flow.ones_like(deltas[:, :1]).to_global(
            sbp=deltas.sbp, placement=deltas.placement
        )  # (N_rays, 1) the last delta is infinity
        deltas = flow.cat([deltas, delta_inf], -1)  # (N_rays, N_samples_)
        # Multiply each distance by the norm of its corresponding direction ray
        # to convert to real world distance (accounts for non-unit directions).
        deltas = deltas * flow.norm(no_norm_dir_.unsqueeze(1), dim=-1)
        # Gaussian noise on sigma regularizes training (std = noise_std).
        noise = (
            flow.randn(sigmas.shape).to_global(placement=sigmas.placement, sbp=sigmas.sbp)
            * noise_std
        )
        # compute alpha by the formula (3)
        alphas = 1 - flow.exp(-deltas * flow.relu(sigmas + noise))  # (N_rays, N_samples_)
        ne_alphas = 1 - alphas + 1e-10
        alphas_shifted = flow.cat(
            [
                flow.ones_like(alphas[:, :1]).to_global(sbp=alphas.sbp, placement=alphas.placement),
                ne_alphas,
            ],
            -1,
        )  # [1, a1, a2, ...]
        # transmittance-weighted alpha compositing weights
        weights = alphas * flow.cumprod(alphas_shifted, -1)[:, :-1]  # (N_rays, N_samples_)
        # weights = alphas * alphas_shifted[:, :-1] # (N_rays, N_samples_)
        weights_sum = weights.sum(1)  # (N_rays), the accumulated opacity along the rays
        # equals "1 - (1-a1)(1-a2)...(1-an)" mathematically
        if weights_only:
            return weights
        # compute final weighted outputs
        rgb_final = flow.sum(weights.unsqueeze(-1) * rgbs, -2)  # (N_rays, 3)
        depth_final = flow.sum(weights * z_vals, -1)  # (N_rays)
        if white_back:
            # composite leftover transparency over a white background
            rgb_final = rgb_final + (1 - weights_sum.unsqueeze(-1))
        return rgb_final, depth_final, weights
    def render_rays(
        self,
        models,
        embeddings,
        rays,
        N_samples=64,
        use_disp=False,
        perturb=0.0,
        N_importance=0.0,
        test_time=False,
        noise_std=1.0,
        chunk=1024 * 32,
        white_back=False,
    ):
        """Render a batch of rays: coarse stratified sampling, optional
        hierarchical (fine) resampling, and volume rendering of both passes.
        Returns a dict with ``rgb_*``/``depth_*``/``opacity_*`` entries."""
        # Extract models from lists
        model_coarse = models[0]
        embedding_xyz = embeddings[0]
        embedding_dir = embeddings[1]
        # Decompose the inputs
        N_rays = rays.shape[0]
        rays_o, rays_d = rays[:, 0:3], rays[:, 3:6]  # both (N_rays, 3)
        near, far = rays[:, 6:7], rays[:, 7:8]  # both (N_rays, 1)
        viewdirs = rays_d / flow.norm(rays_d, dim=-1, keepdim=True)
        # Embed direction
        dir_embedded = embedding_dir(viewdirs)  # (N_rays, embed_dir_channels)
        # Sample depth points
        z_steps = flow.linspace(0, 1, N_samples).to_global(
            sbp=rays.sbp, placement=rays.placement
        )  # (N_samples)
        if not use_disp:  # use linear sampling in depth space
            z_vals = near * (1 - z_steps) + far * z_steps
        else:  # use linear sampling in disparity space
            z_vals = 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps)
        z_vals = z_vals.expand(N_rays, N_samples)
        if perturb > 0:  # perturb sampling depths (z_vals)
            z_vals_mid = 0.5 * (
                z_vals[:, :-1] + z_vals[:, 1:]
            )  # (N_rays, N_samples-1) interval mid points
            # get intervals between samples
            upper = flow.cat([z_vals_mid, z_vals[:, -1:]], -1)
            lower = flow.cat([z_vals[:, :1], z_vals_mid], -1)
            v = flow.rand(z_vals.shape).to_global(sbp=rays.sbp, placement=rays.placement)
            perturb_rand = perturb * v
            z_vals = lower + (upper - lower) * perturb_rand
        xyz_coarse_sampled = rays_o.unsqueeze(1) + rays_d.unsqueeze(1) * z_vals.unsqueeze(
            2
        )  # (N_rays, N_samples, 3)
        if test_time:
            # sigma-only pass: skip rgb prediction, return just opacities
            weights_coarse = self.inference(
                rays.shape[0],
                model_coarse,
                embedding_xyz,
                xyz_coarse_sampled,
                rays_d,
                viewdirs,
                dir_embedded,
                z_vals,
                noise_std,
                chunk,
                white_back,
                weights_only=True,
            )
            result = {"opacity_coarse": weights_coarse.sum(1)}
        else:
            rgb_coarse, depth_coarse, weights_coarse = self.inference(
                rays.shape[0],
                model_coarse,
                embedding_xyz,
                xyz_coarse_sampled,
                rays_d,
                viewdirs,
                dir_embedded,
                z_vals,
                noise_std,
                chunk,
                white_back,
                weights_only=False,
            )
            result = {
                "rgb_coarse": rgb_coarse,
                "depth_coarse": depth_coarse,
                "opacity_coarse": weights_coarse.sum(1),
            }
        if N_importance > 0:  # sample points for fine model
            z_vals_mid = 0.5 * (
                z_vals[:, :-1] + z_vals[:, 1:]
            )  # (N_rays, N_samples-1) interval mid points
            z_vals_ = self.sample_pdf(
                z_vals_mid, weights_coarse[:, 1:-1], N_importance, det=(perturb == 0)
            ).detach()
            # detach so that grad doesn't propogate to weights_coarse from here
            z_vals, _ = flow.sort(flow.cat([z_vals, z_vals_], -1), -1)
            xyz_fine_sampled = rays_o.unsqueeze(1) + rays_d.unsqueeze(1) * z_vals.unsqueeze(2)
            # (N_rays, N_samples+N_importance, 3)
            model_fine = models[1]
            rgb_fine, depth_fine, weights_fine = self.inference(
                rays.shape[0],
                model_fine,
                embedding_xyz,
                xyz_fine_sampled,
                rays_d,
                viewdirs,
                dir_embedded,
                z_vals,
                noise_std,
                chunk,
                white_back,
                weights_only=False,
            )
            result["rgb_fine"] = rgb_fine
            result["depth_fine"] = depth_fine
            result["opacity_fine"] = weights_fine.sum(1)
        return result
    def forward_features(self, rays):
        """Do batched inference on rays using chunk."""
        B = rays.shape[0]
        results = defaultdict(list)
        for i in range(0, B, self.chunk):
            rendered_ray_chunks = self.render_rays(
                self.models,
                [self.embedding_xyz, self.embedding_dir],
                rays[i : i + self.chunk],
                self.N_samples,
                self.use_disp,
                self.perturb,
                self.N_importance,
                False,
                self.noise_std,
                self.chunk,  # chunk size is effective in val mode
                self.white_back,
            )
            for k, v in rendered_ray_chunks.items():
                results[k] += [v]
        # concatenate per-chunk results back into full-batch tensors
        for k, v in results.items():
            results[k] = flow.cat(v, 0)
        return results
    def forward(self, rays, rgbs=None, c2w=None, valid_mask=None):
        """
        Inputs:
            rays (tensor): (batchsize, 3+3+2) the set of input rays samples
            rgbs (tensor): (batchsize, 3) the set of input rgbs samples
            c2w (tensor): (3, 4) transformation matrix from camera coordinate to world coordinate
            valid_mask (tensor): (H W) valid color area (unused here)
        Outputs:
            re (dict): regarding the series of outputs such as rgbs and loss obtained from the
                model predictions.
        Mode selection: training when ``c2w is None`` (loss only);
        rendering-only when ``c2w`` is given without ``rgbs``; validation
        (loss + outputs) when both ``c2w`` and ``rgbs`` are given.
        """
        if c2w is None:
            rays = rays.squeeze()  # (H*W, 3)
            rgbs = rgbs.squeeze()  # (H*W, 3)
            results = self.forward_features(rays)
            losses = self.loss_func(results["rgb_coarse"], rgbs)
            if "rgb_fine" in results:
                losses += self.loss_func(results["rgb_fine"], rgbs)
            return {"losses": losses}
        else:
            if rgbs is None:
                rays = rays.squeeze()  # (H*W, 3)
                results = self.forward_features(rays)
                typ = "fine" if "rgb_fine" in results else "coarse"
                re = collections.OrderedDict()
                # placeholder scalar keyed by pass type (alignment with evaluator)
                re[typ] = flow.Tensor([0.0]).to_global(sbp=rays.sbp, placement=rays.placement)
                for key, value in results.items():
                    re[key] = value.unsqueeze(0)
                return re
            else:
                rays = rays.squeeze()  # (H*W, 3)
                rgbs = rgbs.squeeze()  # (H*W, 3)
                results = self.forward_features(rays)
                losses = self.loss_func(results["rgb_coarse"], rgbs)
                if "rgb_fine" in results:
                    losses += self.loss_func(results["rgb_fine"], rgbs)
                typ = "fine" if "rgb_fine" in results else "coarse"
                re = collections.OrderedDict()
                re["losses"] = losses
                re[typ] = flow.Tensor([0.0]).to_global(sbp=losses.sbp, placement=losses.placement)
                for key, value in results.items():
                    re[key] = value.unsqueeze(0)
                re["rgbs"] = rgbs.unsqueeze(0)
                return re
| 18,288 | 38.415948 | 100 | py |
libai | libai-main/projects/Couplets/distribute_infer.py | import os
import sys
dir_path = os.path.abspath(os.path.dirname(__file__)) # noqa
sys.path.append(dir_path) # noqa
import oneflow as flow # noqa
from dataset.mask import make_sequence_mask # noqa
from tokenizer.tokenizer import CoupletsTokenizer # noqa
from libai.data.structures import DistTensorData # noqa
from libai.inference.basic import BasePipeline # noqa
from libai.utils import distributed as dist # noqa
def get_global_tensor(rawdata):
    """Turn raw python data into a batch-1 global (distributed) long tensor.

    The input is wrapped into shape ``(1, ...)`` and converted to a global
    tensor through :class:`DistTensorData` so it can feed the parallel model.
    """
    local_tensor = flow.tensor(rawdata, dtype=flow.long)
    dist_data = DistTensorData(local_tensor.unsqueeze(0))
    dist_data.to_global()
    return dist_data.tensor
class CoupletPipeline(BasePipeline):
    """Inference pipeline that generates the second line of a Chinese couplet.

    Follows the BasePipeline contract: ``preprocess -> forward -> postprocess``,
    with greedy autoregressive decoding implemented in :meth:`generate`.
    """

    def _parse_parameters(self, **pipeline_parameters):
        # All user-supplied kwargs are routed to the preprocess step;
        # forward/postprocess take no extra parameters.
        preprocess_params = {**pipeline_parameters}
        forward_params = {}
        postprocess_params = {}
        return preprocess_params, forward_params, postprocess_params

    def update_cfg(
        self,
        data_parallel=1,
        tensor_parallel=1,
        pipeline_parallel=1,
        pipeline_stage_id=None,
        pipeline_num_layers=None,
    ):
        """Forward parallelism settings to the base class, then pin the vocab path."""
        super().update_cfg(
            data_parallel,
            tensor_parallel,
            pipeline_parallel,
            pipeline_stage_id,
            pipeline_num_layers,
        )
        # Hard-coded vocabulary location used by build_tokenizer below.
        self.cfg.vocab_file = "data_test/couplets/vocab.txt"

    def build_tokenizer(self, cfg):
        return CoupletsTokenizer(cfg.vocab_file)

    def generate(self, sentence):
        """Greedy-decode the reply line for ``sentence`` (the first couplet line)."""
        # Encode: split the sentence into single characters, wrap with <bos>/<eos>.
        sentence = " ".join([word for word in sentence])
        tokens_list = self.tokenizer.tokenize(sentence)
        encoder_ids_list = (
            [self.tokenizer.bos_id]
            + self.tokenizer.convert_tokens_to_ids(tokens_list)
            + [self.tokenizer.eos_id]
        )
        seq_len = len(encoder_ids_list)
        encoder_input_ids = get_global_tensor(encoder_ids_list)
        encoder_states = self.model.encode(encoder_input_ids, None)
        # Decode: start from <bos>, append the argmax token one step at a time.
        decoder_ids_list = [self.tokenizer.bos_id]
        decoder_input_ids = get_global_tensor(decoder_ids_list)
        # Budget of input length + 10 steps bounds the output even if <eos>
        # never appears.
        for i in range(seq_len + 10):
            # Causal mask over everything decoded so far.
            mask_array = make_sequence_mask(decoder_ids_list)
            decoder_attn_mask = get_global_tensor(mask_array)
            logits = self.model.decode(decoder_input_ids, decoder_attn_mask, encoder_states, None)
            prob = logits[:, -1]
            _, next_word = flow.max(prob, dim=1)
            next_word = next_word.item()
            decoder_ids_list = decoder_ids_list + [next_word]
            decoder_input_ids = get_global_tensor(decoder_ids_list)
            if next_word == self.tokenizer.eos_id:
                break
        result_tokens_list = self.tokenizer.convert_ids_to_tokens(decoder_ids_list)
        # Strip the special tokens before returning the plain string.
        return "".join(result_tokens_list).replace("<bos>", "").replace("<eos>", "")

    def preprocess(self, sentence) -> dict:
        """Wrap the raw input string into the dict expected by forward()."""
        input_dict = {"sentence": sentence}
        return input_dict

    def forward(self, input_dict) -> dict:
        model_output = self.generate(input_dict["sentence"])
        model_out_dict = {"下联": model_output}
        return model_out_dict

    def postprocess(self, model_out_dict) -> dict:
        # No post-processing needed; forward() already returns the final dict.
        return model_out_dict
if __name__ == "__main__":
    # Demo entry point: build a 4-stage pipeline-parallel inference pipeline
    # from the project's config and run it on a sample first couplet line.
    pipeline = CoupletPipeline(
        "projects/Couplets/configs/config.py",
        data_parallel=1,
        tensor_parallel=1,
        pipeline_parallel=4,
        pipeline_stage_id=None,
        pipeline_num_layers=12,
        model_path="output/couplet/model_final/model",
        mode="libai",
    )
    out = pipeline("滚滚长江东逝水")
    # Only rank 0 prints, to avoid duplicated output under multi-process launch.
    if dist.is_main_process():
        print(out)
| 3,590 | 31.351351 | 98 | py |
libai | libai-main/projects/Couplets/infer.py | import os
import sys
dir_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(dir_path)
import oneflow as flow # noqa
from dataset.mask import make_padding_mask, make_sequence_mask # noqa
from modeling.model import Seq2Seq # noqa
from tokenizer.tokenizer import CoupletsTokenizer # noqa
from libai.config import LazyConfig # noqa
from libai.data.structures import DistTensorData # noqa
from libai.engine.default import DefaultTrainer # noqa
from libai.utils.checkpoint import Checkpointer # noqa
def get_global_tensor(rawdata):
    """Convert raw python data into a (1, ...) global long tensor.

    Mirrors the helper in distribute_infer.py: wrap in DistTensorData and
    move to global placement for the distributed model.
    """
    wrapped = DistTensorData(flow.tensor(rawdata, dtype=flow.long).unsqueeze(0))
    wrapped.to_global()
    return wrapped.tensor
class GeneratorForEager:
    """Eager-mode couplet generator: load config + checkpoint, greedy-decode.

    NOTE(review): ``infer`` duplicates CoupletPipeline.generate in
    distribute_infer.py almost line for line — a shared helper would avoid drift.
    """

    def __init__(self, config_file, checkpoint_file, vocab_file):
        cfg = LazyConfig.load(config_file)
        # Build the model exactly as training does, then load trained weights.
        self.model = DefaultTrainer.build_model(cfg).eval()
        Checkpointer(self.model).load(checkpoint_file)
        self.tokenizer = CoupletsTokenizer(vocab_file)

    def infer(self, sentence):
        """Greedy-decode the reply line for ``sentence`` (the first couplet line)."""
        # Encode: split into single characters and add <bos>/<eos>.
        sentence = " ".join([word for word in sentence])
        tokens_list = self.tokenizer.tokenize(sentence)
        encoder_ids_list = (
            [self.tokenizer.bos_id]
            + self.tokenizer.convert_tokens_to_ids(tokens_list)
            + [self.tokenizer.eos_id]
        )
        seq_len = len(encoder_ids_list)
        encoder_input_ids = get_global_tensor(encoder_ids_list)
        encoder_states = self.model.encode(encoder_input_ids, None)
        # Decode: grow the sequence one argmax token per step, capped at
        # seq_len + 10 steps in case <eos> never appears.
        decoder_ids_list = [self.tokenizer.bos_id]
        decoder_input_ids = get_global_tensor(decoder_ids_list)
        for i in range(seq_len + 10):
            mask_array = make_sequence_mask(decoder_ids_list)
            decoder_attn_mask = get_global_tensor(mask_array)
            logits = self.model.decode(decoder_input_ids, decoder_attn_mask, encoder_states, None)
            prob = logits[:, -1]
            _, next_word = flow.max(prob, dim=1)
            next_word = next_word.item()
            decoder_ids_list = decoder_ids_list + [next_word]
            decoder_input_ids = get_global_tensor(decoder_ids_list)
            if next_word == self.tokenizer.eos_id:
                break
        result_tokens_list = self.tokenizer.convert_ids_to_tokens(decoder_ids_list)
        # Drop the special tokens from the surface string.
        return "".join(result_tokens_list).replace("<bos>", "").replace("<eos>", "")
if __name__ == "__main__":
    # Interactive demo: load the trained model and prompt for the first line.
    config_file = "output/couplet/config.yaml"
    checkpoint_file = "output/couplet/model_final"
    vocab_file = "data_test/couplets/vocab.txt"
    generator = GeneratorForEager(config_file, checkpoint_file, vocab_file)
    sentence = input("上联:\n")
    result = generator.infer(sentence)
    print("下联:\n" + result)
| 2,739 | 36.534247 | 98 | py |
libai | libai-main/projects/Couplets/dataset/dataset.py | import os
import oneflow as flow
from dataset.mask import make_padding_mask, make_sequence_mask
from oneflow.utils.data import Dataset
from tokenizer.tokenizer import CoupletsTokenizer
from libai.data.structures import DistTensorData, Instance
class CoupletsDataset(Dataset):
    """Paired couplet dataset: ``in.txt`` lines are sources, ``out.txt`` targets.

    Each item is tokenized, truncated/padded to ``maxlen``, and packed with
    precomputed attention masks into a libai :class:`Instance`.
    """

    def __init__(self, path, is_train=True, maxlen=64):
        # Train/test split lives in sibling sub-directories of ``path``.
        if is_train:
            datapath = os.path.join(path, "train")
        else:
            datapath = os.path.join(path, "test")
        src = []
        with open(f"{datapath}/in.txt", "r") as f_src:
            for line in f_src.readlines():
                src.append(line.strip("\n"))
        tgt = []
        with open(f"{datapath}/out.txt", "r") as f_tgt:
            for line in f_tgt.readlines():
                tgt.append(line.strip("\n"))
        # src and tgt are expected to be line-aligned; zip pairs them up.
        self.data = list(zip(src, tgt))
        self.tokenizer = CoupletsTokenizer(f"{path}/vocab.txt")
        self.maxlen = maxlen
        # Cache the special-token ids used for padding/truncation below.
        self.unk_id = self.tokenizer.unk_id
        self.pad_id = self.tokenizer.pad_id
        self.bos_id = self.tokenizer.bos_id
        self.eos_id = self.tokenizer.eos_id

    def __len__(self):
        return len(self.data)

    def text2ids(self, text):
        """Tokenize, truncate to maxlen-2, wrap in <bos>/<eos>, right-pad to maxlen."""
        tokens = self.tokenizer.tokenize(text)
        ids = self.tokenizer.convert_tokens_to_ids(tokens)
        ids = ids[: self.maxlen - 2]
        ids = [self.bos_id] + ids + [self.eos_id]
        ids = ids + [self.pad_id] * (self.maxlen - len(ids))
        return ids

    def __getitem__(self, index):
        sample = self.data[index]
        src_ids = self.text2ids(sample[0])
        tgt_ids = self.text2ids(sample[1])
        # Encoder self-attention may see every non-pad source position.
        encoder_self_attn_mask = make_padding_mask(src_ids, src_ids, self.pad_id)
        # Decoder self-attention combines padding and causal (lower-triangular) masks.
        decoder_self_attn_mask = make_padding_mask(
            tgt_ids, tgt_ids, self.pad_id
        ) * make_sequence_mask(tgt_ids)
        # Cross-attention: target queries over non-pad source keys.
        cross_attn_mask = make_padding_mask(tgt_ids, src_ids, self.pad_id)
        return Instance(
            encoder_input_ids=DistTensorData(flow.tensor(src_ids, dtype=flow.long)),
            decoder_input_ids=DistTensorData(flow.tensor(tgt_ids, dtype=flow.long)),
            encoder_attn_mask=DistTensorData(flow.tensor(encoder_self_attn_mask, dtype=flow.long)),
            decoder_attn_mask=DistTensorData(flow.tensor(decoder_self_attn_mask, dtype=flow.long)),
            encoder_decoder_attn_mask=DistTensorData(flow.tensor(cross_attn_mask, dtype=flow.long)),
        )
| 2,414 | 37.951613 | 100 | py |
libai | libai-main/projects/Couplets/dataset/mask.py | import numpy as np
def make_padding_mask(q_ids, kv_ids, pad_id):
    """Build a padding attention mask of shape ``(len(q_ids), len(kv_ids))``.

    Entry ``[i, j]`` is 1.0 when neither ``q_ids[i]`` nor ``kv_ids[j]`` is the
    pad token, else 0.0.
    """
    query_valid = np.not_equal(q_ids, pad_id)
    key_valid = np.not_equal(kv_ids, pad_id)
    return np.outer(query_valid, key_valid).astype(float)
def make_sequence_mask(ids):
    """Build a causal (lower-triangular) mask of shape ``(len(ids), len(ids))``.

    Position ``i`` may attend to positions ``0..i`` only.
    """
    seqlen = len(ids)
    return np.tril(np.ones((seqlen, seqlen)))
| 382 | 24.533333 | 66 | py |
libai | libai-main/projects/Couplets/tokenizer/tokenizer.py | import collections
def load_vocab(vocab_file):
    """Load a vocabulary file (one token per line) into token -> index order.

    Reading stops at the first empty line, which also covers end-of-file.
    Returns an ``OrderedDict`` mapping each token to its line index.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        for index, line in enumerate(reader):
            token = line.strip("\n")
            if not token:
                break
            vocab[token] = index
    return vocab
class CoupletsTokenizer:
    """Whitespace tokenizer backed by a fixed vocabulary file.

    Unknown tokens map to ``<unk>``; special-token ids (<unk>, <pad>, <bos>,
    <eos>) are cached as attributes for convenience.
    """

    def __init__(self, vocab_file):
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {index: token for token, index in self.vocab.items()}
        self.vocab_tokens = self.vocab.keys()
        self.unk_id = self.vocab["<unk>"]
        self.pad_id = self.vocab["<pad>"]
        self.bos_id = self.vocab["<bos>"]
        self.eos_id = self.vocab["<eos>"]

    def tokenize(self, text):
        """Split ``text`` on whitespace into a token list."""
        return text.split()

    def convert_tokens_to_ids(self, tokens_list):
        """Map tokens to ids, substituting ``<unk>`` for out-of-vocab tokens."""
        ids_list = []
        for token in tokens_list:
            key = token if token in self.vocab_tokens else "<unk>"
            ids_list.append(self.vocab[key])
        return ids_list

    def convert_ids_to_tokens(self, ids_list):
        """Map ids back to their token strings."""
        return [self.inv_vocab[token_id] for token_id in ids_list]
| 1,361 | 27.978723 | 62 | py |
libai | libai-main/projects/Couplets/configs/config.py | import os
import sys
dir_path = os.path.abspath(os.path.dirname(__file__))
dir_path = "/".join(dir_path.split("/")[:-1])
sys.path.append(dir_path)
from omegaconf import OmegaConf # noqa
from dataset.dataset import CoupletsDataset # noqa
from modeling.model import Seq2Seq # noqa
from libai.config import get_config # noqa
from libai.config import LazyCall # noqa
from libai.data.build import build_nlp_train_loader, build_nlp_test_loader # noqa
# Pull the shared optimizer / graph / training defaults from libai's common configs.
optim = get_config("common/optim.py").optim
graph = get_config("common/models/graph.py").graph
train = get_config("common/train.py").train
# Lazily-built train/test data loaders over the couplets dataset.
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_loader)(
    dataset=[
        LazyCall(CoupletsDataset)(
            path="data_test/couplets",
            is_train=True,
            maxlen=64,
        )
    ],
    num_workers=4,
)
dataloader.test = [
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(CoupletsDataset)(
            path="data_test/couplets",
            is_train=False,
            maxlen=64,
        ),
        num_workers=4,
    )
]
# Hyper-parameters for the Seq2Seq transformer (consumed via Seq2Seq -> TransformerModel).
transformer_cfg = dict(
    vocab_size=9027,
    max_position_embeddings=64,
    hidden_size=512,
    intermediate_size=512,
    hidden_layers=6,
    num_attention_heads=8,
    embedding_dropout_prob=0.1,
    hidden_dropout_prob=0.1,
    attention_dropout_prob=0.1,
    initializer_range=0.02,
    layernorm_epsilon=1e-5,
    bias_gelu_fusion=False,
    bias_dropout_fusion=False,
    scale_mask_softmax_fusion=False,
    apply_query_key_layer_scaling=True,
)
model = LazyCall(Seq2Seq)(cfg=transformer_cfg)
# Training-loop overrides on top of the common defaults.
train.update(
    dict(
        rdma_enabled=False,
        activation_checkpoint=dict(enabled=False),
        amp=dict(enabled=False),
        output_dir="output/couplet/",
        train_micro_batch_size=128,
        test_micro_batch_size=32,
        train_epoch=20,
        train_iter=0,
        log_period=10,
        warmup_ratio=0.01,
        dist=dict(
            data_parallel_size=1,
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
            pipeline_stage_id=None,
            # Encoder + decoder layers together make up the pipeline depth.
            pipeline_num_layers=model.cfg.hidden_layers * 2,
        ),
        evaluation=dict(
            enabled=False,
        ),
    )
)
| 2,244 | 25.104651 | 82 | py |
libai | libai-main/projects/Couplets/modeling/model.py | import oneflow as flow
from oneflow import nn
from libai.layers.cross_entropy import ParallelCrossEntropyLoss
from libai.utils import distributed as dist
from .transformer_model import TransformerModel
class Seq2SeqLoss(nn.Module):
    """Shifted language-modeling loss for seq2seq training.

    Logits at position ``t`` are scored against the label at position ``t+1``,
    then averaged with a (model-)parallel cross-entropy.
    """

    def __init__(self):
        super().__init__()
        self.lm_loss = ParallelCrossEntropyLoss()

    def forward(self, logits, lm_labels):
        # Drop the final logit and the first label so predictions line up
        # with the "next token" targets.
        shifted_logits = logits[:, :-1, :]
        shifted_labels = lm_labels[:, 1:]
        return self.lm_loss(shifted_logits, shifted_labels).mean()
class Seq2Seq(nn.Module):
    """Transformer encoder-decoder wrapper: loss in training, logits in eval.

    Also exposes separate :meth:`encode` / :meth:`decode` steps used by the
    autoregressive inference scripts.
    """

    def __init__(self, cfg):
        super().__init__()
        self.language_model = TransformerModel(cfg)
        self.loss_func = Seq2SeqLoss()

    def forward(
        self,
        encoder_input_ids,
        decoder_input_ids,
        encoder_attn_mask,
        decoder_attn_mask,
        encoder_decoder_attn_mask,
    ):
        """Full encoder-decoder pass.

        Returns ``{"total_loss": ...}`` in training mode (decoder inputs act
        as the shifted LM labels), else flattened ``{"prediction_scores": ...}``.
        """
        logits = self.language_model(
            encoder_input_ids,
            decoder_input_ids,
            encoder_attn_mask,
            decoder_attn_mask,
            encoder_decoder_attn_mask,
        )
        if self.training:
            loss = self.loss_func(logits, decoder_input_ids)
            return {"total_loss": loss}
        # Flatten to (batch * seq_len, vocab) for evaluation consumers.
        logits = logits.view(-1, logits.shape[-1])
        return {"prediction_scores": logits}

    def encode(
        self,
        encoder_input_ids,
        encoder_attn_mask,
    ):
        """Run only the encoder; ``encoder_attn_mask`` may be None (no masking)."""
        encoder_input_embeddings = self.language_model.embedding(encoder_input_ids)
        if encoder_attn_mask is not None:
            encoder_extended_attn_mask = self.language_model.extended_attn_mask(encoder_attn_mask)
            encoder_states = self.language_model.encoder(
                encoder_input_embeddings,
                encoder_extended_attn_mask,
            )
        else:
            encoder_states = self.language_model.encoder(
                encoder_input_embeddings,
                None,
            )
        return encoder_states

    def decode(
        self,
        decoder_input_ids,
        decoder_attn_mask,
        encoder_states,
        encoder_decoder_attn_mask,
    ):
        """Run the decoder over cached ``encoder_states`` and project to vocab logits.

        ``encoder_decoder_attn_mask`` may be None to skip cross-attention masking.
        """
        decoder_input_embeddings = self.language_model.embedding(decoder_input_ids)
        decoder_extended_attn_mask = self.language_model.extended_attn_mask(decoder_attn_mask)
        if encoder_decoder_attn_mask is not None:
            encoder_decoder_extended_attn_mask = self.language_model.extended_attn_mask(
                encoder_decoder_attn_mask
            )
            decoder_states = self.language_model.decoder(
                decoder_input_embeddings,
                decoder_extended_attn_mask,
                encoder_states,
                encoder_decoder_extended_attn_mask,
            )
        else:
            decoder_states = self.language_model.decoder(
                decoder_input_embeddings,
                decoder_extended_attn_mask,
                encoder_states,
                None,
            )
        logits = self.language_model.lm_head(decoder_states)
        return logits

    @staticmethod
    def set_pipeline_stage_id(model):
        """Assign each submodule to its pipeline-parallel stage.

        Handles both the OneFlow 0.8 `.config`/`.origin` API and the newer
        `.to(nn.Module)` / `.to(flow.nn.graph.GraphModule)` API.
        """
        dist_utils = dist.get_dist_util()
        from .transformer_model import ExtendedMask, TransformerEmbedding, TransformerLayer

        # Set pipeline parallelism stage_id
        if hasattr(model.language_model.lm_head, "config"):
            # Old API in OneFlow 0.8
            for module_block in model.modules():
                # module.origin can get the original module
                if isinstance(module_block.origin, TransformerEmbedding):
                    # Embedding and mask expansion run on the first stage.
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.origin, ExtendedMask):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.origin, TransformerLayer):
                    # Each transformer layer carries its own layer_idx -> stage.
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
            # Set the lm_head stage id
            model.language_model.lm_head.config.set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
        else:
            for module_block in model.modules():
                if isinstance(module_block.to(nn.Module), TransformerEmbedding):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.to(nn.Module), ExtendedMask):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.to(nn.Module), TransformerLayer):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
            # Set the lm_head stage id
            model.language_model.lm_head.to(flow.nn.graph.GraphModule).set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
| 5,603 | 36.610738 | 98 | py |
libai | libai-main/projects/Couplets/modeling/transformer_model.py | import math
import oneflow as flow
from oneflow import nn
from libai.config import configurable
from libai.layers import (
LayerNorm,
Linear,
SinePositionalEmbedding,
TransformerLayer,
VocabEmbedding,
)
from libai.models.utils import init_method_normal, scaled_init_method_normal
from libai.utils import distributed as dist
class ExtendedMask(flow.nn.Module):
    """Insert a broadcastable head dimension into an attention mask via unsqueeze(1)."""

    def forward(self, x):
        return x.unsqueeze(1)
class TransformerEmbedding(nn.Module):
    """Token embedding scaled by sqrt(hidden_size) plus sinusoidal position encoding."""

    def __init__(
        self,
        vocab_size,
        hidden_size,
        max_sequence_length,
        embedding_dropout_prob,
        init_method=nn.init.xavier_normal_,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.word_embedding = VocabEmbedding(vocab_size, hidden_size, init_method=init_method)
        self.positional_encoding = SinePositionalEmbedding(max_sequence_length, hidden_size)
        # Precomputed (1, max_sequence_length) position ids, broadcast across
        # devices and placed on the first pipeline stage.
        self.position_ids = flow.arange(
            max_sequence_length,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        ).unsqueeze(0)
        self.embedding_dropout = nn.Dropout(embedding_dropout_prob)

    def forward(self, input_ids):
        seq_length = input_ids.size()[1]
        word_embeddings = self.word_embedding(input_ids)
        # Slice position ids to the current sequence length and match the
        # input's sbp so the addition below is legal under parallelism.
        position_ids = (
            self.position_ids[:, :seq_length].expand_as(input_ids).to_global(sbp=input_ids.sbp)
        )
        positional_encodings = self.positional_encoding(position_ids)
        # Scale token embeddings by sqrt(d_model) before adding positions.
        embeddings = word_embeddings * math.sqrt(self.hidden_size) + positional_encodings
        embeddings = self.embedding_dropout(embeddings)
        return embeddings
class TransformerEncoder(nn.Module):
    """Stack of ``hidden_layers`` encoder TransformerLayers plus a final LayerNorm.

    Layer indices 0..hidden_layers-1 drive pipeline-stage placement.
    """

    def __init__(
        self,
        hidden_size=512,
        ffn_hidden_size=512,
        hidden_layers=6,
        num_attention_heads=8,
        is_decoder=False,
        initializer_range=0.02,
        attention_dropout_prob=0.1,
        output_dropout_prob=0.1,
        layernorm_epsilon=1e-5,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=True,
    ):
        super().__init__()
        self.encoder_layers = nn.ModuleList(
            [
                TransformerLayer(
                    hidden_size=hidden_size,
                    ffn_hidden_size=ffn_hidden_size,
                    num_attention_heads=num_attention_heads,
                    is_decoder=is_decoder,
                    attention_dropout_prob=attention_dropout_prob,
                    output_dropout_prob=output_dropout_prob,
                    layernorm_epsilon=layernorm_epsilon,
                    bias_gelu_fusion=bias_gelu_fusion,
                    bias_dropout_fusion=bias_dropout_fusion,
                    scale_mask_softmax_fusion=scale_mask_softmax_fusion,
                    apply_query_key_layer_scaling=apply_query_key_layer_scaling,
                    init_method=init_method_normal(initializer_range),
                    # Output projections use the depth-scaled initializer.
                    output_layer_init_method=scaled_init_method_normal(
                        initializer_range, hidden_layers
                    ),
                    layer_idx=i,
                )
                for i in range(hidden_layers)
            ]
        )
        self.encoder_final_layernorm = LayerNorm(
            (hidden_size,), eps=layernorm_epsilon, layer_idx=hidden_layers - 1
        )

    def forward(self, encoder_input_embeddings, encoder_extended_attn_mask):
        """Apply each layer in turn, then the final LayerNorm."""
        enc_hidden_states = encoder_input_embeddings
        for layer in self.encoder_layers:
            enc_hidden_states = layer(enc_hidden_states, encoder_extended_attn_mask)
        encoder_states = self.encoder_final_layernorm(enc_hidden_states)
        return encoder_states
class TransformerDecoder(nn.Module):
    """Stack of ``hidden_layers`` decoder TransformerLayers plus a final LayerNorm.

    Layer indices run hidden_layers..2*hidden_layers-1 so decoder layers land
    on pipeline stages after the encoder's.
    """

    def __init__(
        self,
        hidden_size=512,
        ffn_hidden_size=512,
        hidden_layers=6,
        num_attention_heads=8,
        is_decoder=True,
        initializer_range=0.02,
        attention_dropout_prob=0.1,
        output_dropout_prob=0.1,
        layernorm_epsilon=1e-5,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=True,
    ):
        super().__init__()
        self.decoder_layers = nn.ModuleList(
            [
                TransformerLayer(
                    hidden_size=hidden_size,
                    ffn_hidden_size=ffn_hidden_size,
                    num_attention_heads=num_attention_heads,
                    is_decoder=is_decoder,
                    attention_dropout_prob=attention_dropout_prob,
                    output_dropout_prob=output_dropout_prob,
                    layernorm_epsilon=layernorm_epsilon,
                    bias_gelu_fusion=bias_gelu_fusion,
                    bias_dropout_fusion=bias_dropout_fusion,
                    scale_mask_softmax_fusion=scale_mask_softmax_fusion,
                    apply_query_key_layer_scaling=apply_query_key_layer_scaling,
                    init_method=init_method_normal(initializer_range),
                    output_layer_init_method=scaled_init_method_normal(
                        initializer_range, hidden_layers
                    ),
                    layer_idx=i,
                )
                # Continue layer numbering after the encoder's layers.
                for i in range(hidden_layers, 2 * hidden_layers)
            ]
        )
        self.decoder_final_layernorm = LayerNorm(
            (hidden_size,), eps=layernorm_epsilon, layer_idx=2 * hidden_layers - 1
        )

    def forward(
        self,
        decoder_input_embeddings,
        decoder_extended_attn_mask,
        encoder_states,
        encoder_decoder_extended_attn_mask,
    ):
        """Apply each decoder layer (self- + cross-attention), then LayerNorm."""
        dec_hidden_states = decoder_input_embeddings
        for layer in self.decoder_layers:
            dec_hidden_states = layer(
                dec_hidden_states,
                decoder_extended_attn_mask,
                encoder_states,
                encoder_decoder_extended_attn_mask,
            )
        decoder_states = self.decoder_final_layernorm(dec_hidden_states)
        return decoder_states
class TransformerModel(nn.Module):
    """Complete encoder-decoder transformer with a shared embedding and LM head."""

    @configurable
    def __init__(
        self,
        vocab_size,
        max_position_embeddings,
        hidden_size=512,
        intermediate_size=512,
        hidden_layers=6,
        num_attention_heads=8,
        embedding_dropout_prob=0.1,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        initializer_range=0.02,
        layernorm_epsilon=1e-5,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=True,
    ):
        super().__init__()
        # Embedding is shared by encoder and decoder inputs (see forward()).
        self.embedding = TransformerEmbedding(
            vocab_size,
            hidden_size,
            max_position_embeddings,
            embedding_dropout_prob,
            init_method=init_method_normal(initializer_range),
        )
        self.extended_attn_mask = ExtendedMask()
        self.encoder = TransformerEncoder(
            hidden_size=hidden_size,
            ffn_hidden_size=intermediate_size,
            hidden_layers=hidden_layers,
            num_attention_heads=num_attention_heads,
            is_decoder=False,
            initializer_range=0.02,
            attention_dropout_prob=attention_dropout_prob,
            output_dropout_prob=hidden_dropout_prob,
            layernorm_epsilon=layernorm_epsilon,
            bias_gelu_fusion=bias_gelu_fusion,
            bias_dropout_fusion=bias_dropout_fusion,
            scale_mask_softmax_fusion=scale_mask_softmax_fusion,
            apply_query_key_layer_scaling=apply_query_key_layer_scaling,
        )
        self.decoder = TransformerDecoder(
            hidden_size=hidden_size,
            ffn_hidden_size=intermediate_size,
            hidden_layers=hidden_layers,
            num_attention_heads=num_attention_heads,
            is_decoder=True,
            initializer_range=0.02,
            attention_dropout_prob=attention_dropout_prob,
            output_dropout_prob=hidden_dropout_prob,
            layernorm_epsilon=layernorm_epsilon,
            bias_gelu_fusion=bias_gelu_fusion,
            bias_dropout_fusion=bias_dropout_fusion,
            scale_mask_softmax_fusion=scale_mask_softmax_fusion,
            apply_query_key_layer_scaling=apply_query_key_layer_scaling,
        )
        # Final projection to vocabulary logits, placed on the last stage.
        self.lm_head = Linear(
            hidden_size,
            vocab_size,
            layer_idx=-1,
        )

    @classmethod
    def from_config(cls, cfg):
        """Translate a config object into __init__ keyword arguments (for @configurable)."""
        return {
            "vocab_size": cfg.vocab_size,
            "max_position_embeddings": cfg.max_position_embeddings,
            "hidden_size": cfg.hidden_size,
            "intermediate_size": cfg.intermediate_size,
            "hidden_layers": cfg.hidden_layers,
            "num_attention_heads": cfg.num_attention_heads,
            "embedding_dropout_prob": cfg.embedding_dropout_prob,
            "hidden_dropout_prob": cfg.hidden_dropout_prob,
            "attention_dropout_prob": cfg.attention_dropout_prob,
            "initializer_range": cfg.initializer_range,
            "layernorm_epsilon": cfg.layernorm_epsilon,
            "bias_gelu_fusion": cfg.bias_gelu_fusion,
            "bias_dropout_fusion": cfg.bias_dropout_fusion,
            "scale_mask_softmax_fusion": cfg.scale_mask_softmax_fusion,
            "apply_query_key_layer_scaling": cfg.apply_query_key_layer_scaling,
        }

    def forward(
        self,
        encoder_input_ids,
        decoder_input_ids,
        encoder_attn_mask,
        decoder_attn_mask,
        encoder_decoder_attn_mask,
    ):
        """Embed both sides, expand the three masks, run encoder then decoder,
        and project decoder states to vocabulary logits."""
        encoder_input_embeddings = self.embedding(encoder_input_ids)
        decoder_input_embeddings = self.embedding(decoder_input_ids)
        encoder_extended_attn_mask = self.extended_attn_mask(encoder_attn_mask)
        decoder_extended_attn_mask = self.extended_attn_mask(decoder_attn_mask)
        encoder_decoder_extended_attn_mask = self.extended_attn_mask(encoder_decoder_attn_mask)
        encoder_states = self.encoder(encoder_input_embeddings, encoder_extended_attn_mask)
        decoder_states = self.decoder(
            decoder_input_embeddings,
            decoder_extended_attn_mask,
            encoder_states,
            encoder_decoder_extended_attn_mask,
        )
        logits = self.lm_head(decoder_states)
        return logits
| 10,592 | 35.527586 | 95 | py |
libai | libai-main/projects/MT5/mt5_model.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
import oneflow.nn as nn
from libai.config import configurable
from libai.inference.generator.generation_utils import Generator
from libai.layers import Linear, LMLogits, RMSLayerNorm
from libai.models.utils import init_method_normal, scaled_init_method_normal
from libai.utils import distributed as dist
from projects.MT5.layers.embed_layer import MT5Embedding
from projects.MT5.layers.loss_layer import MT5Loss
from projects.MT5.layers.mask_layer import ExtendedMask
from projects.MT5.layers.transformer_layer import TransformerLayer
from projects.MT5.utils.mt5_loader import T5LoaderHuggerFace
class MT5Model(flow.nn.Module, Generator):
    """T5/mT5 encoder-decoder with relative position bias and KV caching.

    ``model_type`` selects the output head: "mt5" uses an untied Linear LM
    head, anything else ties logits to the input embedding via LMLogits.
    Incremental decoding is supported through ``set_cache`` / ``use_cache``.
    """

    @configurable
    def __init__(
        self,
        vocab_size,
        hidden_size,
        hidden_layers,
        num_attention_heads,
        head_size,
        intermediate_size,
        embedding_dropout_prob,
        hidden_dropout_prob,
        attention_probs_dropout_prob,
        relative_attention_num_buckets,
        padding_idx=None,
        initializer_range=0.02,
        layernorm_eps=1e-12,
        amp_enabled=False,
        model_type="mt5",
        cfg=None,
    ) -> None:
        super().__init__()
        self.cfg = cfg
        self.model_type = model_type
        init_method = init_method_normal(initializer_range)
        scaled_init_method = scaled_init_method_normal(initializer_range, hidden_layers)
        self.embedding = MT5Embedding(
            hidden_size=hidden_size,
            vocab_size=vocab_size,
            embedding_dropout_prob=embedding_dropout_prob,
            init_method=init_method,
            amp_enabled=amp_enabled,
        )
        self.extended_attn_mask = ExtendedMask()
        encoder_layers = flow.nn.ModuleList(
            [
                TransformerLayer(
                    hidden_size=hidden_size,
                    ffn_hidden_size=intermediate_size,
                    num_attention_heads=num_attention_heads,
                    head_size=head_size,
                    relative_attention_num_buckets=relative_attention_num_buckets,
                    is_decoder=False,
                    attention_dropout_prob=attention_probs_dropout_prob,
                    output_dropout_prob=hidden_dropout_prob,
                    layernorm_epsilon=layernorm_eps,
                    init_method=init_method,
                    output_layer_init_method=scaled_init_method,
                    padding_idx=padding_idx,
                    layer_idx=i,
                    model_type=model_type,
                    # Only the first layer owns the relative-attention bias
                    # table (T5 convention); later layers reuse its bias.
                    has_relative_attention_bias=bool(i == 0),
                )
                for i in range(hidden_layers)
            ]
        )
        encoder_final_layernorm = RMSLayerNorm(
            (hidden_size,),
            eps=layernorm_eps,
            layer_idx=hidden_layers - 1,
        )
        self.encoder = flow.nn.Sequential()
        self.encoder.add_module("layers", encoder_layers)
        self.encoder.add_module("final_layernorm", encoder_final_layernorm)
        decoder_layers = flow.nn.ModuleList(
            [
                TransformerLayer(
                    hidden_size=hidden_size,
                    ffn_hidden_size=intermediate_size,
                    num_attention_heads=num_attention_heads,
                    head_size=head_size,
                    relative_attention_num_buckets=relative_attention_num_buckets,
                    is_decoder=True,
                    attention_dropout_prob=attention_probs_dropout_prob,
                    output_dropout_prob=hidden_dropout_prob,
                    layernorm_epsilon=layernorm_eps,
                    init_method=init_method,
                    output_layer_init_method=scaled_init_method,
                    padding_idx=padding_idx,
                    layer_idx=i,
                    model_type=model_type,
                    # First decoder layer (i == hidden_layers) owns the bias.
                    has_relative_attention_bias=bool(i - hidden_layers == 0),
                )
                # Decoder layer indices continue after the encoder's for
                # pipeline-stage placement.
                for i in range(hidden_layers, 2 * hidden_layers)
            ]
        )
        decoder_final_layernorm = RMSLayerNorm(
            (hidden_size,),
            eps=layernorm_eps,
            layer_idx=2 * hidden_layers - 1,
        )
        self.decoder = flow.nn.Sequential()
        self.decoder.add_module("layers", decoder_layers)
        self.decoder.add_module("final_layernorm", decoder_final_layernorm)
        # Incremental-decoding state (populated by set_cache during generation).
        self.past_key_values = [None] * len(self.decoder.layers)
        self.encoder_states = None
        self.past_length = 0
        if model_type == "mt5":
            self.lm_head = Linear(
                hidden_size, vocab_size, bias=False, layer_idx=2 * hidden_layers - 1
            )
        else:
            # Tied head: projects with the embedding matrix (see forward()).
            self.lm_head = LMLogits(vocab_size, bias=False)

    @classmethod
    def from_config(cls, cfg):
        """Translate a config object into __init__ kwargs (for @configurable)."""
        return {
            "vocab_size": cfg.vocab_size,
            "hidden_size": cfg.hidden_size,
            "hidden_layers": cfg.hidden_layers,
            "num_attention_heads": cfg.num_attention_heads,
            "head_size": cfg.head_size,
            "intermediate_size": cfg.intermediate_size,
            "embedding_dropout_prob": cfg.embedding_dropout_prob,
            "hidden_dropout_prob": cfg.hidden_dropout_prob,
            "attention_probs_dropout_prob": cfg.attention_probs_dropout_prob,
            "relative_attention_num_buckets": cfg.relative_attention_num_buckets,
            "padding_idx": cfg.padding_idx,
            "initializer_range": cfg.initializer_range,
            "layernorm_eps": cfg.layernorm_eps,
            "amp_enabled": cfg.amp_enabled,
            "model_type": cfg.model_type,
            "cfg": cfg,
        }

    def forward(
        self,
        encoder_input_ids=None,
        decoder_input_ids=None,
        encoder_attn_mask=None,
        decoder_attn_mask=None,
        encoder_decoder_attn_mask=None,
        use_cache=False,
        only_encoder=False,
    ):
        """Encoder-decoder pass returning ``{"logits": ...}``.

        With ``use_cache`` the encoder output and decoder key/values are
        reused across generation steps; ``only_encoder`` short-circuits and
        returns the encoder states.
        """
        # Move all provided inputs to the first pipeline stage's placement.
        encoder_input_ids = (
            encoder_input_ids.to_global(placement=dist.get_layer_placement(0))
            if encoder_input_ids is not None
            else encoder_input_ids
        )
        decoder_input_ids = (
            decoder_input_ids.to_global(placement=dist.get_layer_placement(0))
            if decoder_input_ids is not None
            else decoder_input_ids
        )
        encoder_attn_mask = (
            encoder_attn_mask.to_global(placement=dist.get_layer_placement(0))
            if encoder_attn_mask is not None
            else encoder_attn_mask
        )
        decoder_attn_mask = (
            decoder_attn_mask.to_global(placement=dist.get_layer_placement(0))
            if decoder_attn_mask is not None
            else decoder_attn_mask
        )
        encoder_decoder_attn_mask = (
            encoder_decoder_attn_mask.to_global(placement=dist.get_layer_placement(0))
            if encoder_decoder_attn_mask is not None
            else encoder_decoder_attn_mask
        )
        if use_cache and self.encoder_states is not None:
            # Reuse the encoder output computed on the first generation step.
            encoder_states = self.encoder_states
        else:
            position_bias = None
            encoder_decoder_position_bias = None
            # Fresh pass: clear any stale cache before encoding.
            self.set_cache(encoder_states=None, past_key_values=None)
            encoder_attn_mask = self.extended_attn_mask(encoder_attn_mask)
            enc_embedding_output = self.embedding(encoder_input_ids)
            # transpose [batch_size, seq_len, embed_size] to [seq_len, batch_size, embed_size]
            enc_hidden_states = enc_embedding_output.transpose(0, 1)
            for layer in self.encoder.layers:
                # position_bias computed by layer 0 is threaded through the stack.
                enc_hidden_states, position_bias = layer(
                    enc_hidden_states,
                    encoder_attn_mask,
                    position_bias=position_bias,
                )
            encoder_states = self.encoder.final_layernorm(enc_hidden_states)
        if only_encoder:
            return encoder_states
        decoder_attn_mask = self.extended_attn_mask(
            decoder_attn_mask, decoder_input_ids, is_decoder=True
        )
        encoder_decoder_attn_mask = self.extended_attn_mask(encoder_decoder_attn_mask)
        dec_embedding_output = self.embedding(decoder_input_ids)
        # transpose [batch_size, seq_len, embed_size] to [seq_len, batch_size, embed_size]
        dec_hidden_states = dec_embedding_output.transpose(0, 1)
        if use_cache:
            presents = []
        position_bias = None
        encoder_decoder_position_bias = None
        for layer, past_key_value in zip(self.decoder.layers, self.past_key_values):
            dec_hidden_states, position_bias, encoder_decoder_position_bias = layer(
                dec_hidden_states,
                decoder_attn_mask,
                encoder_states,
                encoder_decoder_attn_mask,
                past_key_value=past_key_value,
                position_bias=position_bias,
                encoder_decoder_position_bias=encoder_decoder_position_bias,
                use_cache=use_cache,
            )
            if use_cache:
                # In cache mode each layer returns (hidden, present_kv).
                dec_hidden_states, present = dec_hidden_states
                presents.append(present)
        if use_cache:
            self.set_cache(encoder_states, past_key_values=presents)
        decoder_states = self.decoder.final_layernorm(dec_hidden_states)
        if self.cfg.tie_word_embeddings:
            # T5-style rescaling when the LM head shares the embedding weights.
            decoder_states = decoder_states * (self.cfg.hidden_size ** -0.5)
        if self.model_type == "mt5":
            logits = self.lm_head(decoder_states)
        else:
            logits = self.lm_head(decoder_states, self.embedding.word_embeddings.weight)
        return {"logits": logits}

    def set_cache(self, encoder_states, past_key_values):
        """Install (or clear, with None) cached encoder output and decoder KV pairs."""
        self.encoder_states = encoder_states
        # past_key_values[0][0] has the cached key; dim 2 is its sequence length.
        self.past_length = 0 if past_key_values is None else past_key_values[0][0].shape[2]

        if past_key_values is None:
            past_key_values = [None] * len(self.decoder.layers)
        assert len(past_key_values) == len(self.decoder.layers), (
            f"past_key_values's length {len(past_key_values)} doesn't match "
            f"decoder num_layers' length {self.decoder.layers}"
        )
        self.past_key_values = past_key_values

    def _reorder_cache(self, beam_idx):
        """Reindex every cached key/value state by ``beam_idx`` (beam search)."""
        past_key_values = self.past_key_values
        reordered_decoder_past = ()
        for layer_past_states in past_key_values:
            # get the correct batch idx from layer past batch dim
            # batch dim of `past` is at 2nd position
            reordered_layer_past_states = ()
            for layer_past_state in layer_past_states:
                # need to set correct `past` for each of the key / value states
                beam_idx = beam_idx.to_global(placement=layer_past_state.placement)
                reordered_layer_past_states = reordered_layer_past_states + (
                    layer_past_state.index_select(0, beam_idx),
                )

            assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
            assert len(reordered_layer_past_states) == len(layer_past_states)
            reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
        return reordered_decoder_past

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past=None,
        encoder_attn_mask=None,
        encoder_decoder_attn_mask=None,
        use_cache=None,
        encoder_outputs=None,
    ):
        """Generator hook: assemble forward() kwargs for one decoding step."""
        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]
            self.past_key_values = past
        self.encoder_states = encoder_outputs
        # All-ones mask over the current decoder input, broadcast to all ranks.
        # NOTE(review): local name is a typo for "decoder_attn_mask".
        decoder_attn_maks = flow.ones(
            input_ids.size(),
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=flow.placement("cuda", list(range(dist.get_world_size()))),
        )
        return {
            "decoder_input_ids": input_ids,
            "decoder_attn_mask": decoder_attn_maks,
            "encoder_attn_mask": encoder_attn_mask,
            "encoder_decoder_attn_mask": encoder_decoder_attn_mask,
            "use_cache": use_cache,
        }
class MT5ForPreTraining(flow.nn.Module):
    """Pre-training wrapper around :class:`MT5Model`: adds the MLM loss head and
    the static hooks LiBai uses to configure pipeline parallelism and
    activation checkpointing on the compiled graph."""

    def __init__(self, cfg) -> None:
        super().__init__()
        # Load HuggingFace weights when a checkpoint path is configured,
        # otherwise build the model from scratch.
        if cfg.pretrained_model_path is not None:
            loader = T5LoaderHuggerFace(MT5Model, cfg, cfg.pretrained_model_path)
            self.mt5_model = loader.load()
        else:
            self.mt5_model = MT5Model(cfg)
        self.loss_func = MT5Loss()

    def set_cache(self, encoder_states, past_key_values):
        # Delegate incremental-decoding cache management to the wrapped model.
        self.mt5_model.set_cache(encoder_states, past_key_values)

    def forward(
        self,
        encoder_input_ids,
        decoder_input_ids,
        encoder_attn_mask,
        decoder_attn_mask,
        encoder_decoder_attn_mask,
        lm_labels=None,
        loss_mask=None,
        use_cache=False,
    ):
        """Run the seq2seq model; return the loss dict when `lm_labels` is given,
        otherwise the prediction scores."""
        logits = self.mt5_model(
            encoder_input_ids,
            decoder_input_ids,
            encoder_attn_mask,
            decoder_attn_mask,
            encoder_decoder_attn_mask,
            use_cache=use_cache,
        )["logits"]
        # transpose [seq_len, batch_size, vocab_size] to [batch_size, seq_len, vocab_size]
        logits = logits.transpose(0, 1)
        if lm_labels is not None:
            lm_loss = self.loss_func(logits, lm_labels, loss_mask)
            return lm_loss
        else:
            return {
                "prediction_scores": logits,
            }

    @staticmethod
    def set_pipeline_stage_id(model):
        """Assign each submodule to its pipeline stage; supports both the old
        (OneFlow 0.8 `module_block.origin`/`config`) and the new
        (`to(nn.Module)` / `to(GraphModule)`) graph-block APIs."""
        dist_utils = dist.get_dist_util()
        # Set pipeline parallelism stage_id
        if hasattr(model.mt5_model.encoder.final_layernorm, "config"):
            # Old API in OneFlow 0.8
            for module_block in model.modules():
                # Embedding and mask live on the first stage.
                if isinstance(module_block.origin, MT5Embedding):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.origin, ExtendedMask):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                # Transformer layers are distributed by their own layer_idx.
                elif isinstance(module_block.origin, TransformerLayer):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
                # The loss sits on the last stage.
                elif isinstance(module_block.origin, MT5Loss):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
                    )
            # Final layernorms follow the stage of their recorded layer_idx.
            model.mt5_model.encoder.final_layernorm.config.set_stage(
                dist_utils.get_layer_stage_id(model.mt5_model.encoder.final_layernorm.layer_idx),
                dist.get_layer_placement(model.mt5_model.encoder.final_layernorm.layer_idx),
            )
            model.mt5_model.decoder.final_layernorm.config.set_stage(
                dist_utils.get_layer_stage_id(model.mt5_model.decoder.final_layernorm.layer_idx),
                dist.get_layer_placement(model.mt5_model.decoder.final_layernorm.layer_idx),
            )
        else:
            # New graph-block API: unwrap via to(nn.Module) / to(GraphModule).
            for module_block in model.modules():
                if isinstance(module_block.to(nn.Module), MT5Embedding):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.to(nn.Module), ExtendedMask):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.to(nn.Module), TransformerLayer):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
                elif isinstance(module_block.to(nn.Module), MT5Loss):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
                    )
            model.mt5_model.encoder.final_layernorm.to(flow.nn.graph.GraphModule).set_stage(
                dist_utils.get_layer_stage_id(model.mt5_model.encoder.final_layernorm.layer_idx),
                dist.get_layer_placement(model.mt5_model.encoder.final_layernorm.layer_idx),
            )
            model.mt5_model.decoder.final_layernorm.to(flow.nn.graph.GraphModule).set_stage(
                dist_utils.get_layer_stage_id(model.mt5_model.decoder.final_layernorm.layer_idx),
                dist.get_layer_placement(model.mt5_model.decoder.final_layernorm.layer_idx),
            )

    @staticmethod
    def set_activation_checkpoint(model):
        """Enable activation checkpointing on every TransformerLayer block."""
        for module_block in model.modules():
            # Old API in OneFlow 0.8
            if hasattr(module_block, "origin"):
                if isinstance(module_block.origin, TransformerLayer):
                    module_block.config.activation_checkpointing = True
            else:
                if isinstance(module_block.to(nn.Module), TransformerLayer):
                    module_block.to(nn.graph.GraphModule).activation_checkpointing = True
| 18,295 | 40.487528 | 97 | py |
libai | libai-main/projects/MT5/train_net.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import random
import sys
import numpy as np
import oneflow as flow
from libai.config import LazyConfig, default_argument_parser, try_get_key
from libai.engine import DefaultTrainer, default_setup
from libai.utils.checkpoint import Checkpointer
from libai.utils.events import JSONWriter, TensorboardXWriter
from projects.MT5.utils.mt5_metrc_printer import MT5MetricPrinter
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
logger = logging.getLogger("libai." + __name__)
class Mt5Trainer(DefaultTrainer):
    """DefaultTrainer specialization for MT5 that swaps in an MT5-aware
    metric printer (token throughput / accuracy metrics)."""

    def __init__(self, cfg):
        super().__init__(cfg)

    def build_writers(self):
        """
        Build a list of writers to be used. By default it contains
        writers that write metrics to the screen,
        a json file, and a tensorboard event file respectively.
        If you'd like a different list of writers, you can overwrite it in
        your trainer.

        Returns:
            list[EventWriter]: a list of :class:`EventWriter` objects.

        It is now implemented by:

        .. code-block:: python

            return [
                MT5MetricPrinter(
                    self.global_batch_size, self.max_iter, self.cfg.train.log_period
                ),
                JSONWriter(os.path.join(self.cfg.train.output_dir, "metrics.json")),
                TensorboardXWriter(self.cfg.train.output_dir),
            ]
        """
        # Assume the default print/log frequency.
        return [
            # It may not always print what you want to see, since it prints "common" metrics only.
            MT5MetricPrinter(self.global_batch_size, self.max_iter, self.cfg.train.log_period),
            JSONWriter(os.path.join(self.cfg.train.output_dir, "metrics.json")),
            TensorboardXWriter(self.cfg.train.output_dir),
        ]
def main(args):
    """Entry point: load/override the config, seed RNGs per rank, then either
    run evaluation only or launch training."""
    cfg = LazyConfig.load(args.config_file)
    cfg = LazyConfig.apply_overrides(cfg, args.opts)
    default_setup(cfg, args)

    # Give every rank its own deterministic seed.
    rank_seed = cfg.train.seed + flow.env.get_rank()
    flow.manual_seed(rank_seed)
    flow.cuda.manual_seed(rank_seed)
    np.random.seed(rank_seed)
    random.seed(rank_seed)

    if args.fast_dev_run:
        # Shrink the schedule for a quick smoke test.
        cfg.train.train_epoch = 0
        cfg.train.train_iter = 20
        cfg.train.evaluation.eval_period = 10
        cfg.train.log_period = 1

    if args.eval_only:
        tokenizer = (
            Mt5Trainer.build_tokenizer(cfg)
            if try_get_key(cfg, "tokenization") is not None
            else None
        )
        model = Mt5Trainer.build_model(cfg)
        Checkpointer(model, save_dir=cfg.train.output_dir).resume_or_load(
            cfg.train.load_weight, resume=args.resume
        )
        if try_get_key(cfg, "train.graph.enabled", default=False):
            model = Mt5Trainer.build_graph(cfg, model, is_train=False)
        test_loader = Mt5Trainer.build_test_loader(cfg, tokenizer)
        if len(test_loader) == 0:
            logger.info("No dataset in dataloader.test, please set dataset for dataloader.test")
        _ = Mt5Trainer.test(cfg, test_loader, model)
        return

    return Mt5Trainer(cfg).train()
if __name__ == "__main__":
    # Parse CLI arguments and launch training / evaluation.
    main(default_argument_parser().parse_args())
| 3,832 | 33.845455 | 98 | py |
libai | libai-main/projects/MT5/layers/logits_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.layers import Linear
from libai.utils import distributed as dist
class LMLogits(nn.Module):
    """Project final hidden states to vocabulary logits.

    For ``model_type == "t5"`` the projection reuses the word-embedding matrix
    (tied weights) plus an optional bias; otherwise (``"mt5"``) an untied
    ``Linear`` head is used and ``word_embeddings`` is ignored.
    """

    def __init__(self, vocab_size, hidden_size=None, bias=False, model_type="t5", layer_idx=-1):
        super().__init__()
        self.model_type = model_type
        if model_type == "t5":
            if bias:
                # Bias is sharded along the vocab dimension.
                self.bias = nn.Parameter(
                    flow.zeros(
                        (vocab_size,),
                        dtype=flow.float32,
                        placement=dist.get_layer_placement(layer_idx),
                        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)]),
                    )
                )
            else:
                self.bias = None
        elif model_type == "mt5":
            self.linear = Linear(hidden_size, vocab_size, bias=False, layer_idx=layer_idx)

    def forward(self, input, word_embeddings=None):
        if self.model_type != "t5":
            # Untied head; word_embeddings is unused here.
            return self.linear(input)
        # Tied head: multiply by the transposed embedding matrix.
        w = word_embeddings.to_global(placement=input.placement)
        input = input.to_global(grad_sbp=input.sbp)
        logits = flow._C.matmul(input, w, transpose_b=True)
        if self.bias is not None:
            logits = logits + self.bias
        return logits
| 1,958 | 35.962264 | 96 | py |
libai | libai-main/projects/MT5/layers/mask_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from libai.utils import distributed as dist
class ExtendedMask(flow.nn.Module):
    """Expand a 2D/3D attention mask to the broadcastable 4D shape
    ``[batch, 1, tgt_len, src_len]``; for decoders a causal mask is folded
    into 2D padding masks."""

    def forward(self, x, input_tensor=None, is_decoder=False):
        """Return the 4D extended mask for a rank-2 or rank-3 input mask.

        Args:
            x: attention mask, shape [batch, tgt_len, src_len] (rank 3) or
                [batch, src_len] (rank 2).
            input_tensor: decoder input ids, only read for the rank-2 decoder case.
            is_decoder: fold a causal mask into rank-2 inputs when True.

        Raises:
            ValueError: if the mask rank is neither 2 nor 3.
        """
        if x.dim() == 3:
            # Add a broadcast head dimension.
            extended_mask = x[:, None, :, :]
        elif x.dim() == 2:
            if is_decoder:
                extended_mask = self.create_extended_mask_for_decoder(x, input_tensor)
            else:
                # Padding mask broadcast over heads and query positions.
                extended_mask = x[:, None, None, :]
        else:
            # Previously fell through to an UnboundLocalError; fail clearly instead.
            raise ValueError(f"Unsupported attention mask rank {x.dim()}, expected 2 or 3")
        return extended_mask

    def create_extended_mask_for_decoder(self, x, input_tensor):
        """Combine a causal (lower-triangular) mask with the padding mask `x`."""
        batch_size, seq_len = input_tensor.size()
        seq_ids = flow.arange(
            seq_len,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=x.placement,
        )
        # causal_mask[b, i, j] == 1 iff position j may be attended from position i.
        causal_mask = (
            seq_ids[None, None, :].repeat(batch_size, seq_len, 1) <= seq_ids[None, :, None]
        )
        causal_mask = causal_mask.to(x.dtype)
        causal_mask = causal_mask.to_global(sbp=x.sbp)
        if causal_mask.shape[1] < x.shape[1]:
            # Mask covers a longer key sequence than the query sequence
            # (cached prefix): the prefix is fully visible.
            prefix_seq_len = x.shape[1] - causal_mask.shape[1]
            ones = flow.ones(
                (batch_size, seq_len, prefix_seq_len),
                dtype=causal_mask.dtype,
                sbp=causal_mask.sbp,
                placement=causal_mask.placement,
            )
            causal_mask = flow.cat(
                [
                    ones,
                    causal_mask,
                ],
                dim=-1,
            )
        # Intersect causal visibility with the padding mask.
        extended_mask = causal_mask[:, None, :, :] * x[:, None, None, :]
        return extended_mask
| 2,225 | 33.78125 | 91 | py |
libai | libai-main/projects/MT5/layers/loss_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from libai.layers import ParallelCrossEntropyLoss
from libai.utils import distributed as dist
class MT5Loss(flow.nn.Module):
    """Masked language-model loss for (M)T5 under 2D (data x model) parallelism.

    Computes a loss-mask-weighted mean cross entropy; during training also
    reports token throughput and correct-token counts for the metric printer.
    """

    def __init__(self) -> None:
        super().__init__()
        # Cross entropy that handles vocab-parallel (sharded) logits.
        self.lm_loss = ParallelCrossEntropyLoss()

    def forward(self, logits, lm_labels, loss_mask):
        # Move labels/mask onto the same devices as the tensors they combine with.
        lm_labels = lm_labels.to_global(placement=logits.placement)
        lm_loss = self.lm_loss(logits, lm_labels)
        loss_mask = loss_mask.to_global(placement=lm_loss.placement)
        loss_mask = loss_mask.float()
        # Number of tokens that actually contribute to the loss.
        denominator = loss_mask.sum().to_global(
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
        )
        # AMP identity markers control mixed-precision casting around the loss
        # (white = allow fp16, black = force fp32) — order preserved deliberately.
        lm_loss = flow._C.amp_white_identity(lm_loss)
        lm_loss = flow._C.amp_black_identity(lm_loss)
        masked_lm_loss = flow.sum(lm_loss.view(-1) * loss_mask.view(-1)) / denominator
        masked_lm_loss = masked_lm_loss.to_global(
            sbp=dist.get_nd_sbp([flow.sbp.partial_sum, flow.sbp.broadcast])
        )
        if self.training:
            # token throughput
            done_tokens = (
                flow.zeros(
                    1,
                    sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                    placement=lm_labels.placement,
                )
                + logits.shape[0] * logits.shape[1]
            )
            # correct token
            correct_tokens = flow.sum(
                (
                    logits.to_global(
                        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                        placement=lm_labels.placement,
                    )
                    .argmax(dim=-1)
                    .eq(lm_labels)
                ).float()
            )
            return {
                "mlm_loss": masked_lm_loss,
                "done_tokens": done_tokens,
                "correct_tokens": correct_tokens,
                "denominator": denominator,
            }
        else:
            return {
                "mlm_loss": masked_lm_loss,
            }
| 2,691 | 34.421053 | 86 | py |
libai | libai-main/projects/MT5/layers/transformer_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow.nn as nn
from libai.layers.droppath import DropPath
from libai.layers.layer_norm import RMSLayerNorm as LayerNorm
from libai.utils import distributed as dist
from projects.MT5.layers.attention_layer import MultiheadAttention
from projects.MT5.layers.mlp_layer import MT5MLP, T5MLP
class TransformerLayer(nn.Module):
    """A single transformer layer.
    Transformer layer takes input with size [bsz, seq_length, hidden size] and returns an
    output of the same size.
    The input and output has same sbp sign, (S(0), B).

    Arguments:
        hidden_size: size of hidden state.
        ffn_hidden_size: size of feed forword neural network.
        num_attention_heads: number of attention heads.
        head_size: per-head dimension of query/key/value projections.
        relative_attention_num_buckets: bucket count for T5 relative position bias.
        is_decoder: used to specify whether this is transformer encoder layer or transformer
            decoder layer. Default: ``False``.
        attention_dropout_prob: dropout probability of attention weights.
        output_dropout_prob: dropout probability of output.
        drop_path_prob: stochastic-depth probability; 0 disables DropPath.
        layernorm_epsilon: epsilon used in layernorm layer. Default: `1e-5`.
        init_method: method to initialize the input layer weights.
        output_layer_init_method: method to initialize the output layer weights.
            If None, use `init_method`.
        layer_idx: the layer index, which determines the placement.
        model_type: "t5" or "mt5"; selects the MLP variant (gated for mt5).
        has_relative_attention_bias: whether self-attention owns the relative
            position bias embedding (typically only the first layer).
    """

    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        head_size,
        relative_attention_num_buckets,
        is_decoder=False,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        drop_path_prob=0.0,
        layernorm_epsilon=1e-5,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        padding_idx=None,
        *,
        layer_idx=0,
        model_type="t5",
        has_relative_attention_bias=False
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.ffn_hidden_size = ffn_hidden_size
        self.num_attention_heads = num_attention_heads
        self.head_size = head_size
        self.attention_dropout_prob = attention_dropout_prob
        self.output_dropout_prob = output_dropout_prob
        self.layernorm_epsilon = layernorm_epsilon
        self.layer_idx = layer_idx
        self.is_decoder = is_decoder
        self.init_method = init_method
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        self.output_layer_init_method = output_layer_init_method
        self.drop_path = DropPath(drop_path_prob) if drop_path_prob > 0.0 else nn.Identity()
        # Pre-norm architecture: RMS layernorm before each sublayer.
        self.input_layernorm = LayerNorm(
            self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
        )
        self.self_attention = self.build_attention(
            is_cross_attention=False,
            relative_attention_num_buckets=relative_attention_num_buckets,
            padding_idx=padding_idx,
            has_relative_attention_bias=has_relative_attention_bias,
            is_decoder=self.is_decoder,
        )
        self.post_attention_layernorm = LayerNorm(
            self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
        )
        if self.is_decoder:
            # Decoder layers additionally attend over encoder output.
            self.cross_attention = self.build_attention(
                is_cross_attention=True,
                relative_attention_num_buckets=relative_attention_num_buckets,
                padding_idx=padding_idx,
                is_decoder=self.is_decoder,
            )
            self.post_cross_attention_layernorm = LayerNorm(
                self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
            )
        # mt5 uses a gated-activation MLP; t5 the plain one.
        if model_type == "mt5":
            self.mlp = MT5MLP(
                self.hidden_size,
                self.ffn_hidden_size,
                self.output_dropout_prob,
                self.init_method,
                output_layer_init_method=self.output_layer_init_method,
                layer_idx=self.layer_idx,
            )
        elif model_type == "t5":
            self.mlp = T5MLP(
                self.hidden_size,
                self.ffn_hidden_size,
                self.output_dropout_prob,
                self.init_method,
                output_layer_init_method=self.output_layer_init_method,
                layer_idx=self.layer_idx,
            )

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        use_cache=False,
        position_bias=None,
        encoder_decoder_position_bias=None,
    ):
        """
        Args:
            hidden_states: shape is (batch_size, seq_length, hidden_size),
                sbp signature is (S(0), B).
            attention_mask: the combination of key padding mask and casual mask of hidden states
                with shape (batch_size, 1, seq_length, seq_length) and the sbp
                signature is (S(0), B),
            encoder_states: encoder output with shape (batch_size, seq_length, hidden_size)
                and the sbp signature is (S(0), B), which will be used in cross attention.
            encoder_attention_mask: key padding mask of encoder states with shape
                (batch_size, 1, seq_length, seq_length) and the sbp signature is (S(0), B).
            past_key_value: tuple of key and value, each shape is
                (seq_length, bsz, num_heads, head_size), For decoder layer,
                the past_key_value contains the states both from self attention
                and cross attention.
            use_cache: it will be set to `True` when the model is in the inference phase and
                used for incremental decoding.
        """
        # Move inputs to this layer's pipeline stage.
        hidden_states = hidden_states.to_global(placement=dist.get_layer_placement(self.layer_idx))
        if attention_mask is not None:
            attention_mask = attention_mask.to_global(
                placement=dist.get_layer_placement(self.layer_idx)
            )
        # Decoder caches hold 4 tensors: (self k, self v, cross k, cross v).
        if past_key_value is not None:
            if self.is_decoder:
                assert len(past_key_value) == 4
                self_attn_past_key_value = past_key_value[:2]
                cross_attn_past_key_value = past_key_value[2:]
            else:
                self_attn_past_key_value = past_key_value
                cross_attn_past_key_value = None
        else:
            self_attn_past_key_value, cross_attn_past_key_value = None, None
        layernorm_output = self.input_layernorm(hidden_states)
        attention_output, position_bias = self.self_attention(
            layernorm_output,
            attention_mask=attention_mask,
            past_key_value=self_attn_past_key_value,
            position_bias=position_bias,
            use_cache=use_cache,
        )
        # NOTE(review): when use_cache is True, attention_output is still the
        # (output, presents) tuple here; drop_path is nn.Identity unless
        # drop_path_prob > 0, in which case this would receive a tuple — confirm
        # that caching and DropPath are never enabled together.
        attention_output = self.drop_path(attention_output)
        if use_cache:
            attention_output, presents = attention_output
        else:
            presents = None
        # Residual connection around self-attention.
        hidden_states = hidden_states + attention_output
        layernorm_output = self.post_attention_layernorm(hidden_states)
        if self.is_decoder:
            if presents is not None:
                query_length = presents[0].shape[2]
            else:
                query_length = None
            attention_output, encoder_decoder_position_bias = self.cross_attention(
                layernorm_output,
                encoder_states,
                attention_mask=encoder_attention_mask,
                past_key_value=cross_attn_past_key_value,
                position_bias=encoder_decoder_position_bias,
                use_cache=use_cache,
                query_length=query_length,
            )
            if use_cache:
                attention_output, decoder_presents = attention_output
                presents = presents + decoder_presents
            attention_output = self.drop_path(attention_output)
            # Residual connection around cross-attention.
            hidden_states = hidden_states + attention_output
            layernorm_output = self.post_cross_attention_layernorm(hidden_states)
        mlp_output = self.mlp(layernorm_output)
        mlp_output = self.drop_path(mlp_output)
        # Residual connection around the MLP.
        output = hidden_states + mlp_output
        if use_cache:
            output = (output, presents)
        # Always return the position bias so the caller can reuse it across layers.
        output = (output,) + (position_bias,)
        if self.is_decoder:
            output = output + (encoder_decoder_position_bias,)
        return output

    def build_attention(
        self,
        is_cross_attention=False,
        relative_attention_num_buckets=None,
        padding_idx=None,
        has_relative_attention_bias=False,
        is_decoder=False,
    ):
        """Construct a MultiheadAttention sublayer with this layer's settings."""
        return MultiheadAttention(
            self.hidden_size,
            self.num_attention_heads,
            head_size=self.head_size,
            relative_attention_num_buckets=relative_attention_num_buckets,
            is_cross_attention=is_cross_attention,
            attention_dropout_prob=self.attention_dropout_prob,
            output_dropout_prob=self.output_dropout_prob,
            init_method=self.init_method,
            output_layer_init_method=self.output_layer_init_method,
            padding_idx=padding_idx,
            layer_idx=self.layer_idx,
            has_relative_attention_bias=has_relative_attention_bias,
            is_decoder=is_decoder,
        )
| 10,004 | 38.082031 | 99 | py |
libai | libai-main/projects/MT5/layers/embed_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
import oneflow.nn as nn
from oneflow.nn import init
import libai.utils.distributed as dist
from libai.layers.embedding import VocabEmbedding
class MT5Embedding(flow.nn.Module):
    """Token embedding plus dropout for (M)T5.

    No positional embedding here — position information in T5 comes from the
    relative attention bias inside the attention layers.
    """

    def __init__(
        self,
        hidden_size,
        vocab_size,
        embedding_dropout_prob,
        pad_token_id=0,
        init_method=flow.nn.init.xavier_normal_,
        amp_enabled=False,
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        # Vocab-parallel token embedding table.
        self.word_embeddings = VocabEmbedding(
            num_embeddings=vocab_size,
            embedding_dim=hidden_size,
            init_method=init_method,
            amp_enabled=amp_enabled,
            padding_idx=pad_token_id,
        )
        self.embedding_dropout = flow.nn.Dropout(embedding_dropout_prob)

    def forward(self, input_ids):
        # Look up token vectors, then regularize with dropout.
        return self.embedding_dropout(self.word_embeddings(input_ids))
class Embedding(nn.Module):
    """Construct the trainable embedding module, which does not support parallelization.
    This can be used for positional embedding and token type embedding.

    Arguments:
        num_embeddings: size of vocabulary.
        embedding_dim: dimension of embeddings.
        padding_idx: pad index. Defaults to None.
        init_method: method to initialize weights. Defaults to ``flow.nn.init.xavier_normal_``.
        amp_enabled: fp16 option for embedding weight. Defaults to False.
        layer_idx: pipeline stage the weight is placed on. Defaults to 0.
    """

    def __init__(
        self,
        num_embeddings,
        embedding_dim,
        padding_idx=None,
        init_method=init.xavier_normal_,
        amp_enabled=False,
        layer_idx=0,
    ):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        if padding_idx is not None:
            if padding_idx > 0:
                assert (
                    padding_idx < self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
            elif padding_idx < 0:
                assert (
                    padding_idx >= -self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
                # Normalize a negative index into the [0, num_embeddings) range.
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.init_method = init_method
        self.amp_enabled = amp_enabled
        assert num_embeddings > 0
        # Weight is replicated (broadcast) across all ranks — no model parallelism.
        self.weight = nn.Parameter(
            flow.empty(
                (num_embeddings, embedding_dim),
                dtype=flow.float32,
                placement=dist.get_layer_placement(layer_idx),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            )
        )
        self.init_method(self.weight)
        # NOTE(review): _fill_padding_idx_with_zero is defined below but not
        # invoked here, so the padding row keeps its random init — confirm
        # whether callers rely on zeroing it.

    def forward(self, input_ids):
        # amp_white_identity marks the weight as safe for fp16 under AMP.
        weight = flow._C.amp_white_identity(self.weight) if self.amp_enabled else self.weight
        input_embeds = flow._C.gather(weight, input_ids, axis=0)
        return input_embeds

    def _fill_padding_idx_with_zero(self) -> None:
        # Zero the embedding row of the padding token (no gradient tracking).
        if self.padding_idx is not None:
            with flow.no_grad():
                self.weight[self.padding_idx] = flow.zeros(
                    self.embedding_dim,
                    placement=dist.get_layer_placement(0),
                    sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                )

    def extra_repr(self) -> str:
        # Shown in repr(); padding_idx only included when set.
        s = "num_embeddings={num_embeddings}, embedding_dim={embedding_dim}"
        if self.padding_idx is not None:
            s += ", padding_idx={padding_idx}"
        return s.format(**self.__dict__)
| 4,294 | 34.204918 | 95 | py |
libai | libai-main/projects/MT5/layers/attention_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Tuple
import oneflow as flow
from oneflow import nn
from libai.layers.linear import Linear
from libai.utils import distributed as dist
from projects.MT5.layers.embed_layer import Embedding
class MultiheadAttention(nn.Module):
"""Multi-head attention layer, support self attention and cross attention.
Args:
hidden_size: size of hidden state.
num_attention_heads: number of attention heads.
is_cross_attention: used to specify whether it is self attention or cross attention.
Defaults to False.
attention_dropout_prob: dropout probability of attention weights.
Defaults to 0.0.
output_dropout_prob: dropout probability of output. Defaults to 0.0.
init_method: method to initialize the input layer weights.
Defaults to ``init.xavier_normal_``.
output_layer_init_method: method to initialize the output layer weights.
If None, use ``init_method``.
layer_idx: a layer_idx sign which determines the placements.
It will be used in pipeline parallelism. Defaults to 0.
"""
    def __init__(
        self,
        hidden_size,
        num_attention_heads,
        head_size,
        relative_attention_num_buckets,
        is_cross_attention=False,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        padding_idx=None,
        *,
        layer_idx=0,
        has_relative_attention_bias=False,
        is_decoder=False,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.has_relative_attention_bias = has_relative_attention_bias
        self.is_decoder = is_decoder
        self.attention_dropout_prob = attention_dropout_prob
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        self.num_heads = num_attention_heads
        self.head_size = head_size
        self.dropout = nn.Dropout(p=attention_dropout_prob)
        # Standard 1/sqrt(d) scale (note: forward currently passes alpha=1 to matmul).
        self.norm_factor = 1.0 / math.sqrt(float(self.head_size))
        self.is_cross_attention = is_cross_attention
        self.output_dropout = nn.Dropout(p=output_dropout_prob)
        if self.is_cross_attention:
            # Separate Q projection (from decoder states) and fused KV
            # projection (from encoder states), both column-parallel.
            self.query = Linear(
                self.hidden_size,
                self.num_heads * self.head_size,
                bias=False,
                parallel="col",
                init_method=init_method,
                layer_idx=layer_idx,
            )
            self.key_value = Linear(
                self.hidden_size,
                self.num_heads * self.head_size * 2,
                bias=False,
                parallel="col",
                init_method=init_method,
                layer_idx=layer_idx,
            )
        else:
            # Fused QKV projection for self-attention.
            self.query_key_value = Linear(
                self.hidden_size,
                self.num_heads * self.head_size * 3,
                bias=False,
                parallel="col",
                init_method=init_method,
                layer_idx=layer_idx,
            )
        # Row-parallel output projection back to hidden_size.
        self.dense = Linear(
            self.num_heads * self.head_size,
            self.hidden_size,
            bias=False,
            parallel="row",
            init_method=output_layer_init_method,
            skip_bias_add=False,
            layer_idx=layer_idx,
        )
        if self.has_relative_attention_bias:
            # One learned bias per (bucket, head) — the T5 relative position bias.
            self.relative_attention_bias = Embedding(
                self.relative_attention_num_buckets,
                self.num_heads,
                padding_idx=padding_idx,
                layer_idx=layer_idx,
            )
def forward(
self,
hidden_states: flow.Tensor,
encoder_states: flow.Tensor = None,
attention_mask: flow.Tensor = None,
past_key_value: Tuple[flow.Tensor, flow.Tensor] = None,
use_cache: bool = False,
position_bias=None,
query_length=None,
):
"""
Args:
hidden_states (flow.Tensor): shape is [bsz, tgt_len, hidden_size].
encoder_states (flow.Tensor, optional): shape is [bsz, src_len, hidden_size].
Defaults to None.
attention_mask (flow.Tensor, optional): shape is [bsz, 1, tgt_len, src_len].
It should be the combination of padding mask and casual mask.
It is the padding mask of source input when used with self-attention in encoder.
And it is the combination of padding mask of target input and casual mask when
used with self-attention in decoder. It is the padding mask of source input when
used with cross-attention in decoder.
Defaults to None.
past_key_value (Tuple[flow.Tensor, flow.Tensor], optional): tuple of key and value,
each shape is [bsz, num_heads, src_len, head_size]. Defaults to None.
use_cache (bool, optional): it will be set to True, when the model is in the inference
phase and used for incremental decoding. Defaults to False.
"""
if encoder_states is not None:
encoder_states = encoder_states.to_global(placement=hidden_states.placement)
if attention_mask is not None:
attention_mask = attention_mask.to_global(placement=hidden_states.placement)
# hidden_states shape: [seq_len, batch_size, hidden_size]
real_seq_length, bsz = hidden_states.size()[:2]
if past_key_value is not None:
assert (
len(past_key_value) == 2
), "past_key_value should have 2 past states: keys and values."
f"Got {len(past_key_value)} past states.\n"
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
key_length = real_seq_length if encoder_states is None else encoder_states.shape[0]
if self.is_cross_attention:
query = self.query(hidden_states)
query = query.view(-1, bsz, self.num_heads, self.head_size)
query = query.permute(1, 2, 0, 3) # bsz, num_head, seq_len, head_size
if past_key_value is not None:
key, value = past_key_value
elif encoder_states is not None:
key_value = self.key_value(encoder_states)
key_value = key_value.view(-1, bsz, self.num_heads, 2 * self.head_size)
key_value = key_value.permute(1, 2, 0, 3)
key, value = flow.chunk(key_value, chunks=2, dim=-1)
else:
raise ValueError(
"past_key_value and encoder_states cannot be None at the same time."
)
else:
query_key_value = self.query_key_value(hidden_states)
if use_cache:
query_key_value = query_key_value.view(bsz, -1, self.num_heads, 3 * self.head_size)
query_key_value = query_key_value.permute(
0, 2, 1, 3
) # [bsz, num_heads, src_len, 3 * head_size]
query, key, value = flow.chunk(query_key_value, chunks=3, dim=-1)
else:
attention_scores, value = flow._C.fused_self_attention(
query_key_value, head_size=self.head_size, alpha=1
)
if past_key_value is not None:
past_key, past_value = past_key_value
key = flow.cat((past_key.type_as(key), key), dim=2)
value = flow.cat((past_value.type_as(value), value), dim=2)
if use_cache:
past_key_value = (key, value)
if self.is_cross_attention or use_cache:
attention_scores = flow.matmul(query, key, transpose_b=True, alpha=1)
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = flow.zeros(
(1, self.num_heads, real_seq_length, key_length),
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=attention_scores.placement,
)
else:
position_bias = self.compute_bias(
real_seq_length, key_length, placement=attention_mask.placement
)
if past_key_value is not None:
position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
if attention_mask is not None:
if use_cache:
attention_mask = attention_mask.expand_as(attention_scores)
attention_weights = flow._C.fused_bias_add_scale_mask_softmax_dropout(
attention_scores,
position_bias,
attention_mask,
fill_value=-10000.0,
scale=1,
p=self.attention_dropout_prob,
)[0]
else:
attention_scores = attention_scores + position_bias
attention_weights = flow.softmax(attention_scores, dim=-1)
attention_weights = self.dropout(attention_weights)
context = flow.matmul(attention_weights, value)
""" transpose [batch_size, num_head, seq_len, head_size] to
[seq_len, batch_size, num_head, head_size]
"""
context = flow._C.transpose(context, perm=(2, 0, 1, 3))
output = self.dense(context.flatten(2))
output = self.output_dropout(output)
if use_cache:
output = (output, past_key_value)
output = (output,) + (position_bias,)
return output
def extra_repr(self) -> str:
return "hidden_size={}, num_heads={}, is_cross_attention={}".format(
self.hidden_size,
self.num_heads,
self.is_cross_attention,
)
    def _relative_position_bucket(
        self, relative_position, bidirectional=True, num_buckets=32, max_distance=128
    ):
        """Map signed relative positions to T5-style bucket indices.

        Nearby positions (``|pos| < num_buckets // 2`` after direction
        handling) each get their own bucket; larger distances are binned
        logarithmically up to ``max_distance``, beyond which everything
        shares the last bucket.

        Args:
            relative_position: integer tensor of ``memory_pos - query_pos``.
            bidirectional: if True (encoder), half the buckets encode
                positive offsets and the other half negative ones.
            num_buckets: total number of buckets.
            max_distance: distances at or above this all map to the last
                bucket of their half.

        Returns:
            Long tensor of bucket indices in ``[0, num_buckets)``, same
            shape as ``relative_position``.
        """
        relative_buckets = 0
        if bidirectional:
            # Split buckets between the two directions: positive offsets are
            # shifted into the upper half, then the sign is dropped.
            num_buckets //= 2
            relative_buckets = (
                relative_buckets + (relative_position > 0).to(flow.long) * num_buckets
            )
            relative_position = flow.abs(relative_position)
        else:
            # Causal (decoder) case: only non-positive offsets are valid;
            # clamp positives to 0 and negate so positions become distances.
            relative_position = (
                -1
                * flow.min(
                    relative_position,
                    flow.zeros(
                        relative_position.size(),
                        sbp=relative_position.sbp,
                        placement=relative_position.placement,
                    ),
                ).to(flow.long)
            )
        # Half of the (per-direction) buckets are exact; the rest span
        # [max_exact, max_distance) on a log scale.
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        relative_postion_if_large = max_exact + (
            flow.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(flow.long)
        # Clamp to the last bucket; the zeros tensor is filled with
        # num_buckets - 1 so min() acts as an upper bound.
        relative_postion_if_large = flow.min(
            relative_postion_if_large,
            flow.zeros(
                relative_postion_if_large.size(),
                dtype=relative_postion_if_large.dtype,
                sbp=relative_postion_if_large.sbp,
                placement=relative_postion_if_large.placement,
            ).fill_(num_buckets - 1),
        )
        relative_buckets = relative_buckets + flow.where(
            is_small, relative_position, relative_postion_if_large
        )
        return relative_buckets
    def compute_bias(self, query_length, key_length, placement=None):
        """Compute binned relative position bias.

        Builds the (query, key) relative-position matrix, buckets it with
        ``_relative_position_bucket``, and looks each bucket up in the
        learned ``relative_attention_bias`` embedding.

        Args:
            query_length (int): number of query positions.
            key_length (int): number of key positions.
            placement: oneflow placement for the arange tensors (typically
                taken from the attention mask).

        Returns:
            Tensor of shape ``(1, num_heads, query_length, key_length)``
            added to the attention scores.
        """
        context_position = flow.arange(
            query_length,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=placement,
        )
        memory_position = flow.arange(
            key_length,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=placement,
        )
        relative_position = (
            memory_position[None, :] - context_position[:, None]
        )  # shape (query_length, key_length)
        # Decoder attention is unidirectional, so bucket one-sided there.
        relative_position_bucket = self._relative_position_bucket(
            relative_position,
            bidirectional=(not self.is_decoder),
            num_buckets=self.relative_attention_num_buckets,
        )  # shape (query_length, key_length)
        values = self.relative_attention_bias(
            relative_position_bucket
        )  # shape (query_length, key_length, num_heads)
        values = values.permute([2, 0, 1]).unsqueeze(
            0
        )  # shape (1, num_heads, query_length, key_length)
        return values
| 13,441 | 37.849711 | 99 | py |
libai | libai-main/projects/MT5/layers/lm_head_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oneflow import nn
from libai.layers import Linear, LMLogits
class LMHead(nn.Module):
    """Language-model output head for T5/MT5.

    For ``model_type == "mt5"`` an untied ``Linear`` projection is used,
    placed on the last pipeline stage via ``layer_idx``; any other model
    type computes logits against the shared embedding weight through
    ``LMLogits``.
    """

    def __init__(self, model_type, hidden_size, vocab_size, hidden_layers):
        super().__init__()
        if model_type == "mt5":
            # Untied projection; layer_idx pins it to the final decoder stage.
            self.lm_head = Linear(
                hidden_size, vocab_size, bias=False, layer_idx=2 * hidden_layers - 1
            )
        else:
            # Tied head: reuses the word-embedding matrix passed at call time.
            self.lm_head = LMLogits(vocab_size, bias=True)

    def forward(self, decoder_states, embed_weight=None):
        if isinstance(self.lm_head, Linear):
            return self.lm_head(decoder_states)
        return self.lm_head(decoder_states, embed_weight)
| 1,310 | 34.432432 | 84 | py |
libai | libai-main/projects/MT5/layers/mlp_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.layers import Linear, build_activation
class T5MLP(nn.Module):
    """Feed-forward block of the original T5: Linear -> ReLU -> Linear -> Dropout.

    The first projection is column-parallel and the second row-parallel so the
    block composes under tensor parallelism.
    """

    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        output_dropout_prob=0.0,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        *,
        layer_idx=0,
    ):
        super().__init__()
        self.output_dropout_prob = output_dropout_prob
        # Reuse the input projection's init scheme when none is given.
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        self.dense_h_to_4h = Linear(
            hidden_size,
            ffn_hidden_size,
            bias=False,
            parallel="col",
            skip_bias_add=False,
            init_method=init_method,
            layer_idx=layer_idx,
        )
        self.activation_func = build_activation("relu")
        self.dense_4h_to_h = Linear(
            ffn_hidden_size,
            hidden_size,
            bias=False,
            parallel="row",
            skip_bias_add=False,
            init_method=output_layer_init_method,
            layer_idx=layer_idx,
        )
        self.dropout = nn.Dropout(self.output_dropout_prob)

    def forward(self, hidden_states):
        # Expand -> activate -> contract -> regularize.
        expanded = self.activation_func(self.dense_h_to_4h(hidden_states))
        return self.dropout(self.dense_4h_to_h(expanded))
class MT5MLP(nn.Module):
    """Gated-GELU feed-forward block used by mT5.

    Two parallel input projections (``wi_0``, ``wi_1``) feed a fused
    ``gelu(wi_0(x)) * wi_1(x)``, followed by the output projection ``wo``
    and dropout.
    """

    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        output_dropout_prob=0.0,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        *,
        layer_idx=0,
    ):
        super().__init__()
        self.output_dropout_prob = output_dropout_prob
        # Reuse the input projection's init scheme when none is given.
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        self.wi_0 = Linear(
            hidden_size,
            ffn_hidden_size,
            bias=False,
            parallel="col",
            skip_bias_add=False,
            init_method=init_method,
            layer_idx=layer_idx,
        )
        self.wi_1 = Linear(
            hidden_size,
            ffn_hidden_size,
            bias=False,
            parallel="col",
            skip_bias_add=False,
            init_method=init_method,
            layer_idx=layer_idx,
        )
        self.wo = Linear(
            ffn_hidden_size,
            hidden_size,
            bias=False,
            parallel="row",
            skip_bias_add=False,
            init_method=output_layer_init_method,
            layer_idx=layer_idx,
        )
        self.dropout = nn.Dropout(self.output_dropout_prob)

    def forward(self, hidden_states):
        # Fused gated activation: gelu(wi_0(x)) * wi_1(x).
        gated = flow._C.fused_fast_gelu_mul(self.wi_0(hidden_states), self.wi_1(hidden_states))
        return self.dropout(self.wo(gated))
| 3,629 | 27.582677 | 76 | py |
libai | libai-main/projects/MT5/configs/mt5_base.py | from omegaconf import DictConfig
from libai.config import LazyCall
from projects.MT5.mt5_model import MT5Model, MT5ForPreTraining
# mT5-Base-sized architecture: d_model=768, 12 layers, 12 heads of size 64,
# d_ff=2048, sentencepiece vocabulary of 250112.
cfg = dict(
    vocab_size=250112,
    hidden_size=768,
    hidden_layers=12,
    num_attention_heads=12,
    head_size=64,
    intermediate_size=2048,
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    embedding_dropout_prob=0.1,
    relative_attention_num_buckets=32,
    initializer_range=1.0,
    layernorm_eps=1e-06,
    amp_enabled=False,
    model_type="mt5",
    eos_token_id=1,
    padding_idx=0,
    is_encoder_decoder=True,
    # NOTE(review): the sibling mt5_small/mt5_large configs set
    # tie_word_embeddings=False — confirm that True is intentional here.
    tie_word_embeddings=True,
)
cfg = DictConfig(cfg)
# Lazily-constructed model entry points consumed by training configs.
mt5_model = LazyCall(MT5Model)(cfg=cfg)
pretrain_model = LazyCall(MT5ForPreTraining)(cfg=cfg)
| 737 | 22.806452 | 62 | py |
libai | libai-main/projects/MT5/configs/t5_inference.py | from .mt5_base import cfg
from libai.config import LazyCall
from libai.tokenizer import T5Tokenizer
from projects.MT5.mt5_model import MT5Model, MT5ForPreTraining
from configs.common.train import train
from configs.common.data.t5_dataset import tokenization
# Extend the shared mt5 base config with text-generation defaults.
cfg.update(
    model_type="t5",
    is_encoder_decoder=True,
    # Generation-length and search-strategy defaults (greedy decoding).
    max_length=20,
    min_length=0,
    do_sample=False,
    early_stopping=False,
    num_beams=1,
    num_beam_groups=1,
    diversity_penalty=0.0,
    # Sampling temperature / nucleus settings (unused while do_sample=False).
    temperature=1.0,
    top_k=50,
    top_p=1.0,
    typical_p=1.0,
    repetition_penalty=1.0,
    length_penalty=1.0,
    no_repeat_ngram_size=0,
    encoder_no_repeat_ngram_size=0,
    num_return_sequences=1,
    chunk_size_feed_forward=0,
    output_scores=False,
    forced_bos_token_id=None,
    forced_eos_token_id=None,
    remove_invalid_values=False,
    exponential_decay_length_penalty=None,
    # Reuse past key/values across decoding steps.
    use_cache=True,
    # Tokenizer
    pad_token_id=0,
    eos_token_id=1,
    bos_token_id=None,
    sep_token_id=None,
    decoder_start_token_id=0,
)
model = LazyCall(MT5Model)(cfg=cfg)
# Placeholder path — point at a real sentencepiece model before running.
tokenization.tokenizer = LazyCall(T5Tokenizer)(
    vocab_file="/path/to/spiece.model",
    add_bos_token=True,
)
| 1,182 | 24.170213 | 62 | py |
libai | libai-main/projects/MT5/configs/mt5_small.py | from omegaconf import DictConfig
from libai.config import LazyCall
from projects.MT5.mt5_model import MT5Model, MT5ForPreTraining
cfg = dict(
vocab_size=250112,
hidden_size=512,
hidden_layers=8,
num_attention_heads=6,
head_size=64,
intermediate_size=1024,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
embedding_dropout_prob=0.1,
relative_attention_num_buckets=32,
initializer_range=1.0,
layernorm_eps=1e-06,
amp_enabled=False,
model_type="mt5",
eos_token_id=1,
padding_idx=0,
is_encoder_decoder=True,
tie_word_embeddings=False,
)
cfg = DictConfig(cfg)
mt5_model = LazyCall(MT5Model)(cfg=cfg)
pretrain_model = LazyCall(MT5ForPreTraining)(cfg=cfg)
| 736 | 22.774194 | 62 | py |
libai | libai-main/projects/MT5/configs/mt5_large.py | from omegaconf import DictConfig
from libai.config import LazyCall
from projects.MT5.mt5_model import MT5Model, MT5ForPreTraining
cfg = dict(
vocab_size=250112,
hidden_size=1024,
hidden_layers=24,
num_attention_heads=16,
head_size=64,
intermediate_size=2816,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
embedding_dropout_prob=0.1,
relative_attention_num_buckets=32,
initializer_range=1.0,
layernorm_eps=1e-06,
amp_enabled=False,
model_type="mt5",
eos_token_id=1,
padding_idx=0,
is_encoder_decoder=True,
tie_word_embeddings=False,
)
cfg = DictConfig(cfg)
mt5_model = LazyCall(MT5Model)(cfg=cfg)
pretrain_model = LazyCall(MT5ForPreTraining)(cfg=cfg)
| 739 | 22.870968 | 62 | py |
libai | libai-main/projects/MT5/configs/mt5_pretrain.py | from libai.config import LazyCall
from libai.evaluation import PPLEvaluator
from libai.scheduler import WarmupExponentialLR
from configs.common.train import train
from configs.common.data.t5_dataset import dataloader, tokenization
from configs.common.models.graph import graph
from configs.common.optim import optim
from projects.MT5.configs.mt5_base import pretrain_model as model
# Dataset paths (test fixtures; replace for real training).
vocab_file = "./data_test/bert_data/bert-base-chinese-vocab.txt"
data_prefix = "./data_test/bert_data/loss_compara_content_sentence"
tokenization.tokenizer.vocab_file = vocab_file
dataloader.train.dataset[0].data_prefix = data_prefix
dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
# model config
model.cfg.hidden_size = 768
model.cfg.hidden_layers = 12
model.cfg.num_attention_heads = 12
model.cfg.head_size = 64
model.cfg.intermediate_size = 2048
model.cfg.model_type = "mt5"
# Dropout disabled for this pretraining recipe.
model.cfg.hidden_dropout_prob = 0.0
model.cfg.attention_probs_dropout_prob = 0.0
model.cfg.embedding_dropout_prob = 0.0
# Vocab overridden to match the BERT Chinese vocab file above.
model.cfg.vocab_size = 30522
model.cfg.padding_idx = 0
model.cfg.tie_word_embeddings = False
# NOTE(review): is_encoder_decoder=False while pretraining an
# encoder-decoder MT5 — confirm this is intentional.
model.cfg.is_encoder_decoder = False
model.cfg.amp_enabled = True
model.cfg.initializer_range = 0.02
model.cfg.pretrained_model_path = None
train.update(
    dict(
        output_dir="projects/MT5/output/mt5_output",
        train_micro_batch_size=4,
        train_epoch=1,
        train_iter=24000,
        log_period=10,
        amp=dict(enabled=True),
        warmup_ratio=1 / 24,
        # checkpointer=dict(period=10, max_to_keep=20),
        input_placement_device="cpu",
        # 2-way data parallel x 2-way tensor parallel, no pipeline stages.
        dist=dict(
            data_parallel_size=2,
            tensor_parallel_size=2,
            pipeline_parallel_size=1,
            pipeline_num_layers=2 * model.cfg.hidden_layers,
        ),
        scheduler=LazyCall(WarmupExponentialLR)(
            warmup_factor=0.001,
            gamma=1.0,
            warmup_method="linear",
            warmup_iter=0.0,
        ),
        evaluation=dict(
            evaluator=LazyCall(PPLEvaluator)(),
            enabled=True,
            eval_iter=1e5,
            eval_period=5000,
        ),
    )
)
# ZeRO stage-2 optimizer-state sharding, with gradient accumulation.
train.zero_optimization.enabled = True
train.zero_optimization.stage = 2
train.activation_checkpoint.enabled = False
train.num_accumulation_steps = 8
| 2,284 | 30.736111 | 69 | py |
libai | libai-main/projects/MT5/utils/mt5_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import oneflow as flow
from libai.models.utils import ModelLoaderHuggerFace, ModelLoaderLiBai
class T5LoaderHuggerFace(ModelLoaderHuggerFace):
    """Load a HuggingFace Transformers T5/MT5 checkpoint into LiBai's MT5 model.

    The work is key renaming: Transformers stores separate q/k/v/o projection
    weights per attention block, while LiBai fuses q/k/v into a single
    ``query_key_value`` (self-attention) or ``key_value`` (cross-attention)
    parameter, with a head-ordering fix applied via ``_fix_qkv_ordering``.
    """

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)

        """NOTE: base_model_prefix_1 is T5's prefix in Transformers.
        base_model_prefix_2 is T5's prefix in LiBai."""
        self.base_model_prefix_1 = "transformer"
        self.base_model_prefix_2 = "mt5_model"

    def _convert_state_dict(self, flow_state_dict, cfg):
        """Convert state_dict's keys to match model.

        Args:
            flow_state_dict (OrderedDict): model state dict.
            cfg (dict): model's default config dict in LiBai.

        Returns:
            OrderedDict: flow state dict.
        """
        # The converted checkpoint.
        oneflow_state_dict = flow_state_dict.copy()
        old_keys = list(oneflow_state_dict.keys())

        # Get configs
        num_heads = cfg.get("num_attention_heads")
        hidden_size = cfg.get("hidden_size")
        head_size = cfg.get("head_size", None)
        if head_size is None:
            head_size = int(hidden_size / num_heads)

        # Checkpoints saved from a wrapped model carry a "transformer." prefix;
        # all positional indices into the dotted key shift by one accordingly.
        has_prefix = any(s.startswith(self.base_model_prefix_1) for s in oneflow_state_dict)
        prefix1 = self.base_model_prefix_1 + "." if has_prefix else ""
        prefix2 = self.base_model_prefix_2 + "." if has_prefix else ""
        encoder_decoder_idx = 1 if has_prefix else 0
        layer_idx1 = 3 if has_prefix else 2
        layer_idx2 = 5 if has_prefix else 4
        op_idx = 6 if has_prefix else 5

        # Convert T5's Embedding layers.
        # NOTE: Transformers' T5 has no position embedding layer.
        new_key = prefix2 + "embedding.word_embeddings.weight"
        old_keys.remove(prefix1 + "shared.weight")
        oneflow_state_dict[new_key] = oneflow_state_dict.pop(prefix1 + "shared.weight")

        # Convert T5's final_layer_norm
        new_key = prefix2 + "encoder.final_layernorm.weight"
        old_keys.remove(prefix1 + "encoder.final_layer_norm.weight")
        oneflow_state_dict[new_key] = oneflow_state_dict.pop(
            prefix1 + "encoder.final_layer_norm.weight"
        )
        new_key = prefix2 + "decoder.final_layernorm.weight"
        old_keys.remove(prefix1 + "decoder.final_layer_norm.weight")
        oneflow_state_dict[new_key] = oneflow_state_dict.pop(
            prefix1 + "decoder.final_layer_norm.weight"
        )

        # Convert MT5's lm_head
        if cfg.model_type == "mt5" and "lm_head.weight" in oneflow_state_dict:
            new_key = prefix2 + "lm_head.weight"
            old_keys.remove("lm_head.weight")
            oneflow_state_dict[new_key] = oneflow_state_dict.pop("lm_head.weight")

        # NOTE: Each layers has no bias in Transformer's T5.
        # NOTE(review): the guard below uses ``>``; a key with exactly
        # layer_idx2 parts would pass it and raise IndexError at
        # ``keys[op_idx]`` — appears unreachable for real T5 checkpoints
        # (short keys are removed above), but confirm.
        for key in old_keys:
            keys = key.split(".")
            if layer_idx1 > len(keys) or layer_idx2 > len(keys):
                continue
            layer1 = keys[layer_idx1]
            layer2 = keys[layer_idx2]
            op_name = keys[op_idx]
            # Only layer 0 carries the shared relative-attention bias table.
            if keys[op_idx + 1] == "relative_attention_bias" and keys[op_idx] == "SelfAttention":
                new_key = (
                    prefix2
                    + keys[encoder_decoder_idx]
                    + ".layers.0.self_attention.relative_attention_bias.weight"
                )
                oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
            # Convert T5's Encoder layers.
            if keys[encoder_decoder_idx] == "encoder":
                if op_name == "SelfAttention":
                    # Fuse q/k/v into one query_key_value weight. The first
                    # sibling key (q, k, v or o) triggers the fusion; the
                    # ``continue`` below makes later siblings no-ops.
                    new_key = (
                        prefix2
                        + "encoder.layers."
                        + layer1
                        + ".self_attention.query_key_value.weight"
                    )
                    if new_key in oneflow_state_dict.keys():
                        continue
                    q_w = ".".join(keys[: op_idx + 1]) + ".q." + "weight"
                    k_w = ".".join(keys[: op_idx + 1]) + ".k." + "weight"
                    v_w = ".".join(keys[: op_idx + 1]) + ".v." + "weight"
                    qkv_w = flow.cat(
                        (
                            oneflow_state_dict.pop(q_w),
                            oneflow_state_dict.pop(k_w),
                            oneflow_state_dict.pop(v_w),
                        ),
                        dim=0,
                    )
                    qkv_w = self._fix_qkv_ordering(qkv_w, head_size, num_heads, hidden_size)
                    oneflow_state_dict[new_key] = qkv_w

                    o_w = ".".join(keys[: op_idx + 1]) + ".o." + "weight"
                    new_key = prefix2 + "encoder.layers." + layer1 + ".self_attention.dense.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(o_w)
                elif op_name == "layer_norm":
                    # layer.0 norm -> input_layernorm, layer.1 norm -> post_attention.
                    if layer2 == "0":
                        new_key = prefix2 + "encoder.layers." + layer1 + ".input_layernorm.weight"
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                    elif layer2 == "1":
                        new_key = (
                            prefix2
                            + "encoder.layers."
                            + layer1
                            + ".post_attention_layernorm.weight"
                        )
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif op_name == "DenseReluDense":
                    # t5 uses a two-matrix MLP (wi/wo); mt5 uses gated-GELU (wi_0/wi_1/wo).
                    if cfg.get("model_type") == "t5":
                        if keys[op_idx + 1] == "wi":
                            new_key = (
                                prefix2 + "encoder.layers." + layer1 + ".mlp.dense_h_to_4h.weight"
                            )
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                        elif keys[op_idx + 1] == "wo":
                            new_key = (
                                prefix2 + "encoder.layers." + layer1 + ".mlp.dense_4h_to_h.weight"
                            )
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                    elif cfg.get("model_type") == "mt5":
                        if keys[op_idx + 1] == "wi_0":
                            new_key = prefix2 + "encoder.layers." + layer1 + ".mlp.wi_0.weight"
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                        elif keys[op_idx + 1] == "wi_1":
                            new_key = prefix2 + "encoder.layers." + layer1 + ".mlp.wi_1.weight"
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                        elif keys[op_idx + 1] == "wo":
                            new_key = prefix2 + "encoder.layers." + layer1 + ".mlp.wo.weight"
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
            # Convert T5's decoder Layers.
            elif keys[encoder_decoder_idx] == "decoder":
                if op_name == "SelfAttention":
                    new_key = (
                        prefix2
                        + "decoder.layers."
                        + layer1
                        + ".self_attention.query_key_value.weight"
                    )
                    if new_key in oneflow_state_dict.keys():
                        continue
                    q_w = ".".join(keys[: op_idx + 1]) + ".q." + "weight"
                    k_w = ".".join(keys[: op_idx + 1]) + ".k." + "weight"
                    v_w = ".".join(keys[: op_idx + 1]) + ".v." + "weight"
                    qkv_w = flow.cat(
                        (
                            oneflow_state_dict.pop(q_w),
                            oneflow_state_dict.pop(k_w),
                            oneflow_state_dict.pop(v_w),
                        ),
                        dim=0,
                    )
                    qkv_w = self._fix_qkv_ordering(qkv_w, head_size, num_heads, hidden_size)
                    oneflow_state_dict[new_key] = qkv_w
                    o_w = ".".join(keys[: op_idx + 1]) + ".o." + "weight"
                    new_key = prefix2 + "decoder.layers." + layer1 + ".self_attention.dense.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(o_w)
                elif op_name == "layer_norm":
                    # Decoder has three norms: input, post-attention, post-cross-attention.
                    if layer2 == "0":
                        new_key = prefix2 + "decoder.layers." + layer1 + ".input_layernorm.weight"
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                    elif layer2 == "1":
                        new_key = (
                            prefix2
                            + "decoder.layers."
                            + layer1
                            + ".post_attention_layernorm.weight"
                        )
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                    elif layer2 == "2":
                        new_key = (
                            prefix2
                            + "decoder.layers."
                            + layer1
                            + ".post_cross_attention_layernorm.weight"
                        )
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif op_name == "EncDecAttention":
                    # Cross-attention: q stays separate, k/v are fused into key_value.
                    new_key = prefix2 + "decoder.layers." + layer1 + ".cross_attention.query.weight"
                    if new_key in oneflow_state_dict.keys():
                        continue
                    q_w = ".".join(keys[: op_idx + 1]) + ".q." + "weight"
                    k_w = ".".join(keys[: op_idx + 1]) + ".k." + "weight"
                    v_w = ".".join(keys[: op_idx + 1]) + ".v." + "weight"
                    q_w = oneflow_state_dict.pop(q_w)
                    kv_w = flow.cat(
                        (
                            oneflow_state_dict.pop(k_w),
                            oneflow_state_dict.pop(v_w),
                        ),
                        dim=0,
                    )
                    q_w = self._fix_qkv_ordering(q_w, head_size, num_heads, hidden_size)
                    kv_w = self._fix_qkv_ordering(kv_w, head_size, num_heads, hidden_size)
                    oneflow_state_dict[new_key] = q_w
                    new_key = (
                        prefix2 + "decoder.layers." + layer1 + ".cross_attention.key_value.weight"
                    )
                    oneflow_state_dict[new_key] = kv_w
                    o_w = ".".join(keys[: op_idx + 1]) + ".o." + "weight"
                    new_key = prefix2 + "decoder.layers." + layer1 + ".cross_attention.dense.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(o_w)
                elif op_name == "DenseReluDense":
                    if cfg.get("model_type") == "t5":
                        if keys[op_idx + 1] == "wi":
                            new_key = (
                                prefix2 + "decoder.layers." + layer1 + ".mlp.dense_h_to_4h.weight"
                            )
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                        elif keys[op_idx + 1] == "wo":
                            new_key = (
                                prefix2 + "decoder.layers." + layer1 + ".mlp.dense_4h_to_h.weight"
                            )
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                    elif cfg.get("model_type") == "mt5":
                        if keys[op_idx + 1] == "wi_0":
                            new_key = prefix2 + "decoder.layers." + layer1 + ".mlp.wi_0.weight"
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                        elif keys[op_idx + 1] == "wi_1":
                            new_key = prefix2 + "decoder.layers." + layer1 + ".mlp.wi_1.weight"
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                        elif keys[op_idx + 1] == "wo":
                            new_key = prefix2 + "decoder.layers." + layer1 + ".mlp.wo.weight"
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)

        return oneflow_state_dict

    def _load_config_from_json(self, config_file):
        """load config from `config.json`, and update default config.

        Args:
            config_file (str): Path of config file.
        """
        with open(config_file, mode="r", encoding="utf-8") as f:
            cfg_dict = json.load(f)

        # Map HuggingFace T5Config field names onto LiBai's config keys.
        self._update_cfg("vocab_size", cfg_dict["vocab_size"])
        self._update_cfg("hidden_size", cfg_dict["d_model"])
        self._update_cfg("hidden_layers", cfg_dict["num_layers"])
        self._update_cfg("num_attention_heads", cfg_dict["num_heads"])
        self._update_cfg("intermediate_size", cfg_dict["d_ff"])
        self._update_cfg("hidden_dropout_prob", cfg_dict["dropout_rate"])
        self._update_cfg("attention_probs_dropout_prob", cfg_dict["dropout_rate"])
        self._update_cfg(
            "relative_attention_num_buckets", cfg_dict["relative_attention_num_buckets"]
        )
        self._update_cfg("embedding_dropout_prob", cfg_dict["dropout_rate"])
        self._update_cfg("initializer_range", cfg_dict["initializer_factor"])
        self._update_cfg("layernorm_eps", cfg_dict["layer_norm_epsilon"])
        self._update_cfg("head_size", cfg_dict["d_kv"])
        if "tie_word_embeddings" in self.libai_cfg:
            self._update_cfg("tie_word_embeddings", cfg_dict.get("tie_word_embeddings", True))

        # update libai_cfg by kwargs
        for k, v in self.kwargs.items():
            self._update_cfg(k, v)

        self._update_cfg_log()
class T5LoaderLibai(ModelLoaderLiBai):
    """Load a LiBai-native MT5 checkpoint (keys already in LiBai layout)."""

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)
        # LiBai checkpoints store model weights under the "mt5_model" prefix.
        self.base_model_prefix_2 = "mt5_model"
| 14,821 | 48.07947 | 100 | py |
libai | libai-main/projects/MT5/utils/mt5_metrc_printer.py | import datetime
import logging
import time
from libai.utils.events import EventWriter, get_event_storage
class MT5MetricPrinter(EventWriter):
    """
    Print **MT5** metrics to the terminal, including
    iteration time, ETA, memory, all losses, and the learning rate.
    It also applies smoothing using a window of 20 elements.

    It's meant to print MT5 metrics in MT5-specific ways (token throughput,
    masked-LM accuracy). To print something in more customized ways, please
    implement a similar printer by yourself.
    """

    def __init__(self, batch_size, max_iter, log_period):
        """
        Args:
            batch_size (int): global batch size, used for sample throughput.
            max_iter (int): the maximum number of iterations to train.
                Used to compute ETA.
            log_period (int): smoothing window (in iterations) for averages.
        """
        self.logger = logging.getLogger("libai." + __name__)
        self._batch_size = batch_size
        self._max_iter = max_iter
        self._last_write = None
        self._log_period = log_period

    def write(self):
        storage = get_event_storage()
        iteration = storage.iter
        consumed_samples = storage.samples

        # Token throughput needs both "done_tokens" and "time" histories;
        # if either is missing, done_tokens stays None and the field is omitted.
        try:
            done_tokens = storage.history("done_tokens").avg(self._log_period)
            token_time = storage.history("time").avg(self._log_period)
        except KeyError:
            done_tokens = None

        # Masked-LM accuracy = correct_tokens / denominator over the window.
        try:
            correct_tokens = storage.history("correct_tokens").avg(self._log_period)
            denominator = storage.history("denominator").avg(self._log_period)
            acc_mlm = correct_tokens / denominator
        except KeyError:
            acc_mlm = None

        if iteration == self._max_iter:
            # This hook only reports training progress (loss, ETA, etc) but not other data,
            # therefore do not write anything after training succeeds, even if this method
            # is called.
            return

        try:
            data_time = storage.history("data_time").avg(self._log_period)
        except KeyError:
            # they may not exist in the first few iterations (due to warmup)
            # or when SimpleTrainer is not used
            data_time = None

        eta_string = None
        try:
            iter_time = storage.history("time").global_avg()
            eta_seconds = storage.history("time").median(1000) * (self._max_iter - iteration - 1)
            storage.put_scalar("eta_seconds", eta_seconds, smoothing_hint=False)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
        except KeyError:
            iter_time = None
            # estimate eta on our own - more noisy
            # NOTE: _last_write is only refreshed on this fallback path.
            if self._last_write is not None:
                estimate_iter_time = (time.perf_counter() - self._last_write[1]) / (
                    iteration - self._last_write[0]
                )
                eta_seconds = estimate_iter_time * (self._max_iter - iteration - 1)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            self._last_write = (iteration, time.perf_counter())

        try:
            lr = "{:.2e}".format(storage.history("lr").latest())
        except KeyError:
            lr = "N/A"

        # Memory reporting is currently disabled (always None, so the field
        # is omitted from the log line).
        max_mem_mb = None

        # NOTE: max_mem is parsed by grep in "dev/parse_results.sh"
        self.logger.info(
            " {eta} {iter} {sample} {losses} {time} {data_time} {tpt} lr: {lr} {memory} "
            " {tokens_speed} {acc_mlm}".format(
                eta=f"eta: {eta_string}" if eta_string else "",
                iter=f"iteration: {iteration}/{self._max_iter}",
                sample=f"consumed_samples: {consumed_samples}",
                losses=" ".join(
                    [
                        "{}: {:.4g}".format(k, v.median(200))
                        for k, v in storage.histories().items()
                        if "loss" in k
                    ]
                ),
                time="time: {:.4f} s/iter ".format(iter_time) if iter_time is not None else "",
                data_time="data_time: {:.4f} s/iter".format(data_time)
                if data_time is not None
                else "",
                tpt="total_throughput: {:.2f} samples/s".format(self._batch_size / iter_time)
                if iter_time is not None
                else "",
                lr=lr,
                memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "",
                tokens_speed="tokens_throughput: {:.4f} tokens/s".format(done_tokens / token_time)
                if done_tokens is not None
                else "",
                acc_mlm="acc_mlm: {:.4f}".format(acc_mlm) if acc_mlm is not None else "",
            )
        )
| 4,601 | 39.725664 | 98 | py |
libai | libai-main/projects/QQP/dataset/qqp_dataset.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .data import GLUEAbstractDataset
from .data_utils import clean_text
logger = logging.getLogger("libai." + __name__)
LABELS = [0, 1]
class QQPDataset(GLUEAbstractDataset):
    """QQP (Quora Question Pairs) dataset parsed from GLUE TSV files.

    Train/dev rows have 6 tab-separated columns (id, qid1, qid2, question1,
    question2, is_duplicate); test rows have 3 (id, question1, question2)
    and receive ``test_label``.
    """

    def __init__(self, dataset_name, data_paths, tokenizer, max_seq_length, test_label=0):
        # Label assigned to every row of an unlabeled (test) split.
        self.test_label = test_label
        self.dataset_name = dataset_name
        super().__init__("QQP", dataset_name, data_paths, tokenizer, max_seq_length)

    def process_samples_from_single_path(self, filename):
        """Implement abstract method."""
        logger.info(" > Processing {} ...".format(filename))

        samples = []
        total = 0
        first = True
        is_test = False
        with open(filename, "r") as f:
            for line in f:
                row = line.strip().split("\t")
                # The first row is the TSV header; its column count tells us
                # whether this file is a labeled split or the test split.
                if first:
                    first = False
                    if len(row) == 3:
                        is_test = True
                        logger.info(
                            "   reading {}, {}, and {} columns and "
                            "setting labels to {}".format(
                                row[0].strip(), row[1].strip(), row[2].strip(), self.test_label
                            )
                        )
                    else:
                        assert len(row) == 6
                        logger.info(
                            "    reading {}, {}, {}, and {} columns"
                            " ...".format(
                                row[0].strip(), row[3].strip(), row[4].strip(), row[5].strip()
                            )
                        )
                    continue

                if is_test:
                    assert len(row) == 3, "expected length 3: {}".format(row)
                    uid = int(row[0].strip())
                    text_a = clean_text(row[1].strip())
                    text_b = clean_text(row[2].strip())
                    label = self.test_label
                    assert len(text_a) > 0
                    assert len(text_b) > 0
                else:
                    # Malformed or empty rows are skipped with a warning
                    # rather than aborting the whole file.
                    if len(row) == 6:
                        uid = int(row[0].strip())
                        text_a = clean_text(row[3].strip())
                        text_b = clean_text(row[4].strip())
                        label = int(row[5].strip())
                    else:
                        logger.info("***WARNING*** index error, " "skipping: {}".format(row))
                        continue
                    if len(text_a) == 0:
                        logger.info("***WARNING*** zero length a, " "skipping: {}".format(row))
                        continue
                    if len(text_b) == 0:
                        logger.info("***WARNING*** zero length b, " "skipping: {}".format(row))
                        continue
                assert label in LABELS
                assert uid >= 0

                sample = {"uid": uid, "text_a": text_a, "text_b": text_b, "label": label}
                total += 1
                samples.append(sample)

                if total % 50000 == 0:
                    logger.info("  > processed {} so far ...".format(total))

        logger.info(" >> processed {} samples.".format(len(samples)))
        return samples
| 3,895 | 38.755102 | 95 | py |
libai | libai-main/projects/QQP/dataset/data_utils.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import numpy as np
import oneflow as flow
from libai.data.structures import DistTensorData, Instance
def clean_text(text):
    """Remove new lines and multiple spaces and adjust end of sentence dot."""
    collapsed = re.sub(r"\s+", " ", text.replace("\n", " "))
    # A few passes so chained " . " occurrences left by non-overlapping
    # replacement are also tightened.
    for _ in range(3):
        collapsed = collapsed.replace(" . ", ". ")
    return collapsed
def build_sample(ids, types, paddings, label, unique_id):
    """Convert to numpy and return a sample consumed by the batch producer.

    ``unique_id`` is accepted for call-site symmetry but is not stored in
    the returned Instance.
    """

    def _long_tensor(values):
        # int64 numpy -> oneflow long tensor.
        return flow.tensor(np.array(values, dtype=np.int64), dtype=flow.long)

    return Instance(
        model_input=DistTensorData(_long_tensor(ids), placement_idx=0),
        attention_mask=DistTensorData(_long_tensor(paddings), placement_idx=0),
        tokentype_ids=DistTensorData(_long_tensor(types), placement_idx=0),
        labels=DistTensorData(flow.tensor(label, dtype=flow.long), placement_idx=-1),
    )
def build_tokens_types_paddings_from_text(text_a, text_b, tokenizer, max_seq_length):
    """Tokenize the raw text pair and delegate to the id-level builder."""
    text_a_ids = tokenizer.tokenize(text_a)
    text_b_ids = tokenizer.tokenize(text_b) if text_b is not None else None
    return build_tokens_types_paddings_from_ids(
        text_a_ids, text_b_ids, max_seq_length, tokenizer.cls, tokenizer.sep, tokenizer.pad
    )
def build_tokens_types_paddings_from_ids(
    text_a_ids, text_b_ids, max_seq_length, cls_id, sep_id, pad_id
):
    """Assemble ``[CLS] A [SEP] B [SEP]`` ids with segment types and a padding mask.

    Sequences at or above ``max_seq_length`` are trimmed to leave room for a
    trailing [SEP]; shorter sequences are padded with ``pad_id`` (mask 0).

    Returns:
        (ids, types, paddings) — equal-length lists of ``max_seq_length``
        (or the untrimmed length when no padding is needed and B is absent).
    """
    # [CLS] + segment A + [SEP], all segment type 0, all real tokens.
    ids = [cls_id] + list(text_a_ids) + [sep_id]
    types = [0] * len(ids)
    paddings = [1] * len(ids)

    # Segment B (type 1), when present.
    if text_b_ids is not None:
        ids += list(text_b_ids)
        types += [1] * len(text_b_ids)
        paddings += [1] * len(text_b_ids)

    # Trim to max_seq_length - 1 so a final [SEP] can always be appended.
    trimmed = False
    if len(ids) >= max_seq_length:
        keep = max_seq_length - 1
        ids, types, paddings = ids[:keep], types[:keep], paddings[:keep]
        trimmed = True

    # Closing [SEP]: needed after segment B, or to cap a trimmed sequence.
    if text_b_ids is not None or trimmed:
        ids.append(sep_id)
        types.append(0 if text_b_ids is None else 1)
        paddings.append(1)

    # Right-pad to max_seq_length; pad positions get mask 0.
    pad_len = max_seq_length - len(ids)
    if pad_len > 0:
        ids += [pad_id] * pad_len
        types += [pad_id] * pad_len
        paddings += [0] * pad_len

    return ids, types, paddings
| 3,632 | 28.778689 | 98 | py |
libai | libai-main/projects/QQP/dataset/data.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from abc import ABC, abstractmethod
from oneflow.utils.data import Dataset
from .data_utils import build_sample, build_tokens_types_paddings_from_text
logger = logging.getLogger("libai." + __name__)
class GLUEAbstractDataset(ABC, Dataset):
    """GLUE base dataset class.

    Subclasses implement `process_samples_from_single_path`; this base class
    handles sample collection, logging, and on-the-fly feature building.
    """

    def __init__(self, task_name, dataset_name, datapaths, tokenizer, max_seq_length):
        # Keep the raw configuration around for __getitem__.
        self.task_name = task_name
        self.dataset_name = dataset_name
        self.tokenizer = tokenizer
        self.max_seq_length = max_seq_length
        logger.info(" > building {} dataset for {}:".format(self.task_name, self.dataset_name))

        # Log every file that will be processed.
        logger.info(" ".join([" > paths:", *datapaths]))

        # Collect samples from each path, preserving the given order.
        self.samples = [
            sample
            for datapath in datapaths
            for sample in self.process_samples_from_single_path(datapath)
        ]
        logger.info(" >> total number of samples: {}".format(len(self.samples)))

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        raw = self.samples[idx]
        token_ids, token_types, padding_mask = build_tokens_types_paddings_from_text(
            raw["text_a"], raw["text_b"], self.tokenizer, self.max_seq_length
        )
        return build_sample(token_ids, token_types, padding_mask, raw["label"], raw["uid"])

    @abstractmethod
    def process_samples_from_single_path(self, datapath):
        """Abstract method that takes a single path / filename and
        returns a list of dataset samples, each sample being a dict of
        {'text_a': string, 'text_b': string, 'label': int, 'uid': int}
        """
| 2,374 | 36.698413 | 95 | py |
libai | libai-main/projects/QQP/dataset/download_qqp_data.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append(".")
from libai.utils.file_utils import get_data_from_cache # noqa
# fmt:off
# URLs of the vocab and the QQP train/dev TSV files on the OneFlow OSS bucket.
VOCAB_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/bert-base-chinese-vocab.txt" # noqa
QQP_TRAIN_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/QQP/train.tsv" # noqa
QQP_TEST_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/QQP/dev.tsv" # noqa
# fmt:on

# Expected MD5 checksums, used by get_data_from_cache to validate downloads.
VOCAB_MD5 = "3b5b76c4aef48ecf8cb3abaafe960f09"
QQP_TRAIN_MD5 = "f65950abb9499d8e3e33da7d68d61c4e"
QQP_TEST_MD5 = "35ca3d547003266660a77c6031069548"

# Local directory the files are cached into.
cache_dir = "projects/QQP/QQP_DATA/"
if __name__ == "__main__":
    # (description, url, md5) triples, downloaded sequentially into cache_dir.
    downloads = [
        ("vocab", VOCAB_URL, VOCAB_MD5),
        ("training data", QQP_TRAIN_URL, QQP_TRAIN_MD5),
        ("testing data", QQP_TEST_URL, QQP_TEST_MD5),
    ]
    for description, url, md5 in downloads:
        print("downloading {}...".format(description))
        get_data_from_cache(url, cache_dir, md5=md5)
    print("downloading complete")
| 1,660 | 38.547619 | 135 | py |
libai | libai-main/projects/QQP/tokenizer/bert_tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
import collections
import re
import sys
import unicodedata
import six
# True when running under Python 3; the module keeps Python 2 compatibility.
is_python3 = sys.version_info.major == 3
if is_python3:
    unicode = str  # Python 3 has no `unicode` type; alias it to `str`.
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
    """Checks whether the casing config is consistent with the checkpoint name."""
    # Casing is supplied by the user and is not recorded in bert_config.json,
    # so we heuristically recover it from the checkpoint's directory name and
    # raise if the two disagree.
    if not init_checkpoint:
        return

    match = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
    if match is None:
        return
    model_name = match.group(1)

    # Released Google checkpoints whose names encode the expected casing.
    lowercased_models = {
        "uncased_L-24_H-1024_A-16",
        "uncased_L-12_H-768_A-12",
        "multilingual_L-12_H-768_A-12",
        "chinese_L-12_H-768_A-12",
    }
    cased_models = {
        "cased_L-12_H-768_A-12",
        "cased_L-24_H-1024_A-16",
        "multi_cased_L-12_H-768_A-12",
    }

    if model_name in lowercased_models and not do_lower_case:
        actual_flag, case_name, opposite_flag = "False", "lowercased", "True"
    elif model_name in cased_models and do_lower_case:
        actual_flag, case_name, opposite_flag = "True", "cased", "False"
    else:
        return

    raise ValueError(
        "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
        "However, `%s` seems to be a %s model, so you "
        "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
        "how the model was pre-training. If this error is wrong, please "
        "just comment out this check."
        % (actual_flag, init_checkpoint, model_name, case_name, opposite_flag)
    )
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if six.PY3:
        # Python 3: bytes are decoded, str passes through.
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        if isinstance(text, str):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        # Python 2: unicode passes through, byte-str is decoded.
        if isinstance(text, unicode):
            return text
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`."""
    # Both Python versions want the native `str` type here: unicode text on
    # Python 3, utf-8 bytes on Python 2.
    if six.PY3:
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        if isinstance(text, str):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, unicode):
            return text.encode("utf-8")
        if isinstance(text, str):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
    """Loads a vocabulary file into an OrderedDict mapping token -> line index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        # One token per line; the line number is the token id.
        for index, line in enumerate(reader):
            vocab[convert_to_unicode(line).strip()] = index
    return vocab
def convert_by_vocab(vocab, items, inv=False):
    """Converts a sequence of [tokens|ids] using the vocab.

    Chinese WordPiece sub-tokens ("##" + CJK char) share the embedding row of
    their base character, offset by the vocab size:
    ``id("##X") == id("X") + len(vocab)``.

    Args:
        vocab: token -> id mapping when ``inv`` is False, id -> token when True.
        items: tokens to encode (``inv=False``) or ids to decode (``inv=True``).
        inv: decode ids back to tokens instead of encoding tokens.

    Returns:
        A list of converted ids (or tokens when ``inv`` is True).
    """
    output = []
    for item in items:
        if not inv:
            if _is_chinese_substr(item):
                # Map "##X" to id("X") + vocab_size so it shares X's embedding.
                output.append(vocab[item[2:]] + len(vocab))
            else:
                output.append(vocab[item])
        else:
            # Ids at or above len(vocab) are offset Chinese sub-tokens.
            # BUGFIX: was `item > len(vocab)`, which mis-decoded the offset id
            # exactly equal to len(vocab) (base id 0) and raised KeyError.
            if item >= len(vocab):
                output.append("##" + vocab[item - len(vocab)])
            else:
                output.append(vocab[item])
    return output
def convert_tokens_to_ids(vocab, tokens):
    """Map token strings to ids (module-level convenience wrapper)."""
    return convert_by_vocab(vocab, tokens, inv=False)
def convert_ids_to_tokens(inv_vocab, ids):
    """Map ids back to token strings (module-level convenience wrapper)."""
    return convert_by_vocab(inv_vocab, ids, inv=True)
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    stripped = text.strip()
    # str.split() with no argument collapses any run of whitespace.
    return stripped.split() if stripped else []
class FullTokenizer(object):
    """Runs end-to-end tokenziation: basic tokenization followed by WordPiece."""

    def __init__(self, vocab_file, do_lower_case=True, do_chinese_wwm=False):
        # NOTE: the wrapping _BertTokenizer later adds [BOS]/[EOS] to this vocab.
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        if do_chinese_wwm:
            # Whole-word-masking variant segments Chinese runs with jieba.
            self.basic_tokenizer = BasicTokenizerWithChineseWWM(do_lower_case)
        else:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Split `text` into word pieces (basic tokenization, then WordPiece)."""
        return [
            sub_token
            for token in self.basic_tokenizer.tokenize(text)
            for sub_token in self.wordpiece_tokenizer.tokenize(token)
        ]

    def convert_tokens_to_ids(self, tokens):
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        return convert_by_vocab(self.inv_vocab, ids, inv=True)

    @staticmethod
    def convert_tokens_to_string(tokens, clean_up_tokenization_spaces=True):
        """Converts a sequence of tokens (string) to a single string."""
        joined = " ".join(tokens).replace(" ##", "").strip()
        if not clean_up_tokenization_spaces:
            return joined
        # Undo simple English tokenization artifacts: spaces before
        # punctuation and around abbreviated forms.
        replacements = [
            (" .", "."),
            (" ?", "?"),
            (" !", "!"),
            (" ,", ","),
            (" ' ", "'"),
            (" n't", "n't"),
            (" 'm", "'m"),
            (" 's", "'s"),
            (" 've", "'ve"),
            (" 're", "'re"),
        ]
        for old, new in replacements:
            joined = joined.replace(old, new)
        return joined

    def vocab_size(self):
        return len(self.vocab)
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text.

        Pipeline: drop invalid characters / normalize whitespace, space-pad
        CJK characters, split on whitespace, optionally lowercase and strip
        accents, split on punctuation, then re-split on whitespace.

        Args:
            text: input text (str, or utf-8 bytes).

        Returns:
            A list of token strings.
        """
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD-decompose, then drop combining marks (Unicode category Mn).
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        # Each punctuation character becomes its own token; runs of other
        # characters between them are kept together.
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the Unicode replacement character, and control chars.
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenziation."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
        # vocab: token -> id mapping. Words longer than
        # max_input_chars_per_word are mapped directly to unk_token.
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]
            input = "有没有"
            output = ["有", "##没", "##有"]

        (NickPan) Special handling for Chinese: a Chinese sub-token ("##X")
        and the bare character "X" should not use different embeddings, so
        "X" and "##X" must share the same id. Some Chinese vocabs already
        contain "##"-prefixed entries while others (e.g. bert4keras) do not;
        to support both, a Chinese sub-token's id is uniformly defined as
        vocab_size + id(X) (the "##" is stripped before lookup), which is
        then accounted for when the sample mask is built. Therefore during
        WordPiece tokenization we never check whether "##X" itself is in the
        vocab for Chinese — only whether "X" is.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer.

        Returns:
            A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                # Greedy longest-match: shrink `end` until chars[start:end]
                # (with a "##" prefix when start > 0) is found in the vocab.
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr.startswith("##"):
                        if _is_chinese_substr(substr):
                            if substr[2:] in self.vocab:  # Chinese sub-token
                                cur_substr = substr
                                break
                        else:
                            if substr in self.vocab:
                                cur_substr = substr  # English sub-token
                                break
                    else:
                        if substr in self.vocab:  # not a sub-token: a char or a whole Chinese word
                            cur_substr = substr
                            break
                    end -= 1
                if cur_substr is None:
                    # No prefix of the remaining characters is in the vocab.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (
(cp >= 33 and cp <= 47)
or (cp >= 58 and cp <= 64)
or (cp >= 91 and cp <= 96)
or (cp >= 123 and cp <= 126)
):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
class BasicTokenizerWithChineseWWM(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.).

    (nickpan) Mixed Chinese/English text gets special handling; see
    `_tokenize_chinese_chars`, which segments pure-Chinese runs with jieba.
    """

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.

        Raises:
            ImportError: if jieba (required for Chinese WWM) is not installed.
        """
        try:
            import jieba

            # HMM=False keeps jieba's segmentation deterministic dictionary-based.
            self.pre_tokenizer = lambda x: jieba.lcut(x, HMM=False)
        except ImportError:
            raise (ImportError("Chinese WWM need jieba"))
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD-decompose, then drop combining marks (Unicode category Mn).
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        # Each punctuation character becomes its own token; runs of other
        # characters between them are kept together.
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character.

        (nickpan) Runs of consecutive Chinese characters are additionally
        segmented with jieba, each jieba word being surrounded by spaces;
        non-Chinese characters keep the plain space-padding behavior.
        """
        output = []
        piece = ""
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                # Accumulate the pure-Chinese run; it is segmented when the
                # next non-Chinese character (or end of text) is reached.
                piece += char
            else:
                chinese_words = self.pre_tokenizer(piece)
                for word in chinese_words:
                    output.append(" ")
                    output.append(word)
                    output.append(" ")
                output.append(char)
                piece = ""
        # Flush a trailing Chinese run, if any.
        chinese_words = self.pre_tokenizer(piece)
        for word in chinese_words:
            output.append(" ")
            output.append(word)
            output.append(" ")
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the Unicode replacement character, and control chars.
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
def _is_chinese_substr(char):
return re.findall("##[\u4E00-\u9FA5]", char)
| 21,129 | 32.862179 | 84 | py |
libai | libai-main/projects/QQP/tokenizer/tokenizer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import jieba
from libai.tokenizer.tokenization_base import PreTrainedTokenizer
from .bert_tokenization import FullTokenizer as FullBertTokenizer
logger = logging.getLogger("libai." + __name__)
class _BertCNWWMTokenizer(PreTrainedTokenizer):
    """Chinese whole word BERT tokenizer.

    Wraps `FullBertTokenizer` and re-tags its output with jieba-based whole
    word segmentation (see `get_new_segment`). Also registers [BOS]/[EOS]
    and optional <extra_id_*> sentinel tokens on top of the base vocab.
    """

    def __init__(self, vocab_file, lower_case=True, vocab_extra_ids=0):
        if lower_case:
            name = "BERT Lower Case"
        else:
            name = "BERT Upper Case"
        super().__init__(name)
        self.tokenizer = FullBertTokenizer(vocab_file, do_lower_case=lower_case)
        # Cache the ids of the special tokens expected to exist in the vocab.
        self.cls_id = self.tokenizer.vocab["[CLS]"]
        self.sep_id = self.tokenizer.vocab["[SEP]"]
        self.pad_id = self.tokenizer.vocab["[PAD]"]
        self.mask_id = self.tokenizer.vocab["[MASK]"]
        self.unk_id = self.tokenizer.vocab["[UNK]"]
        self._additional_special_tokens = []

        # (dsachan) Add BOS and EOS tokens
        # SPECIAL_TOKENS = {"eos_token": "[EOS]", "bos_token": "[BOS]"}
        self._bos_token = "[BOS]"
        self.add_token(self._bos_token)
        self._bos_token_id = self.vocab.get(self._bos_token)

        self._eos_token = "[EOS]"
        self.add_token(self._eos_token)
        self._eos_token_id = self.vocab.get(self._eos_token)

        # (dsachan) Add additional special tokens
        # These can be used as sentinel tokens in T5 model inputs
        additional_special_tokens = []
        additional_special_tokens.extend(
            ["<extra_id_{}>".format(i) for i in range(vocab_extra_ids)]
        )
        self.add_additional_special_tokens(additional_special_tokens)

    def add_token(self, token):
        # Append `token` at the end of the vocab; no-op if already present.
        if token not in self.vocab:
            self.inv_vocab[self.vocab_size] = token
            # self.vocab_size comes from len(vocab)
            # and it will increase as we add elements
            self.vocab[token] = self.vocab_size

    def add_additional_special_tokens(self, tokens_list):
        # Record the list and make sure each token exists in the vocab.
        setattr(self, "additional_special_tokens", tokens_list)
        for value in tokens_list:
            self.add_token(value)

    @property
    def vocab_size(self):
        return self.tokenizer.vocab_size()

    @property
    def vocab(self):
        return self.tokenizer.vocab

    @property
    def inv_vocab(self):
        return self.tokenizer.inv_vocab

    def tokenize(self, text):
        """Tokenize `text`; NOTE: returns token *ids*, not token strings."""
        text_tokens = self.tokenizer.tokenize(text)
        # Re-tag sub-tokens using jieba whole word segmentation.
        text_tokens = get_new_segment(text_tokens)
        return self.tokenizer.convert_tokens_to_ids(text_tokens)

    def decode(self, ids):
        tokens = self.tokenizer.convert_ids_to_tokens(ids)
        return self.tokenizer.convert_tokens_to_string(tokens)

    def decode_token_ids(self, token_ids):
        """Decode ids to a string, dropping [PAD]/[CLS] and merging "##" pieces."""
        tokens = self.tokenizer.convert_ids_to_tokens(token_ids)
        exclude_list = ["[PAD]", "[CLS]"]
        non_pads = [t for t in tokens if t not in exclude_list]

        result = ""
        for s in non_pads:
            if s.startswith("##"):
                result += s[2:]
            else:
                result += " " + s

        return result

    @property
    def cls(self):
        return self.cls_id

    @property
    def sep(self):
        return self.sep_id

    @property
    def pad(self):
        return self.pad_id

    @property
    def mask(self):
        return self.mask_id

    @property
    def bos_token(self):
        """Beginning of sentence token id"""
        return self._bos_token

    @property
    def eos_token(self):
        """End of sentence token id"""
        return self._eos_token

    @property
    def additional_special_tokens(self):
        """All the additional special tokens you may want to use (list of strings)."""
        return self._additional_special_tokens

    @property
    def bos_token_id(self):
        """Id of the beginning of sentence token in the vocabulary."""
        return self._bos_token_id

    @property
    def eos_token_id(self):
        """Id of the end of sentence token in the vocabulary."""
        return self._eos_token_id

    @property
    def additional_special_tokens_ids(self):
        """Ids of all the additional special tokens in the vocabulary (list of integers)."""
        return [self.vocab.get(token) for token in self._additional_special_tokens]

    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value
def get_new_segment(segment):
    """Re-tag a token sequence for Chinese whole word masking.

    Runs jieba over the joined text; wherever 1-3 consecutive Chinese tokens
    form a jieba word, the trailing characters of that word are rewritten as
    "##"-prefixed sub-tokens. Non-Chinese tokens pass through unchanged.
    """
    text = "".join(segment) if isinstance(segment, list) else segment
    jieba_words = set(jieba.cut(text))

    new_segment = []
    i = 0
    total = len(segment)
    while i < total:
        token = segment[i]
        # Tokens without any Chinese character are copied through unchanged.
        if not re.search("[\u4E00-\u9FA5]", token):
            new_segment.append(token)
            i += 1
            continue
        # Greedily try the longest window (3 chars) first.
        for span in (3, 2, 1):
            if i + span <= total and "".join(segment[i : i + span]) in jieba_words:
                new_segment.append(token)
                new_segment.extend("##" + segment[i + k] for k in range(1, span))
                i += span
                break
        else:
            new_segment.append(token)
            i += 1
    return new_segment
| 5,981 | 30.819149 | 92 | py |
libai | libai-main/projects/QQP/configs/config_qqp.py | from omegaconf import OmegaConf
from configs.common.data.bert_dataset import tokenization
from configs.common.models.bert import cfg as qqp_cfg
from configs.common.optim import optim
from configs.common.train import train
from configs.common.models.graph import graph
from libai.config import LazyCall
from libai.data.build import build_nlp_test_loader, build_nlp_train_loader
from projects.QQP.dataset.qqp_dataset import QQPDataset
from projects.QQP.modeling.model import Classification
from projects.QQP.tokenizer.tokenizer import _BertCNWWMTokenizer
# Tokenizer: Chinese whole-word-masking BERT tokenizer over the downloaded vocab.
tokenization.tokenizer = LazyCall(_BertCNWWMTokenizer)(
    vocab_file="projects/QQP/QQP_DATA/bert-base-chinese-vocab.txt",
    lower_case=True,
)
tokenization.append_eod = False
tokenization.make_vocab_size_divisible_by = 128

# Dataloaders: QQP train/dev TSV files, sequences truncated/padded to 512 tokens.
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_loader)(
    dataset=[
        LazyCall(QQPDataset)(
            dataset_name="QQP_TRAIN",
            data_paths=[
                "projects/QQP/QQP_DATA/train.tsv",
            ],
            max_seq_length=512,
        ),
    ],
    num_workers=4,
)
dataloader.test = [
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(QQPDataset)(
            dataset_name="QQP_TEST",
            data_paths=[
                "projects/QQP/QQP_DATA/dev.tsv",
            ],
            max_seq_length=512,
        ),
        num_workers=4,
    ),
]

# Model: BERT-large-style encoder (24 layers, 1024 hidden) + 2-way classification head.
qqp_cfg.update(
    dict(
        # exist key
        vocab_size=21248,
        hidden_size=1024,
        hidden_layers=24,
        num_attention_heads=16,
        # new key
        num_classes=2,
        pretrain_megatron_weight=None,  # "path/to/model_optim_rng.pt",
    )
)

model = LazyCall(Classification)(cfg=qqp_cfg)

# Optimizer: small LR suitable for fine-tuning, with weight decay.
optim.lr = 1e-6
optim.weight_decay = 0.1

# Training: one epoch of fine-tuning with AMP and activation checkpointing,
# pure data parallelism (no tensor/pipeline parallel).
train.update(
    dict(
        activation_checkpoint=dict(enabled=True),
        amp=dict(enabled=True),
        output_dir="output/finetune_qqp/",
        train_micro_batch_size=16,
        test_micro_batch_size=4,
        train_epoch=1,
        train_iter=0,
        eval_period=100,
        log_period=10,
        warmup_ratio=0.01,
        topk=(1,),
        dist=dict(
            data_parallel_size=1,
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
        ),
    )
)
| 2,277 | 26.119048 | 74 | py |
libai | libai-main/projects/QQP/modeling/model.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import oneflow as flow
from oneflow import nn
from libai.layers import Linear
from libai.models.bert_model import BertModel
from libai.models.utils import init_method_normal
from libai.utils import distributed as dist
logger = logging.getLogger("libai." + __name__)
class ClassificationLoss(nn.Module):
    """Cross-entropy loss whose output sbp is adjusted for global reduction."""

    def __init__(self):
        super().__init__()

    def forward(self, classification_logits, label):
        criterion = nn.CrossEntropyLoss()
        loss = criterion(classification_logits, label)
        # NOTE: Change loss sbp sign [P, P] -> [P, B] to add with sop loss
        # whose sbp sign: [P, B]
        return loss.to_global(sbp=dist.get_nd_sbp([flow.sbp.partial_sum, flow.sbp.broadcast]))
class Classification(nn.Module):
    """BERT-based sequence-pair classifier for QQP fine-tuning.

    Wraps a BertModel encoder with dropout and a row-parallel linear head
    over the pooled output. Optionally initializes the encoder from a
    Megatron-LM checkpoint (cfg.pretrain_megatron_weight).
    """

    def __init__(self, cfg):
        super().__init__()
        self.num_classes = cfg.num_classes
        self.language_model = BertModel(cfg)
        if cfg.pretrain_megatron_weight is not None:
            # Imported lazily so the Megatron loader is only required when used.
            from .load_megatron_weight import load_megatron_bert

            logger.info(f"loading pretraining: {cfg.pretrain_megatron_weight}")
            load_megatron_bert(self.language_model, cfg.pretrain_megatron_weight)
            logger.info("load succeed")

        init_method = init_method_normal(cfg.initializer_range)
        self.classification_dropout = nn.Dropout(cfg.hidden_dropout_prob)
        # Row-parallel head placed on the last pipeline stage (layer_idx=-1).
        self.classification_head = Linear(
            cfg.hidden_size,
            self.num_classes,
            bias=True,
            parallel="row",
            init_method=init_method,
            layer_idx=-1,
        )
        self.loss_func = ClassificationLoss()

    def forward(self, model_input, attention_mask, tokentype_ids=None, labels=None):
        """Return {"total_loss": ...} in training (labels given); otherwise
        {"prediction_scores": logits of shape (batch, num_classes)}.
        """
        encoder_output, pooled_output = self.language_model(
            model_input, attention_mask, tokentype_ids
        )
        classification_output = self.classification_dropout(pooled_output)
        classification_logits = self.classification_head(classification_output)
        # reshape
        classification_logits = classification_logits.view(-1, self.num_classes)

        if self.training and labels is not None:
            loss = self.loss_func(classification_logits, labels)
            return {"total_loss": loss}
        return {"prediction_scores": classification_logits}
| 2,934 | 35.234568 | 94 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.