repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
libai | libai-main/libai/models/utils/model_loader/__init__.py | 0 | 0 | 0 | py | |
libai | libai-main/libai/models/utils/model_loader/roberta_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from .bert_loader import BertLoaderHuggerFace, BertLoaderLiBai
class RobertaLoaderHuggerFace(BertLoaderHuggerFace):
    """Load a Hugging Face Transformers RoBERTa checkpoint into LiBai's RoBERTa model.

    Reuses the BERT HuggerFace loader machinery; only the checkpoint key prefix
    and the key-conversion table differ.
    """

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)
        """NOTE: base_model_prefix_1 is RoBERTa's prefix in Transformers,
        base_model_prefix_2 is RoBERTa's prefix in LiBai."""
        self.base_model_prefix_1 = "roberta"
        self.base_model_prefix_2 = "roberta"

    def _convert_state_dict(self, flow_state_dict, cfg):
        """Convert state_dict's keys to match model.

        Rewrites Hugging Face RoBERTa parameter names into LiBai's layout:
        separate q/k/v projections are fused into one query_key_value tensor,
        and the per-layer output LayerNorm is shifted to the *next* layer's
        input_layernorm (the last layer's becomes final_layernorm).

        Args:
            flow_state_dict (OrderedDict): model state dict.
            cfg (dict): model's default config dict in LiBai.

        Returns:
            OrderedDict: flow state dict.
        """
        # The converted checkpoint (mutated in place via pop/insert).
        oneflow_state_dict = flow_state_dict.copy()
        # Get configs
        num_heads = cfg.get("num_attention_heads")
        hidden_size = cfg.get("hidden_size")
        layers = cfg.get("hidden_layers")
        head_size = int(hidden_size / num_heads)
        # prefix: HF keys look like "roberta.encoder.layer.<i>.attention.self.query.weight"
        # when the checkpoint was saved with the wrapping model, or start at "encoder."
        # otherwise. index_idx/qkv_idx are the dotted-path positions of the layer index
        # and of the "query"/"key"/"value" component, respectively.
        has_prefix = any(s.startswith(self.base_model_prefix_1) for s in oneflow_state_dict)
        prefix = "roberta." if has_prefix else ""
        index_idx = 3 if has_prefix else 2
        qkv_idx = 6 if has_prefix else 5
        old_keys = oneflow_state_dict.keys()
        for key in list(old_keys):
            # Convert roberta's embedding layers
            if "embeddings" in key:
                if "word_embeddings" in key:
                    new_key = key.replace("word_embeddings", "vocab_embeddings")
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif "token_type_embeddings" in key:
                    new_key = key.replace("token_type_embeddings", "tokentype_embeddings")
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif "LayerNorm.weight" in key:
                    # Embedding LayerNorm becomes the first encoder's input layernorm.
                    new_key = prefix + "encoders.0.input_layernorm.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif "LayerNorm.bias" in key:
                    new_key = prefix + "encoders.0.input_layernorm.bias"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                else:
                    # Remaining embedding keys (e.g. position embeddings) are kept verbatim.
                    oneflow_state_dict[key] = oneflow_state_dict[key]
            # Convert roberta's attention layers
            elif "attention" in key:
                if "self" in key:
                    index = key.split(".")[index_idx]
                    if (
                        prefix + "encoders." + index + ".self_attention.query_key_value.weight"
                        in oneflow_state_dict.keys()
                    ):
                        # q/k/v of this layer were already fused when an earlier
                        # sibling key was visited.
                        continue
                    # Derive the six source key names (q/k/v x weight/bias) from this key.
                    q_w = key.replace(key.split(".")[qkv_idx], "query").replace(
                        key.split(".")[qkv_idx + 1], "weight"
                    )
                    k_w = q_w.replace("query", "key")
                    v_w = q_w.replace("query", "value")
                    q_b = q_w.replace("weight", "bias")
                    k_b = k_w.replace("weight", "bias")
                    v_b = v_w.replace("weight", "bias")
                    qkv_w = flow.cat(
                        (
                            oneflow_state_dict.pop(q_w),
                            oneflow_state_dict.pop(k_w),
                            oneflow_state_dict.pop(v_w),
                        ),
                        dim=0,
                    )
                    qkv_b = flow.cat(
                        (
                            oneflow_state_dict.pop(q_b),
                            oneflow_state_dict.pop(k_b),
                            oneflow_state_dict.pop(v_b),
                        ),
                        dim=-1,
                    )
                    # Reorder the fused tensor into LiBai's per-head qkv layout.
                    qkv_w = self._fix_qkv_ordering(qkv_w, head_size, num_heads)
                    qkv_b = self._fix_qkv_ordering(qkv_b, head_size, num_heads)
                    new_key = (
                        prefix + "encoders." + index + ".self_attention.query_key_value.weight"
                    )
                    oneflow_state_dict[new_key] = qkv_w
                    new_key = prefix + "encoders." + index + ".self_attention.query_key_value.bias"
                    oneflow_state_dict[new_key] = qkv_b
                elif "output" in key:
                    # attention.output.* -> self_attention.dense / post_attention_layernorm
                    index = key.split(".")[index_idx]
                    if "dense" in key:
                        if "weight" in key:
                            new_key = prefix + "encoders." + index + ".self_attention.dense.weight"
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                        elif "bias" in key:
                            new_key = prefix + "encoders." + index + ".self_attention.dense.bias"
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                    elif "LayerNorm" in key:
                        if "weight" in key:
                            new_key = (
                                prefix + "encoders." + index + ".post_attention_layernorm.weight"
                            )
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                        elif "bias" in key:
                            new_key = (
                                prefix + "encoders." + index + ".post_attention_layernorm.bias"
                            )
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
            # Convert roberta's intermediate layers
            elif "intermediate" in key:
                index = key.split(".")[index_idx]
                if (
                    prefix + "encoders." + index + ".mlp.dense_h_to_4h.weight"
                    in oneflow_state_dict.keys()
                ):
                    continue
                if "weight" in key:
                    # Move weight and its sibling bias in one visit.
                    w = key
                    b = key.replace("weight", "bias")
                    new_key = prefix + "encoders." + index + ".mlp.dense_h_to_4h.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(w)
                    new_key = new_key.replace("weight", "bias")
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(b)
            # Convert roberta's output layers
            # NOTE: attention.output.* never reaches here — it is consumed by the
            # "attention" branch above, so this handles only the per-layer FFN output.
            elif "output" in key:
                index = key.split(".")[index_idx]
                if "dense.weight" in key:
                    if (
                        prefix + "encoders." + index + ".mlp.dense_4h_to_h.weight"
                        in oneflow_state_dict.keys()
                    ):
                        continue
                    w = key
                    b = w.replace("weight", "bias")
                    new_key = prefix + "encoders." + index + ".mlp.dense_4h_to_h.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(w)
                    new_key = new_key.replace("weight", "bias")
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(b)
                elif "LayerNorm.weight" in key:
                    if (
                        prefix + "encoders." + str(int(index) + 1) + ".input_layernorm.weight"
                        in oneflow_state_dict.keys()
                    ):
                        continue
                    w = key
                    b = w.replace("weight", "bias")
                    if index == str(layers - 1):
                        # Last layer's output LayerNorm becomes the final layernorm.
                        new_key = prefix + "final_layernorm.weight"
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(w)
                        new_key = new_key.replace("weight", "bias")
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(b)
                        continue
                    # Layer i's output LayerNorm is layer i+1's input layernorm in LiBai.
                    new_key = prefix + "encoders." + str(int(index) + 1) + ".input_layernorm.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(w)
                    new_key = new_key.replace("weight", "bias")
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(b)
            # Convert roberta's pooler layers
            elif "pooler" in key:
                if "weight" in key:
                    new_key = prefix + "pooler.dense.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif "bias" in key:
                    new_key = prefix + "pooler.dense.bias"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
            # Convert lm_head layers
            elif "lm_head" in key:
                if "layer_norm.weight" in key:
                    new_key = "lm_head.layernorm.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif "layer_norm.bias" in key:
                    new_key = "lm_head.layernorm.bias"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif "seq_relationship" in key:
                    new_key = key.replace("cls", "cls_head")
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif "lm_head.bias" in key:
                    new_key = "lm_head.lm_logits.bias"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                else:
                    # Other lm_head keys (e.g. lm_head.dense.*) keep their names.
                    oneflow_state_dict[key] = oneflow_state_dict.pop(key)
            else:
                # Unrecognized keys pass through unchanged.
                oneflow_state_dict[key] = oneflow_state_dict.pop(key)

        return oneflow_state_dict
class RobertaLoaderLiBai(BertLoaderLiBai):
    """Loader for RoBERTa checkpoints already saved in LiBai's own format.

    Inherits the full loading logic from ``BertLoaderLiBai``; the only
    difference is the model prefix used inside LiBai checkpoints.
    """

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super(RobertaLoaderLiBai, self).__init__(
            model, libai_cfg, pretrained_model_path, **kwargs
        )
        # RoBERTa's prefix in LiBai checkpoints.
        self.base_model_prefix_2 = "roberta"
| 10,599 | 45.696035 | 100 | py |
libai | libai-main/libai/tokenizer/tokenization_roberta.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for RoBERTa."""
import json
import logging
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from .tokenization_base import PreTrainedTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
}
@lru_cache()
def bytes_to_unicode():
    """Build the GPT-2 reversible byte <-> unicode-character table.

    Maps every byte value 0..255 to a printable unicode character so that BPE
    never has to operate on whitespace/control characters it would choke on.
    Printable bytes map to themselves; the remaining byte values are assigned
    characters starting at code point 256.

    Returns:
        dict: byte value (int) -> unicode character (str), 256 entries.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    byte_values = printable[:]
    char_codes = printable[:]
    next_extra = 0
    for value in range(2 ** 8):
        if value not in byte_values:
            # Non-printable byte: assign it the next code point above 255.
            byte_values.append(value)
            char_codes.append(2 ** 8 + next_extra)
            next_extra += 1
    return {b: chr(c) for b, c in zip(byte_values, char_codes)}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length
    strings).

    Args:
        word (tuple): sequence of symbols.

    Returns:
        set: all ``(previous, next)`` bigrams of *word*. Empty for words with
        fewer than two symbols — the previous index-based implementation
        raised ``IndexError`` on an empty tuple.
    """
    # zip pairs each symbol with its successor; it naturally yields nothing
    # for 0- or 1-symbol words, removing the word[0] edge-case crash.
    return set(zip(word, word[1:]))
class RobertaTokenizer(PreTrainedTokenizer):
    """Constructs a RoBERTa tokenizer, derived from the GPT-2 tokenizer,
    using byte-level Byte-Pair-Encoding.

    Args:
        vocab_file (:obj:`str`):
            Path to the vocabulary file.
        merges_file (:obj:`str`):
            Path to the merges file.
        errors (:obj:`str`, `optional`, defaults to :obj:`"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode
            <https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information.
        bos_token (:obj:`str`, `optional`, defaults to `<s>`):
            The beginning of sequence token.
        eos_token (:obj:`str`, `optional`, defaults to `</s>`):
            The end of sequence token.
        cls_token (:obj:`str`, `optional`, defaults to `<s>`):
            The first token of the sequence when built with special tokens.
        unk_token (:obj:`str`, `optional`, defaults to `<unk>`):
            The unknown token. A token that is not in the vocabulary cannot be
            converted to an ID and is set to be this token instead.
        pad_token (:obj:`str`, `optional`, defaults to `<pad>`): A special token
            used to make arrays of tokens the same size for batching purpose.
            Will then be ignored by attention mechanisms or loss computation.
        mask_token (:obj:`str`, `optional`, defaults to `<mask>`): A special token
            representing a masked token (used by masked-language modeling pretraining
            objectives, like BERT).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_bos_token=False,
        **kwargs,
    ):
        super(RobertaTokenizer, self).__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        # token (str) -> id (int) mapping from vocab.json.
        with open(vocab_file, encoding="utf-8") as file:
            self.encoder = json.load(file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # merges.txt: first line is a "#version" header and the file ends with a
        # newline, hence the [1:-1] slice.
        with open(merges_file, encoding="utf-8") as file:
            bpe_merges = file.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        # Pair -> merge priority: lower rank merges first.
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        # Memoizes bpe() results per input token.
        self.cache = {}
        # GPT-2 pre-tokenization pattern: contractions, letter runs, digit runs,
        # other-symbol runs (each optionally preceded by a space), and whitespace.
        self.pat = re.compile(
            r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
        )
        self.add_bos_token = add_bos_token

    @property
    def vocab_size(self):
        """Number of tokens in the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base + added tokens) as token -> id."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single pre-tokenized token.

        Repeatedly merges the adjacent symbol pair with the lowest merge rank
        until no rankable pair remains. Returns the merged symbols joined by
        spaces; results are cached per token.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Best-ranked pair; unknown pairs rank as +inf and stop the loop.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    # No further occurrence of `first`: keep the tail as-is.
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    # Merge the pair into a single symbol.
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            # Map each UTF-8 byte to its printable stand-in before BPE.
            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        # Undo the byte->unicode mapping, then decode the raw bytes as UTF-8.
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens to a sequence or a pair of sequence.

        RoBERTa format sentence input:
        - single sequence: [CLS] tokens_a [SEP]
        - pair of sequences: [CLS] tokens_a [SEP] tokens_b [SEP]

        Special tokens are only inserted when ``add_bos_token`` is True.

        Args:
            token_ids_0 (List[int]): The token ids of sentence 0.
            token_ids_1 (List[int], optional): The token ids of sentence 1. Defaults to None.

        Returns:
            :obj:`List[str]`: The sequence after adding special toekens.
        """
        if self.add_bos_token:
            cls = [self.cls_token_id]
            sep = [self.sep_token_id]
        else:
            cls = []
            sep = []
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def save_vocabulary(
        self, save_directory: str, filename_prefix: Optional[str] = None
    ) -> Tuple[str]:
        """Write vocab.json and merges.txt into ``save_directory``.

        Returns the (vocab_file, merge_file) paths, or None if the directory
        does not exist.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"],
        )
        merge_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"],
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Write merges in rank order; ranks are expected to be consecutive.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair
        classification task. RoBERTa does not make use of token type ids, therefore
        a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        # Pair input uses a double SEP between the two sequences (RoBERTa format).
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 11,875 | 35.097264 | 100 | py |
libai | libai-main/libai/tokenizer/tokenization_base.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""copy from HuggingFace transformer repo, to tokenize the sentence.
This class only focus on tokenization, converting token to id and their inverse operation.
It does not construct inputs using special symbols."""
import copy
import itertools
import json
import logging
import os
import unicodedata
from io import open
from typing import Dict, List, Optional, Union
import numpy as np
import oneflow as flow
from libai.utils import distributed as dist
from libai.utils.file_io import PathManager
from libai.utils.file_utils import cached_path
logger = logging.getLogger(__name__)
def _is_whitespace(char):
"""Checks whether `char` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `char` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `char` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (
(cp >= 33 and cp <= 47)
or (cp >= 58 and cp <= 64)
or (cp >= 91 and cp <= 96)
or (cp >= 123 and cp <= 126)
):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
class PreTrainedTokenizer(object):
"""
Base class for all tokenizers.
Handle all the shared methods for tokenization and special tokens, methods
dowloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.
This class also contains the added tokens in a unified way on top of all tokenizers, so we don't
have to handle the specific vocabulary augmentation methods of the various underlying
dictionary structures (BPE, sentencepiece...).
Class attributes (overridden by derived classes):
``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of
each vocabulary file required by the model, and as associated values, the filename for
saving the associated file (string).
``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys being the
``__init__`` keyword name of each vocabulary file required by the model, the low-level
being the `short-cut-names` (string) of the pretrained models with, as associated values,
the `url` (string) to the associated pretrained vocabulary file.
``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string)
of the pretrained models, and as associated values, the maximum length of the sequence
inputs of this model, or None if the model has no maximum input size.
``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names`
(string) of the pretrained models, and as associated values, a dictionnary of specific
arguments to pass to the ``__init__`` method of the tokenizer class for this pretrained
model when loading the tokenizer with the ``from_pretrained()`` method.
Args:
bos_token (:obj:`str`, `optional`): A special token representing the beginning of a
sentence.
eos_token (:obj:`str`, `optional`): A special token representing the end of a sentence.
unk_token (:obj:`str`, `optional`): A special token representing an out-of-vocabulary token.
sep_token (:obj:`str`, `optional`): A special token separating two different sentences in
the same input (used by BERT for instance).
pad_token (:obj:`str`, `optional`): A special token used to make arrays of tokens the same
size for batching purpose.
Will then be ignored by attention mechanisms or loss computation.
cls_token (:obj:`str`, `optional`): A special token representing the class of the input
(used by BERT for instance).
mask_token (:obj:`str`, `optional`): A special token representing a masked token (used by
masked-language modeling pretraining objectives, like BERT).
eod_token (:obj:`str`, `optional`): A special token representing the end of a document.
additional_special_tokens (tuple or list of :obj:`str`, `optional`):
A tuple or a list of additional special tokens.
"""
vocab_files_names = {}
pretrained_vocab_files_map = {}
pretrained_init_configuration = {}
max_model_input_sizes = {}
SPECIAL_TOKENS_ATTRIBUTES = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
"eod_token",
"additional_special_tokens",
]
    def __init__(self, verbose=True, **kwargs):
        """Initialize the shared tokenizer state.

        Args:
            verbose (bool): stored on the instance; presumably controls logging
                verbosity in subclasses — not used in this method itself.
            **kwargs: special-token values. Only keys listed in
                ``SPECIAL_TOKENS_ATTRIBUTES`` are consumed; ``None`` values and
                unknown keys are silently ignored.

        Raises:
            TypeError: if a recognized special-token value is neither a string
                nor (for ``additional_special_tokens``) a list of strings.
        """
        # Hidden storage for each special token; populated via setattr below.
        self._bos_token = None
        self._eos_token = None
        self._unk_token = None
        self._sep_token = None
        self._pad_token = None
        self._cls_token = None
        self._mask_token = None
        self._eod_token = None
        self._additional_special_tokens = []
        self.verbose = verbose

        # Added tokens - We store this for both slow and fast tokenizers
        # until the serialization of Fast tokenizers is updated
        self.added_tokens_encoder: Dict[str, int] = {}
        self.added_tokens_decoder: Dict[int, str] = {}
        self.unique_no_split_tokens: List[str] = []

        # inputs and kwargs for saving and re-loading
        # (see ``from_pretrained`` and ``save_pretrained``)
        self.init_inputs = ()
        self.init_kwargs = {}

        # We directly set the hidden value to allow initialization with special tokens
        # which are not yet in the vocabulary. Necessary for serialization/de-serialization
        for key, value in kwargs.items():
            if value is None:
                continue
            if key in self.SPECIAL_TOKENS_ATTRIBUTES:
                if key == "additional_special_tokens":
                    assert all(
                        isinstance(t, str) for t in value
                    ), "One of the tokens is not a string"
                    setattr(self, key, list(value))
                elif isinstance(value, str):
                    setattr(self, key, value)
                else:
                    raise TypeError(f"special token {key} has to be str but got: {type(value)}")
    @classmethod
    def from_pretrained(cls, *inputs, **kwargs):
        r"""
        Instantiate a :class:`~PreTrainedTokenizer` (or a derived class) from a
        predefined tokenizer.

        Thin public wrapper: all work is delegated to :meth:`_from_pretrained`.

        Args:
            pretrained_model_name_or_path(`str` or `os.PathLike`):
                Can be either:
                - a string with the `shortcut name` of a predefined tokenizer to load from cache
                  or download, e.g.: ``bert-base-uncased``.
                - a path to a `directory` containing vocabulary files required by the tokenizer,
                  for instance saved using the :func:`~PreTrainedTokenizer.save_pretrained`
                  method, e.g., ``./my_model_directory/``.
                - (not applicable to all derived classes) a path or url to a single saved
                  vocabulary file if and only if the tokenizer only requires a single vocabulary
                  file (e.g. Bert, XLNet), e.g., ``./my_model_directory/vocab.txt``.
            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded predefined tokenizer vocabulary files
                should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False:
                Force to (re-)download the vocabulary files and override the cached versions if
                they exist.
            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint,
                e.g., {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
                The proxies are used on each request.
            inputs: (`optional`) positional arguments: will be passed to the
                Tokenizer ``__init__`` method.
            kwargs: (`optional`) keyword arguments: will be passed to the
                Tokenizer ``__init__`` method. Can be used to set special tokens
                like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``,
                ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``.
                See parameters in the doc string of :class:`~PreTrainedTokenizer`
                for details.

        Examples:

        .. code-block:: python

            # We can't instantiate directly the base class `PreTrainedTokenizer` so let's
            # show our examples on a derived class: BertTokenizer
            # Download vocabulary from S3 and cache.
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            # If vocabulary files are in a directory (e.g. tokenizer was
            # saved using `save_pretrained('./test/saved_model/')`)
            tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')
            # If the tokenizer uses a single vocabulary file, you can point directly to this file
            tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')
            # You can link tokens to special vocabulary when instantiating
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
            # You should be sure '<unk>' is in the vocabulary when doing that.
            # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
            assert tokenizer.unk_token == '<unk>'
        """
        return cls._from_pretrained(*inputs, **kwargs)
    @classmethod
    def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        """Resolve vocabulary/config files and instantiate the tokenizer.

        Pipeline: locate files (shortcut-name map, directory, or single file),
        fetch them via ``cached_path``, merge saved tokenizer config with caller
        kwargs, instantiate ``cls``, then restore added/special tokens.

        Returns:
            The instantiated tokenizer, or ``None`` if no tokenizer files could
            be found at a local path.

        Raises:
            EnvironmentError: if a remote vocabulary file cannot be fetched.
        """
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)

        # Shortcut names known to this tokenizer class (e.g. "roberta-base").
        s3_models = list(cls.max_model_input_sizes.keys())
        vocab_files = {}
        init_configuration = {}
        if pretrained_model_name_or_path in s3_models:
            # Get the vocabulary from AWS S3 bucket
            for file_id, map_list in cls.pretrained_vocab_files_map.items():
                vocab_files[file_id] = map_list[pretrained_model_name_or_path]
            if (
                cls.pretrained_init_configuration
                and pretrained_model_name_or_path in cls.pretrained_init_configuration
            ):
                init_configuration = cls.pretrained_init_configuration[
                    pretrained_model_name_or_path
                ]
        else:
            # Get the vocabulary from local files
            logger.info(
                "Model name '{}' not found in model shortcut name list ({}). "
                "Assuming '{}' is a path or url to a directory containing tokenizer files.".format(
                    pretrained_model_name_or_path,
                    ", ".join(s3_models),
                    pretrained_model_name_or_path,
                )
            )

            # Look for the tokenizer main vocabulary files
            for file_id, file_name in cls.vocab_files_names.items():
                if os.path.isdir(pretrained_model_name_or_path):
                    # If a directory is provided we look for the standard filenames
                    full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
                else:
                    # If a path to a file is provided we use it (will only work for non-BPE
                    # tokenizer using a single vocabulary file)
                    full_file_name = pretrained_model_name_or_path
                if not os.path.exists(full_file_name):
                    logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                    full_file_name = None
                vocab_files[file_id] = full_file_name

            # Look for the additional tokens files
            additional_files_names = {
                "added_tokens_file": ADDED_TOKENS_FILE,
                "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,
                "tokenizer_config_file": TOKENIZER_CONFIG_FILE,
            }

            # If a path to a file was provided, get the parent directory
            saved_directory = pretrained_model_name_or_path
            if os.path.exists(saved_directory) and not os.path.isdir(saved_directory):
                saved_directory = os.path.dirname(saved_directory)

            for file_id, file_name in additional_files_names.items():
                full_file_name = os.path.join(saved_directory, file_name)
                if not os.path.exists(full_file_name):
                    logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                    full_file_name = None
                vocab_files[file_id] = full_file_name

            if all(full_file_name is None for full_file_name in vocab_files.values()):
                logger.error(
                    "Model name '{}' was not found in model name list ({}). "
                    "We assumed '{}' was a path or url but couldn't find tokenizer files"
                    "at this path or url.".format(
                        pretrained_model_name_or_path,
                        ", ".join(s3_models),
                        pretrained_model_name_or_path,
                    )
                )
                return None

        # Get files from url, cache, or disk depending on the case
        try:
            resolved_vocab_files = {}
            for file_id, file_path in vocab_files.items():
                if file_path is None:
                    resolved_vocab_files[file_id] = None
                else:
                    resolved_vocab_files[file_id] = cached_path(
                        file_path,
                        cache_dir=cache_dir,
                        force_download=force_download,
                        proxies=proxies,
                    )
        except EnvironmentError as e:
            if pretrained_model_name_or_path in s3_models:
                logger.error("Couldn't reach server to download vocabulary.")
            else:
                logger.error(
                    "Model name '{}' was not found in model name list ({}). "
                    "We assumed '{}' was a path or url but couldn't find files {} "
                    "at this path or url.".format(
                        pretrained_model_name_or_path,
                        ", ".join(s3_models),
                        pretrained_model_name_or_path,
                        str(vocab_files.keys()),
                    )
                )
            raise e

        for file_id, file_path in vocab_files.items():
            if file_path == resolved_vocab_files[file_id]:
                logger.info("loading file {}".format(file_path))
            else:
                logger.info(
                    "loading file {} from cache at {}".format(
                        file_path, resolved_vocab_files[file_id]
                    )
                )

        # Prepare tokenizer initialization kwargs
        # Did we saved some inputs and kwargs to reload ?
        tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
        if tokenizer_config_file is not None:
            init_kwargs = json.load(open(tokenizer_config_file, encoding="utf-8"))
            saved_init_inputs = init_kwargs.pop("init_inputs", ())
            if not init_inputs:
                init_inputs = saved_init_inputs
        else:
            init_kwargs = init_configuration

        # Update with newly provided kwargs
        # NOTE: caller kwargs take precedence over the saved tokenizer config.
        init_kwargs.update(kwargs)

        # Merge resolved_vocab_files arguments in init_kwargs.
        added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
        special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
        for args_name, file_path in resolved_vocab_files.items():
            if args_name not in init_kwargs:
                init_kwargs[args_name] = file_path
        if special_tokens_map_file is not None:
            special_tokens_map = json.load(open(special_tokens_map_file, encoding="utf-8"))
            for key, value in special_tokens_map.items():
                if key not in init_kwargs:
                    init_kwargs[key] = value

        # Instantiate tokenizer.
        tokenizer = cls(*init_inputs, **init_kwargs)

        # Save inputs and kwargs for saving and re-loading with ``save_pretrained``
        tokenizer.init_inputs = init_inputs
        tokenizer.init_kwargs = init_kwargs

        # Add supplementary tokens.
        special_tokens = tokenizer.all_special_tokens
        if added_tokens_file is not None:
            with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
                added_tok_encoder = json.load(added_tokens_handle)
            # Sort added tokens by index
            added_tok_encoder_sorted = list(sorted(added_tok_encoder.items(), key=lambda x: x[1]))
            for token, index in added_tok_encoder_sorted:
                # Added-token ids must extend the vocab contiguously.
                assert index == len(tokenizer), (
                    f"Non-consecutive added token '{token}' found. "
                    f"Should have index {len(tokenizer)} but has index {index} in saved vocabulary."
                )
                tokenizer.add_tokens(token, special_tokens=bool(token in special_tokens))

        # Check all our special tokens are registered as "no split" token
        # (we don't cut them) and are in the vocab
        added_tokens = tokenizer.sanitize_special_tokens()
        if added_tokens:
            logger.warning(
                "Special tokens have been added in the vocabulary,"
                "make sure the associated word embedding are fine-tuned or trained."
            )

        return tokenizer
    def save_pretrained(self, save_directory):
        """
        Save the tokenizer vocabulary files together with:
            - added tokens,
            - special-tokens-to-class-attributes-mapping,
            - tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert).
        This won't save modifications other than ``added tokens`` and ``special token mapping``,
        you may have applied to the tokenizer after the instantiation (e.g. modifying
        tokenizer.do_lower_case after creation).
        This method makes sure the full tokenizer can then be re-loaded using the
        :func:`~PreTrainedTokenizer.from_pretrained` class method.

        Args:
            save_directory (str): An existing directory to write all files into.

        Returns:
            tuple: Paths of the written vocabulary files plus the special-tokens map
            and added-tokens files, or ``None`` if ``save_directory`` is not a directory.
        """
        if not PathManager.isdir(save_directory):
            logger.error("Saving directory ({}) should be a directory".format(save_directory))
            return
        PathManager.mkdirs(save_directory)
        special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
        added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
        tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)
        tokenizer_config = copy.deepcopy(self.init_kwargs)
        if len(self.init_inputs) > 0:
            tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
        # Vocabulary file paths are re-resolved at load time, so drop them from
        # the saved config to avoid baking in machine-specific paths.
        for file_id in self.vocab_files_names.keys():
            tokenizer_config.pop(file_id, None)
        with open(tokenizer_config_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(tokenizer_config, ensure_ascii=False))
        with open(special_tokens_map_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))
        added_vocab = self.get_added_vocab()
        # The added-tokens file is only written when there is something to save.
        if added_vocab:
            with open(added_tokens_file, "w", encoding="utf-8") as f:
                out_str = json.dumps(added_vocab, ensure_ascii=False)
                f.write(out_str)
        vocab_files = self.save_vocabulary(save_directory)
        return vocab_files + (special_tokens_map_file, added_tokens_file)
    def save_vocabulary(self, save_directory):
        """Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
        and special token mappings.
        Please use :func:`~PreTrainedTokenizer.save_pretrained` to save the
        full Tokenizer state if you want to reload it using the
        :func:`~PreTrainedTokenizer.from_pretrained` class method.
        Subclasses must implement this and return a tuple of written file paths.
        """
        raise NotImplementedError
    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (without the added tokens).

        Subclasses must implement this property.
        """
        raise NotImplementedError
def padded_vocab_size(self, multiple=1) -> int:
"""Padded the vocabulary with dummy tokens and return the new size."""
vocab_size = len(self)
while vocab_size % multiple != 0:
vocab_size += 1
return vocab_size
    def __len__(self):
        """Size of the full vocabulary with the added tokens."""
        # Full size = base vocab (subclass-defined) + tokens added at runtime.
        return self.vocab_size + len(self.added_tokens_encoder)
    def get_vocab(self) -> Dict[str, int]:
        """
        Returns the vocabulary as a dictionary of token to index.
        :obj:`tokenizer.get_vocab()[token]` is equivalent to
        :obj:`tokenizer.convert_tokens_to_ids(token)`
        when :obj:`token` is in the vocab.
        Subclasses must implement this method.
        Returns:
            :obj:`Dict[str, int]`: The vocabulary.
        """
        raise NotImplementedError
    def get_added_vocab(self) -> Dict[str, int]:
        """
        Returns the added tokens in the vocabulary as a dictionary of token to index.
        Note: this returns the live mapping, not a copy.
        Returns:
            :obj:`Dict[str, int]`: The added tokens.
        """
        return self.added_tokens_encoder
    def add_tokens(self, new_tokens: Union[str, List[str]], special_tokens: bool = False) -> int:
        """
        Add a list of new tokens to the tokenizer class. If the new tokens are not in the
        vocabulary, they are added to it with indices starting from the length of
        the current vocabulary.
        .. Note::
            When adding new tokens to the vocabulary, you should make sure to also resize
            the token embedding matrix of the model so that its embedding matrix matches
            the tokenizer.
            In order to do that, please use the
            :meth:`~PreTrainedModel.resize_token_embeddings` method.
        Args:
            new_tokens (:obj:`str`, or a list of `str`):
                Tokens are only added if they are not already in the vocabulary.
            special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Can be used to specify if the token is a special token. This mostly change
                the normalization behavior
                (special tokens like CLS or [MASK] are usually not lower-cased for instance).
        Returns:
            :obj:`int`: Number of tokens added to the vocabulary.
        Examples:
            .. code-block:: python
                # Let's see how to increase the vocabulary of Bert model and tokenizer
                tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
                model = BertModel.from_pretrained('bert-base-uncased')
                num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
                print('We have added', num_added_toks, 'tokens')
                # Notice: resize_token_embeddings expect to receive the full size of the new
                # vocabulary, i.e., the length of the tokenizer.
                model.resize_token_embeddings(len(tokenizer))
        """
        if not new_tokens:
            return 0
        if not isinstance(new_tokens, (list, tuple)):
            new_tokens = [new_tokens]
        tokens_to_add = []
        for token in new_tokens:
            if not isinstance(token, str):
                raise TypeError(f"Token {token} is not a string but a {type(token)}.")
            # Regular tokens follow the tokenizer's casing; special tokens keep theirs.
            if not special_tokens and hasattr(self, "do_lower_case") and self.do_lower_case:
                token = token.lower()
            # A token is "new" when the base vocabulary maps it to the unk id
            # and it has not already been queued in this call.
            if (
                token != self.unk_token
                and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)
                and token not in tokens_to_add
            ):
                tokens_to_add.append(token)
                if self.verbose:
                    logger.info(f"Adding {token} to the vocabulary")
        # New ids are appended after the current full vocabulary (base + added).
        added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(tokens_to_add))
        added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
        self.added_tokens_encoder.update(added_tok_encoder)
        self.added_tokens_decoder.update(added_tok_decoder)
        # No-split tokens are never segmented by ``tokenize``; special tokens are
        # registered even if they were already present in the base vocabulary.
        if special_tokens:
            self.unique_no_split_tokens = sorted(
                set(self.unique_no_split_tokens).union(set(new_tokens))
            )
        else:
            self.unique_no_split_tokens = sorted(
                set(self.unique_no_split_tokens).union(set(tokens_to_add))
            )
        return len(tokens_to_add)
    def sanitize_special_tokens(self) -> int:
        """
        Make sure that all the special tokens attributes of the tokenizer
        (:obj:`tokenizer.mask_token`, :obj:`tokenizer.cls_token`, etc.)
        are in the vocabulary.
        Add the missing ones to the vocabulary if needed.
        Return:
            :obj:`int`: The number of tokens added in the vocaulary during the operation.
        """
        # Registering with special_tokens=True also marks them as "no split".
        return self.add_tokens(self.all_special_tokens, special_tokens=True)
    def add_special_tokens(self, special_tokens_dict: Dict[str, str]) -> int:
        """
        Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to
        class attributes. If special tokens are NOT in the vocabulary, they are added to it
        (indexed starting from the last index of the current vocabulary).
        .. Note::
            When adding new tokens to the vocabulary, you should make sure to also resize the
            token embedding matrix of the model so that its embedding matrix matches the tokenizer.
            In order to do that, please use the
            :meth:`~PreTrainedModel.resize_token_embeddings` method.
        Using :obj:`add_special_tokens` will ensure your special tokens can be used in several ways:
        - Special tokens are carefully handled by the tokenizer (they are never split).
        - You can easily refer to special tokens using tokenizer class attributes like
          :obj:`tokenizer.cls_token`. This makes it easy to develop model-agnostic training and
          fine-tuning scripts.
        When possible, special tokens are already registered for provided pretrained models
        (for instance :class:`~BertTokenizer` :obj:`cls_token` is already registered
        to be :obj`'[CLS]'` and XLM's one is also registered to be :obj:`'</s>'`).
        Args:
            special_tokens_dict (dictionary `str` to `str`):
                Keys should be in the list of predefined special attributes: [``bos_token``,
                ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``,
                ``cls_token``, ``mask_token``,
                ``additional_special_tokens``].
                Tokens are only added if they are not already in the vocabulary (tested by
                checking if the tokenizer assign the index of the ``unk_token`` to them).
        Returns:
            :obj:`int`: Number of tokens added to the vocabulary.
        Examples:
            .. code-block:: python
                # Let's see how to add a new classification token to GPT-2
                tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
                model = GPT2Model.from_pretrained('gpt2')
                special_tokens_dict = {'cls_token': '<CLS>'}
                num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
                print('We have added', num_added_toks, 'tokens')
                # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary,
                # i.e., the length of the tokenizer.
                model.resize_token_embeddings(len(tokenizer))
                assert tokenizer.cls_token == '<CLS>'
        """
        if not special_tokens_dict:
            return 0
        added_tokens = 0
        for key, value in special_tokens_dict.items():
            assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token"
            if self.verbose:
                logger.info(f"Assigning {value} to the {key} key of the tokenizer")
            # Binds the value to the matching property setter (e.g. self.cls_token = value).
            setattr(self, key, value)
            if key == "additional_special_tokens":
                assert isinstance(value, (list, tuple)) and all(
                    isinstance(t, str) for t in value
                ), f"Tokens {value} for key {key} should all be a string"
                added_tokens += self.add_tokens(value, special_tokens=True)
            else:
                assert isinstance(value, str), f"Token {value} for key {key} should be a string"
                added_tokens += self.add_tokens([value], special_tokens=True)
        return added_tokens
def tokenize(self, text: str, **kwargs) -> List[str]:
"""
Converts a string in a sequence of tokens, using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies
(BPE/SentencePieces/WordPieces). Take care of added tokens.
Args:
text (:obj:`str`):
The sequence to be encoded.
**kwargs (additional keyword arguments):
Passed along to the model-specific ``prepare_for_tokenization``
preprocessing method.
Returns:
:obj:`List[str]`: The list of tokens.
"""
def split_on_token(tok, text):
result = []
split_text = text.split(tok)
for i, sub_text in enumerate(split_text):
sub_text = sub_text.strip()
if i == 0 and not sub_text:
result += [tok]
elif i == len(split_text) - 1:
if sub_text:
result += [sub_text]
else:
pass
else:
if sub_text:
result += [sub_text]
result += [tok]
return result
def split_on_tokens(tok_list, text):
if not text:
return []
if not tok_list:
return self._tokenize(text, **kwargs)
tokenized_text = []
text_list = [text]
for tok in tok_list:
tokenized_text = []
for sub_text in text_list:
if sub_text not in self.unique_no_split_tokens:
tokenized_text += split_on_token(tok, sub_text)
else:
tokenized_text += [sub_text]
text_list = tokenized_text
return list(
itertools.chain.from_iterable(
(
self._tokenize(token)
if token not in self.unique_no_split_tokens
else [token]
for token in tokenized_text
)
)
)
no_split_token = self.unique_no_split_tokens
tokenized_text = split_on_tokens(no_split_token, text)
return tokenized_text
    def _tokenize(self, text, **kwargs):
        """
        Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for
        word-based vocabulary or sub-words for sub-word-based vocabularies
        (BPE/SentencePieces/WordPieces).
        Do NOT take care of added tokens.
        Subclasses must implement this method.
        """
        raise NotImplementedError
def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:
"""Converts a token string (or a sequence of tokens) in a single integer id
(or a sequence of ids), using the vocabulary.
"""
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
if len(tokens) > 0 and isinstance(tokens[0], list):
ids = []
for ts in tokens:
ids_x = []
for token in ts:
ids_x.append(self._convert_token_to_id_with_added_voc(token))
ids.append(ids_x)
return ids
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
return ids
def convert_to_tensors(self, token_ids, return_tensors=None, is_global=False, **kwargs):
if return_tensors is None:
return_token_ids = token_ids
elif return_tensors == "of":
if not is_global:
return_token_ids = flow.tensor(token_ids, dtype=flow.long)
elif is_global:
sbp = kwargs.get("sbp", dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]))
placement = kwargs.get(
"placement", flow.placement("cuda", list(range(dist.get_world_size())))
)
return_token_ids = flow.tensor(
token_ids, sbp=sbp, placement=placement, dtype=flow.long
)
elif return_tensors == "np":
return_token_ids = np.array(token_ids, dtype=np.int64)
return return_token_ids
def _convert_token_to_id_with_added_voc(self, token):
if token is None:
return None
if token in self.added_tokens_encoder:
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the base vocabulary (no added tokens).

        Subclasses must implement this method.
        """
        raise NotImplementedError
    def encode(self, text, return_tensors=None, is_global=False, **kwargs):
        """Tokenize ``text`` and convert it to a sequence of ids with special tokens added.

        Args:
            text: A single string, a list/tuple of strings (batch), or an
                already-encoded list/tuple of ints (returned unchanged).
            return_tensors: Passed to :meth:`convert_to_tensors`
                (``None``, ``"of"`` or ``"np"``).
            is_global: Whether to build a global (distributed) tensor when
                ``return_tensors == "of"``.
            **kwargs: Forwarded to :meth:`convert_to_tensors` (e.g. ``sbp``,
                ``placement``).

        Returns:
            The encoded ids: a list, tensor or numpy array depending on
            ``return_tensors``; a list of those for batched string input.

        Raises:
            ValueError: If ``text`` is none of the supported input types.
        """
        if isinstance(text, str):
            tokens = self.tokenize(text)
            token_ids = self.convert_tokens_to_ids(tokens)
            token_ids = self.build_inputs_with_special_tokens(token_ids)
            token_ids = self.convert_to_tensors(
                token_ids, return_tensors=return_tensors, is_global=is_global, **kwargs
            )
            return token_ids
        elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
            tokens = [self.tokenize(t) for t in text]
            token_ids_list = self.convert_tokens_to_ids(tokens)
            token_ids_list = [
                self.build_inputs_with_special_tokens(token_ids) for token_ids in token_ids_list
            ]
            token_ids_list = self.convert_to_tensors(
                token_ids_list, return_tensors=return_tensors, is_global=is_global, **kwargs
            )
            return token_ids_list
        elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
            # Already encoded: pass through untouched.
            return text
        else:
            raise ValueError(
                "Input is not valid. Should be a string, a list/tuple of strings or "
                "a list/tuple of integers."
            )
def convert_ids_to_tokens(
self, ids: Union[int, List[int]], skip_special_tokens: bool = False
) -> Union[str, List[str]]:
"""
Converts a single index or a sequence of indices in a token or a sequence of tokens,
using the vocabulary and added tokens.
Args:
ids (:obj:`int` or :obj:`List[int]`):
The token id (or token ids) to convert to tokens.
skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to remove special tokens in the decoding.
Returns:
:obj:`str` or :obj:`List[str]`: The decoded token(s).
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
if skip_special_tokens and index in self.all_special_ids:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id to a token (str) using the base vocabulary (no added tokens).

        Subclasses must implement this method.
        """
        raise NotImplementedError
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""
Converts a sequence of tokens to a single string. The most simple way to do it is
``" ".join(tokens)`` but we often want to remove sub-word tokenization artifacts
at the same time.
Args:
tokens (:obj:`List[str]`): The token to join in a string.
Returns:
:obj:`str`: The joined tokens.
"""
return " ".join(tokens)
    def decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=True,
        spaces_between_special_tokens: bool = True,
    ):
        """
        Converts a sequence of ids (integer) in a string, using the tokenizer and vocabulary
        with options to remove special tokens and clean up tokenization spaces.
        Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
        Args:
            token_ids: list of tokenized input ids. Can be obtained using the `encode` or
                `encode_plus` methods.
            skip_special_tokens: if set to True, will replace special tokens.
            clean_up_tokenization_spaces: if set to True, will clean up the tokenization spaces.
            spaces_between_special_tokens: if set to True, added tokens are joined to the
                surrounding text with single spaces; otherwise with no separator.
        """
        # Convert inputs to python lists
        if isinstance(token_ids, flow.Tensor):
            token_ids = token_ids.tolist()
        filtered_tokens = self.convert_ids_to_tokens(
            token_ids, skip_special_tokens=skip_special_tokens
        )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            # NOTE(review): ``filtered_tokens`` holds strings while ``all_special_ids``
            # holds ints, so this membership test never matches; special ids were
            # already dropped above by ``convert_ids_to_tokens``. Kept byte-identical.
            if skip_special_tokens and token in self.all_special_ids:
                continue
            # Added tokens must not be merged with neighbouring word pieces, so
            # flush the accumulated sub-text before emitting them verbatim.
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        if spaces_between_special_tokens:
            text = " ".join(sub_texts)
        else:
            text = "".join(sub_texts)
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
@property
def bos_token(self) -> str:
"""
:obj:`str`: Beginning of sentence token. Log an error if used while not having been set.
"""
if self._bos_token is None and self.verbose:
logger.error("Using bos_token, but it is not set yet.")
return None
return str(self._bos_token)
@property
def eos_token(self) -> str:
"""
:obj:`str`: End of sentence token. Log an error if used while not having been set.
"""
if self._eos_token is None and self.verbose:
logger.error("Using eos_token, but it is not set yet.")
return None
return str(self._eos_token)
@property
def unk_token(self) -> str:
"""
:obj:`str`: Unknown token. Log an error if used while not having been set.
"""
if self._unk_token is None and self.verbose:
logger.error("Using unk_token, but it is not set yet.")
return None
return str(self._unk_token)
@property
def sep_token(self) -> str:
"""
:obj:`str`: Separation token, to separate context and query in an input sequence.
Log an error if used while not having been set.
"""
if self._sep_token is None and self.verbose:
logger.error("Using sep_token, but it is not set yet.")
return None
return str(self._sep_token)
@property
def pad_token(self) -> str:
"""
:obj:`str`: Padding token. Log an error if used while not having been set.
"""
if self._pad_token is None and self.verbose:
logger.error("Using pad_token, but it is not set yet.")
return None
return str(self._pad_token)
@property
def cls_token(self) -> str:
"""
:obj:`str`: Classification token, to extract a summary of an input sequence leveraging
self-attention along the full depth of the model.
Log an error if used while not having been set.
"""
if self._cls_token is None and self.verbose:
logger.error("Using cls_token, but it is not set yet.")
return None
return str(self._cls_token)
@property
def mask_token(self) -> str:
"""
:obj:`str`: Mask token, to use when training a model with masked-language modeling.
Log an error if used while not having been set.
"""
if self._mask_token is None and self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@property
def eod_token(self) -> str:
"""
:obj:`str`: End of document token. Log an error if used while not having been set.
"""
if self._eod_token is None and self.verbose:
logger.error("Using eod_token, but it is not set yet.")
return None
return str(self._eod_token)
@property
def start_token(self) -> str:
"""
:obj:`str`: Start token of sentence. Common name for bos_token and cls_token.
"""
if self._bos_token is not None and self._cls_token is not None:
if self._bos_token == self._cls_token:
return str(self._bos_token)
else:
logger.error("Conflict between bos_token and cls_token.")
return None
elif self._bos_token is None and self._cls_token is not None:
return str(self._cls_token)
elif self._bos_token is not None and self._cls_token is None:
return str(self._bos_token)
else:
logger.error("Using start_token, but it is not set yet.")
return None
@property
def end_token(self) -> str:
"""
:obj:`str`: End token of sentence. Common name for eos_token and sep_token.
Note: eod_token is not considered, because it is often same with eos_token.
"""
if self._eos_token is not None and self._sep_token is not None:
if self._eos_token == self._sep_token:
return str(self._eos_token)
else:
logger.error("Conflict between eos_token and _sep_token.")
return None
elif self._eos_token is None and self._sep_token is not None:
return str(self._sep_token)
elif self._eos_token is not None and self._sep_token is None:
return str(self._eos_token)
else:
logger.error("Using end_token, but it is not set yet.")
return None
@property
def additional_special_tokens(self) -> List[str]:
"""
:obj:`List[str]`: All the additional special tokens you may want to use.
Log an error if used while not having been set.
"""
if self._additional_special_tokens is None and self.verbose:
logger.error("Using additional_special_tokens, but it is not set yet.")
return None
return [str(tok) for tok in self._additional_special_tokens]
    # --- Setters for the special-token attributes (mirror the properties above).
    @bos_token.setter
    def bos_token(self, value):
        self._bos_token = value
    @eos_token.setter
    def eos_token(self, value):
        self._eos_token = value
    @unk_token.setter
    def unk_token(self, value):
        self._unk_token = value
    @sep_token.setter
    def sep_token(self, value):
        self._sep_token = value
    @pad_token.setter
    def pad_token(self, value):
        self._pad_token = value
    @cls_token.setter
    def cls_token(self, value):
        self._cls_token = value
    @mask_token.setter
    def mask_token(self, value):
        self._mask_token = value
    @eod_token.setter
    def eod_token(self, value):
        self._eod_token = value
    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value
    # --- Id accessors: resolve each special token to its vocabulary id, or None
    # --- when the corresponding token attribute has not been set.
    @property
    def bos_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the beginning of sentence token in the vocabulary.
        Returns :obj:`None` if the token has not been set.
        """
        if self._bos_token is None:
            return None
        return self.convert_tokens_to_ids(self.bos_token)
    @property
    def eos_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the end of sentence token in the vocabulary.
        Returns :obj:`None` if the token has not been set.
        """
        if self._eos_token is None:
            return None
        return self.convert_tokens_to_ids(self.eos_token)
    @property
    def unk_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the unknown token in the vocabulary.
        Returns :obj:`None` if the token has not been set.
        """
        if self._unk_token is None:
            return None
        return self.convert_tokens_to_ids(self.unk_token)
    @property
    def sep_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the separation token in the vocabulary,
        to separate context and query in an input sequence.
        Returns :obj:`None` if the token has not been set.
        """
        if self._sep_token is None:
            return None
        return self.convert_tokens_to_ids(self.sep_token)
    @property
    def pad_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the padding token in the vocabulary.
        Returns :obj:`None` if the token has not been set.
        """
        if self._pad_token is None:
            return None
        return self.convert_tokens_to_ids(self.pad_token)
    @property
    def cls_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the classification token in the vocabulary,
        to extract a summary of an input sequence leveraging self-attention
        along the full depth of the model.
        Returns :obj:`None` if the token has not been set.
        """
        if self._cls_token is None:
            return None
        return self.convert_tokens_to_ids(self.cls_token)
    @property
    def mask_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the mask token in the vocabulary, used when training a
        model with masked-language modeling. Returns :obj:`None` if the token has not been set.
        """
        if self._mask_token is None:
            return None
        return self.convert_tokens_to_ids(self.mask_token)
    @property
    def eod_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the end of document token in the vocabulary.
        Returns :obj:`None` if the token has not been set.
        """
        if self._eod_token is None:
            return None
        return self.convert_tokens_to_ids(self.eod_token)
    @property
    def start_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the start token in the vocabulary.
        Returns :obj:`None` if the token has not been set.
        """
        start_token = self.start_token
        if start_token is None:
            return None
        else:
            return self.convert_tokens_to_ids(start_token)
    @property
    def end_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the end token in the vocabulary.
        Returns :obj:`None` if the token has not been set.
        """
        end_token = self.end_token
        if end_token is None:
            return None
        else:
            return self.convert_tokens_to_ids(end_token)
    @property
    def additional_special_tokens_ids(self) -> List[int]:
        """
        :obj:`List[int]`: Ids of all the additional special tokens in the vocabulary.
        Log an error if used while not having been set.
        """
        return self.convert_tokens_to_ids(self.additional_special_tokens)
@property
def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]:
"""
A dictionary mapping special token class attributes
(:obj:`cls_token`, :obj:`unk_token`, etc.) to their values
(:obj:`'<unk>'`, :obj:`'<cls>'`, etc.).
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = attr_value
return set_attr
@property
def all_special_tokens(self) -> List[str]:
"""
:obj:`List[str]`: All the special tokens
(:obj:`'<unk>'`, :obj:`'<cls>'`, etc.) mapped to class attributes.
"""
all_toks = []
set_attr = self.special_tokens_map
for attr_value in set_attr.values():
all_toks = all_toks + (
list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value]
)
all_toks = list(set(all_toks))
return all_toks
@property
def all_special_ids(self) -> List[int]:
"""
:obj:`List[int]`: List the ids of the special tokens
(:obj:`'<unk>'`, :obj:`'<cls>'`, etc.) mapped to class attributes.
"""
all_toks = self.all_special_tokens
all_ids = list(self.convert_tokens_to_ids(all_toks))
return all_ids
@staticmethod
def clean_up_tokenization(out_string):
"""Clean up a list of simple English tokenization artifacts like spaces before
punctuations and abbreviated forms.
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" do not", " don't")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string
| 52,649 | 40.197183 | 100 | py |
libai | libai-main/libai/tokenizer/tokenization_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for bert (wordpieces)."""
import collections
import logging
import os
import re
import unicodedata
from io import open
from typing import List, Optional
from .tokenization_base import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
logger = logging.getLogger(__name__)
# Name of the vocabulary file bundled with a pretrained BERT tokenizer.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
# Download locations of the vocabularies for the canonical BERT checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
    }
}
# Maximum input length (number of positional embeddings) of each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-chinese": 512,
}
# Default tokenizer constructor kwargs per checkpoint (cased vs. uncased).
PRETRAINED_INIT_CONFIGURATION = {
    "bert-base-uncased": {"do_lower_case": True},
    "bert-large-uncased": {"do_lower_case": True},
    "bert-base-cased": {"do_lower_case": False},
    "bert-large-cased": {"do_lower_case": False},
    "bert-base-chinese": {"do_lower_case": False},
}
def load_vocab(vocab_file):
    """Load a one-token-per-line vocabulary file into an OrderedDict token -> index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        # Iterate the file object directly instead of materializing readlines().
        for index, line in enumerate(reader):
            vocab[line.rstrip("\n")] = index
    return vocab
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    # str.split() with no argument already discards leading/trailing and
    # repeated whitespace, and returns [] for empty or blank input.
    return text.split()
def _is_chinese_substr(char):
    # Return the (possibly empty) list of '##'-prefixed CJK-character matches
    # in ``char``; truthy iff a wordpiece-continuation Chinese substring exists.
    return re.findall("##[\u4E00-\u9FA5]", char)
class BertTokenizer(PreTrainedTokenizer):
"""
Construct a BERT tokenizer. Based on WordPiece.
Args:
vocab_file (:obj:`str`):
Path to a one-wordpiece-per-line vocabulary file.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to lower case the input.
Only has an effect when do_basic_tokenize=True.
do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to do basic tokenization before wordpiece.
never_split (:obj:`Iterable`, `optional`):
List of tokens which will never be split during tokenization.
Only has an effect when do_basic_tokenize=True.
tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese,
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328.
do_chinese_wwm (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to do whole word masking for Chinese.
Chinese sentence will be segmented by a third-party tool first.
Each substr will be added '##' prefix and its index will be calucated by
id(##A) = id(A) + vocab_size.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
self,
vocab_file,
do_lower_case=True,
do_basic_tokenize=True,
never_split=None,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
tokenize_chinese_chars=True,
do_chinese_wwm=False,
add_bos_token=False,
**kwargs,
):
super(BertTokenizer, self).__init__(
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs,
)
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the "
"vocabulary from a Google pretrained model use "
"`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
vocab_file
)
)
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()]
)
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
if do_chinese_wwm:
self.basic_tokenizer = BasicTokenizerWithChineseWWM(
do_lower_case=do_lower_case,
never_split=never_split,
tokenize_chinese_chars=tokenize_chinese_chars,
)
else:
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case,
never_split=never_split,
tokenize_chinese_chars=tokenize_chinese_chars,
)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
self.add_bos_token = add_bos_token
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab.
For Chinese substr, id = vocab_size + id(substr.remove(##)).
"""
index = self.vocab.get(token, self.vocab.get(self.unk_token))
return index
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab.
For Chinese substr, id = vocab_size + id(substr.remove(##)).
"""
token = self.ids_to_tokens.get(index, self.unk_token)
return token
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) to a single string."""
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""Add special tokens to a sequence or a pair of sequence.
BERT format sentence input:
- single sequence: [CLS] tokens_a [SEP]
- pair of sequences: [CLS] tokens_a [SEP] tokens_b [SEP]
Args:
token_ids_0 (List[int]): The token ids of sentence 0.
token_ids_1 (List[int], optional): The token ids of sentence 1. Defaults to None.
Returns:
:obj:`List[str]`: The sequence after adding special toekens.
"""
if self.add_bos_token:
cls = [self.cls_token_id]
sep = [self.sep_token_id]
else:
cls = []
sep = []
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the tokenizer vocabulary to a directory or file.

        Args:
            save_directory: Target directory (the canonical vocab filename is
                appended), or — if it is not an existing directory — the output
                file path itself.
            filename_prefix: Optional prefix prepended to the vocab filename.

        Returns:
            tuple: A 1-tuple containing the path of the written vocab file.
        """
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory,
                (filename_prefix + "-" if filename_prefix else "")
                + VOCAB_FILES_NAMES["vocab_file"],
            )
        else:
            # Not a directory: treat `save_directory` as the file path.
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            # One token per line, ordered by id. The line number implicitly
            # becomes the token id on reload, so gaps in the ids are reported.
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        "Saving vocabulary to {}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!".format(vocab_file)
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
class BasicTokenizer(object):
    """
    Constructs a BasicTokenizer that will run basic
    tokenization (punctuation splitting, lower casing, etc.).
    """
    def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):
        """Constructs a BasicTokenizer.
        Args:
            **do_lower_case**: Whether to lower case the input.
            **never_split**: (`optional`) list of str
                Kept for backward compatibility purposes.
                Now implemented directly at the base class level
                (see :func:`PreTrainedTokenizer.tokenize`)
                List of token not to split.
            **tokenize_chinese_chars**: (`optional`) boolean (default True)
                Whether to tokenize Chinese characters.
                This should likely be deactivated for Japanese:
                see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
        """
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        # Stored as a set for O(1) membership checks in tokenize().
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text.
        Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer.
        Args:
            **never_split**: (`optional`) list of str
                Kept for backward compatibility purposes.
                Now implemented directly at the base class level
                (see :func:`PreTrainedTokenizer.tokenize`)
                List of token not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            # Tokens listed in never_split keep their original casing/accents.
            if self.do_lower_case and token not in never_split:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))
        # Re-tokenize on whitespace to flatten the punctuation-split pieces.
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining accents;
        # category "Mn" (nonspacing mark) selects exactly those combining
        # accents, which are dropped.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if never_split is not None and text in never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        # Each punctuation char becomes its own piece; runs of non-punctuation
        # chars are accumulated into the current last piece.
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                    start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]
    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True
        return False
    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the U+FFFD replacement character, and control chars;
            # normalize any whitespace character to a plain space.
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class BasicTokenizerWithChineseWWM(BasicTokenizer):
    """Pre-segmentation for Chinese sentences, which will be used in whole word mask.

    Consecutive CJK characters are segmented into words with jieba before the
    usual whitespace-around-CJK padding, so whole-word masking can treat a
    multi-character Chinese word as a single unit.
    """

    def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):
        super(BasicTokenizerWithChineseWWM, self).__init__(
            do_lower_case=do_lower_case,
            never_split=never_split,
            tokenize_chinese_chars=tokenize_chinese_chars,
        )
        try:
            import jieba

            # HMM=False keeps segmentation deterministic (dictionary-only mode).
            self.pre_tokenizer = lambda x: jieba.lcut(x, HMM=False)
        except ImportError:
            raise (ImportError("Chinese whole word mask need jieba"))

    def _flush_piece(self, output, piece):
        """Segment a buffered CJK run with jieba and append each word padded by spaces.

        A no-op for an empty buffer (jieba returns [] for "").
        """
        for word in self.pre_tokenizer(piece):
            output.append(" ")
            output.append(word)
            output.append(" ")

    def _tokenize_chinese_chars(self, text):
        """For Chinese pieces, uses jieba to segment the words and
        adds whitespace around CJK character."""
        output = []
        piece = ""  # buffer of consecutive CJK characters awaiting segmentation
        for char in text:
            if self._is_chinese_char(ord(char)):
                piece += char
            else:
                # A non-CJK char ends the current run: segment and flush it,
                # then emit the char itself unchanged.
                self._flush_piece(output, piece)
                output.append(char)
                piece = ""
        # Flush a trailing CJK run, if any.
        self._flush_piece(output, piece)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""
    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        # vocab: token -> id mapping; only membership tests are used here.
        self.vocab = vocab
        self.unk_token = unk_token
        # Words longer than this are mapped straight to unk_token.
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.
        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.
        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]
            input = "有没有"
            output = ["有", "##没", "##有"]
        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer`.
        Returns:
            A list of wordpiece tokens.
        """
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            # Greedy longest-match-first: for each start position try the
            # longest remaining substring, shrinking until a vocab hit.
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        # Non-initial pieces carry the "##" continuation prefix.
                        substr = "##" + substr
                    if substr.startswith("##"):
                        # `_is_chinese_substr` is a helper defined elsewhere in
                        # this module; for Chinese continuation pieces the vocab
                        # is probed with the bare characters, prefix stripped.
                        if _is_chinese_substr(substr):
                            if substr[2:] in self.vocab:  # for Chinese substr
                                cur_substr = substr
                                break
                        else:
                            if substr in self.vocab:  # for English substr
                                cur_substr = substr
                                break
                    else:
                        if (
                            substr in self.vocab
                        ):  # non-substr, maybe character or whole Chinese word
                            cur_substr = substr
                            break
                    end -= 1
                if cur_substr is None:
                    # No prefix of the remainder is in the vocab: map the whole
                    # word to unk_token.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
| 19,425 | 36.720388 | 99 | py |
libai | libai-main/libai/tokenizer/__init__.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .build import build_tokenizer
from .tokenization_bert import BertTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_t5 import T5Tokenizer
from .tokenization_base import PreTrainedTokenizer
| 889 | 39.454545 | 74 | py |
libai | libai-main/libai/tokenizer/tokenization_gpt2.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT (BPE)."""
import json
import logging
import os
from functools import lru_cache
from io import open
from typing import List, Optional
import regex as re
from .tokenization_base import PreTrainedTokenizer
logger = logging.getLogger(__name__)
# Canonical on-disk filenames for the two GPT-2 vocabulary artifacts.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}
# Download URLs for pretrained vocab/merges files, keyed by model shortcut name.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json"},
    "merges_file": {"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt"},
}
# Maximum input length (size of the positional embeddings) per pretrained model.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Build the reversible byte -> unicode-character table used by byte-level BPE.

    Every byte (0-255) is mapped to a printable unicode character so the BPE
    code never has to handle whitespace/control bytes directly. Printable
    bytes map to themselves; the remaining bytes are assigned, in ascending
    order, to codepoints starting at 256. Keeping all 256 bytes representable
    avoids UNKs without inflating the vocab.
    """
    # Bytes whose own codepoint is already a printable, non-space character.
    printable_bytes = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    mapping = {b: chr(b) for b in printable_bytes}
    shift = 0
    for b in range(2 ** 8):
        if b not in mapping:
            mapping[b] = chr(2 ** 8 + shift)
            shift += 1
    return mapping
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length
    strings).

    Args:
        word: Tuple of symbol strings.

    Returns:
        set: All adjacent ``(left, right)`` symbol pairs; empty for words with
        fewer than two symbols.
    """
    # Zipping the word with its one-step shift yields every adjacent pair.
    # Unlike the previous `word[0]`-seeded loop, this does not raise
    # IndexError on an empty word.
    return set(zip(word, word[1:]))
class GPT2Tokenizer(PreTrainedTokenizer):
    """
    Construct a GPT-2 tokenizer. Based on byte-level Byte-Pair-Encoding.

    Args:
        vocab_file (:obj:`str`):
            Path to the vocabulary file.
        merges_file (:obj:`str`):
            Path to the merges file.
        errors (:obj:`str`, `optional`, defaults to :obj:`"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode
            <https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information.
        unk_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
            The unknown token. A token that is not in the vocabulary cannot be
            converted to an ID and is set to be this token instead.
        bos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
            The beginning of sequence token.
        eos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
            The end of sequence token.
        add_bos_token (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether :meth:`build_inputs_with_special_tokens` prepends the BOS token.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_bos_token=False,
        **kwargs,
    ):
        super(GPT2Tokenizer, self).__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs
        )
        # Fix: open both files in `with` blocks so the handles are closed
        # deterministically (the previous bare `open(...)` calls leaked them
        # until garbage collection).
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # [1:-1] drops the "#version: ..." header line and the trailing
            # empty string produced by split("\n") on the final newline.
            bpe_data = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_data]
        # Lower rank == higher merge priority.
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        # Memoizes bpe() results per token. NOTE(review): unbounded growth
        # over very diverse inputs — confirm acceptable for long-running use.
        self.cache = {}
        # Should have added re.IGNORECASE so BPE merges can happen for
        # capitalized versions of contractions.
        self.pat = re.compile(
            r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
        )
        self.add_bos_token = add_bos_token

    @property
    def vocab_size(self):
        """Size of the base vocabulary (runtime-added tokens excluded)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token->id mapping, runtime-added tokens included."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to ``token``; return the space-joined word pieces."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Repeatedly merge the best-ranked adjacent pair until no pair
            # present in the word is a known merge.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # `first` does not occur again: keep the tail and stop.
                    # (Narrowed from a bare `except:` — index() raises ValueError.)
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            # Maps all our bytes to unicode strings, avoiding control tokens
            # of the BPE (spaces in our case)
            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        # Invert the byte->unicode mapping applied in _tokenize().
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens to a sequence or a pair of sequence.
        GPT2 format sentence input:
        - single sequence: <|endoftext|> tokens_a
        - pair of sequences: <|endoftext|> tokens_a <|endoftext|> tokens_b

        The BOS token is only inserted when ``self.add_bos_token`` is set.

        Args:
            token_ids_0 (List[int]): The token ids of sentence 0.
            token_ids_1 (List[int], optional): The token ids of sentence 1. Defaults to None.
        Returns:
            :obj:`List[int]`: The sequence after adding special tokens.
        """
        if self.add_bos_token:
            bos = [self.bos_token_id]
        else:
            bos = []
        if token_ids_1 is None:
            return bos + token_ids_0
        return bos + token_ids_0 + bos + token_ids_1

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write ``vocab.json`` and ``merges.txt`` into ``save_directory``.

        Args:
            save_directory: Existing target directory.
            filename_prefix: Optional prefix prepended to both filenames.

        Returns:
            tuple: Paths of the two written files, or ``None`` when
            ``save_directory`` is not a directory (an error is logged).
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"],
        )
        merge_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"],
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Merges are written in rank order; the line number is implicitly
            # the merge rank on reload, so gaps are reported.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merge_file)
| 9,721 | 34.611722 | 100 | py |
libai | libai-main/libai/tokenizer/tokenization_t5.py | # coding=utf-8
# Copyright 2018 T5 Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for Google T5 (sentence piece)."""
import logging
import os
import warnings
from shutil import copyfile
from typing import List, Optional
import regex as re
import sentencepiece as spm
from .tokenization_base import PreTrainedTokenizer
logger = logging.getLogger(__name__)
# Canonical on-disk filename of the SentencePiece model.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
# Download URL for the pretrained SentencePiece model, keyed by shortcut name.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model"}
}
# Maximum input length (positional embedding size) per pretrained model.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-base": 512,
}
class T5Tokenizer(PreTrainedTokenizer):
    """
    Construct a T5 tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`.

    Args:
        vocab_file (:obj:`str`):
            Path to the vocabulary file.
        eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
            The end of sequence token.
        unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot
            be converted to an ID and is set to be this token instead.
        pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        extra_ids (:obj:`int`, `optional`, defaults to 100):
            Add a number of extra ids added to the end of the vocabulary for use
            as sentinels. These tokens are accessible as "<extra_id_{%d}>" where
            "{%d}" is a number between 0 and extra_ids-1. Extra tokens are indexed
            from the end of the vocabulary up to beginning ("<extra_id_0>" is the
            last token in the vocabulary like in T5 preprocessing see `here
            <https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117>`__).
        additional_special_tokens (:obj:`List[str]`, `optional`):
            Additional special tokens used by the tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        add_bos_token=False,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Both were given: the caller-supplied specials must already
            # contain the sentinel tokens, and in matching number.
            extra_tokens = len(
                set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens))
            )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens "
                    # Typo fix: "privided" -> "provided".
                    f"({additional_special_tokens}) are provided to T5Tokenizer. "
                    "In this case the additional_special_tokens must include the extra_ids tokens"
                )
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
        self.add_bos_token = add_bos_token

    @property
    def vocab_size(self):
        """SentencePiece vocabulary size plus the sentinel (<extra_id_*>) tokens."""
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        """Return the full token->id mapping, runtime-added tokens included."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """Tokenize a string."""
        pieces = self.sp_model.encode(text, out_type=str)
        return pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token.startswith("<extra_id_"):
            # Sentinels are indexed from the END of the vocab:
            # <extra_id_0> is the last id, <extra_id_1> the one before, etc.
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode_pieces(current_sub_tokens) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode_pieces(current_sub_tokens)
        return out_string.strip()

    def _add_eos_if_not_present(self, token_ids):
        """Append the EOS id when ``add_bos_token`` is set and it is not already last."""
        if not self.add_bos_token:
            return token_ids
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            # Bug fix: this was a plain string, so "{self.eos_token}" was
            # printed literally; make it an f-string.
            warnings.warn(f"This sequence already has {self.eos_token}.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens to a sequence or a pair of sequence.
        T5 format sentence input:
        - single sequence: tokens_a </s>
        - pair of sequences: tokens_a </s> tokens_b </s>
        Args:
            token_ids_0 (List[int]): The token ids of sentence 0.
            token_ids_1 (List[int], optional): The token ids of sentence 1. Defaults to None.
        Returns:
            :obj:`List[int]`: The sequence after adding special tokens.
        """
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the tokenizer vocabulary to a directory or file.

        Copies the SentencePiece model file into ``save_directory`` (unless it
        is already there) and returns the destination path in a 1-tuple.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
        return (out_vocab_file,)
| 7,934 | 38.08867 | 164 | py |
libai | libai-main/libai/tokenizer/build.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from libai.config import instantiate
logger = logging.getLogger(__name__)
def build_tokenizer(cfg):
    """Instantiate the tokenizer described by ``cfg.tokenizer``.

    When ``cfg.append_eod`` is set and the instantiated tokenizer has no
    end-of-document token, fall back to its EOS token, then its PAD token.
    """
    tokenizer = instantiate(cfg.tokenizer)
    if cfg.append_eod and tokenizer.eod_token is None:
        eos = tokenizer.eos_token
        tokenizer.eod_token = eos if eos is not None else tokenizer.pad_token
    return tokenizer
| 1,059 | 30.176471 | 74 | py |
libai | libai-main/libai/onnx_export/gpt2_to_onnx.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from oneflow_onnx.oneflow2onnx.util import convert_to_onnx_and_check
from libai.config import LazyConfig
from libai.models.utils import GPT2LoaderLiBai
from projects.MagicPrompt.gpt2 import GPTModel
def get_model(config_file):
    """Build the GPT-2 model described by *config_file* and load LiBai weights.

    NOTE(review): "/path/to/model" is a placeholder checkpoint path that must
    be edited before running — confirm. Also, the loader is given ``cfg.cfg``
    while the sibling T5 exporter passes ``cfg.model.cfg`` — verify which is
    correct for this config layout.
    """
    cfg = LazyConfig.load(config_file)
    # Inference/export only: skip pretrained-path resolution, data loading
    # and tokenizer setup.
    cfg.model.cfg.pretrained_model_path = None
    cfg.dataloader = None
    cfg.tokenization = None
    print("Building model....")
    loader = GPT2LoaderLiBai(GPTModel, cfg.cfg, "/path/to/model")
    model = loader.load()
    print("Build model finished.")
    return model
class gpt2Graph(nn.Graph):
    """Static-graph wrapper around an eager GPT-2 model, used for ONNX export."""

    def __init__(self, eager_model):
        super().__init__()
        self.model = eager_model

    def build(
        self,
        input_ids,
    ):
        """Trace one forward pass; the traced graph is what gets exported."""
        return self.model(
            input_ids,
        )
if __name__ == "__main__":
    model = get_model("projects/MagicPrompt/configs/gpt2_inference.py")
    model.eval()
    gpt2_graph = gpt2Graph(model)
    # Build the static graph model
    # Dummy global tensor of shape (1, 5), used only to trace shapes/dtypes.
    input_ids = flow.ones(
        1, 5, dtype=flow.int64, sbp=flow.sbp.broadcast, placement=flow.placement("cuda", ranks=[0])
    )
    # check your model.forward is valid
    # output = gpt2_graph(
    #     input_ids
    # )
    print("Compiling the graph which may make some time, please wait for a moment....")
    gpt2_graph._compile(
        input_ids,
    )
    # input_tensor_range: random integer inputs in this range are used for the
    # OneFlow-vs-ONNX output check — presumably [0, 10); confirm against the
    # oneflow_onnx documentation.
    convert_to_onnx_and_check(
        gpt2_graph,
        external_data=False,
        opset=11,
        flow_weight_dir=None,
        onnx_model_path="./",
        dynamic_batch_size=False,
        device="gpu_global",
        input_tensor_range=[0, 10],
    )
| 2,298 | 25.425287 | 99 | py |
libai | libai-main/libai/onnx_export/t5_to_onnx.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from oneflow_onnx.oneflow2onnx.util import convert_to_onnx_and_check
from libai.config import LazyConfig
from projects.MT5.mt5_model import MT5Model
from projects.MT5.utils.mt5_loader import T5LoaderHuggerFace
def get_model(config_file):
    """Build an MT5 model from a LazyConfig file and load Hugging Face weights.

    NOTE(review): "/path/to/model" is a placeholder checkpoint path that must
    be edited before running — confirm.
    """
    cfg = LazyConfig.load(config_file)
    cfg.model.cfg.model_type = "mt5"
    # Inference/export only: skip pretrained-path resolution, data loading
    # and tokenizer setup.
    cfg.model.cfg.pretrained_model_path = None
    cfg.dataloader = None
    cfg.tokenization = None
    print("Building model....")
    loader = T5LoaderHuggerFace(MT5Model, cfg.model.cfg, "/path/to/model")
    model = loader.load()
    print("Build model finished.")
    return model
class t5Graph(nn.Graph):
    """Static-graph wrapper around an eager (M)T5 model, used for ONNX export."""

    def __init__(self, eager_model):
        super().__init__()
        self.model = eager_model

    def build(
        self,
        encoder_input_ids,
        encoder_attn_mask,
        decoder_input_ids,
        decoder_attn_mask,
        encoder_decoder_attn_mask,
    ):
        """Trace one forward pass; the traced graph is what gets exported."""
        return self.model(
            encoder_input_ids,
            encoder_attn_mask,
            decoder_input_ids,
            decoder_attn_mask,
            encoder_decoder_attn_mask,
        )
if __name__ == "__main__":
    model = get_model("projects/MT5/configs/mt5_pretrain.py")
    model.eval()
    t5_graph = t5Graph(model)
    # Build the static graph model
    # Dummy global tensors used only to trace the graph.
    # NOTE(review): the dummy shapes/dtypes look inconsistent — encoder ids
    # are (1, 5) int64 while encoder_attn_mask is (1, 3); decoder_input_ids is
    # created as a (1, 5, 5) bool tensor, which resembles a mask rather than
    # token ids. Confirm against MT5Model.forward before relying on this export.
    encoder_input_ids = flow.ones(
        1, 5, dtype=flow.int64, sbp=flow.sbp.broadcast, placement=flow.placement("cuda", ranks=[0])
    )
    encoder_attn_mask = flow.ones(
        1, 3, dtype=flow.int64, sbp=flow.sbp.broadcast, placement=flow.placement("cuda", ranks=[0])
    )
    decoder_input_ids = flow.ones(
        1,
        5,
        5,
        dtype=flow.bool,
        sbp=flow.sbp.broadcast,
        placement=flow.placement("cuda", ranks=[0]),
    )
    decoder_attn_mask = flow.ones(
        1,
        3,
        3,
        dtype=flow.bool,
        sbp=flow.sbp.broadcast,
        placement=flow.placement("cuda", ranks=[0]),
    )
    encoder_decoder_attn_mask = flow.ones(
        1,
        3,
        5,
        dtype=flow.bool,
        sbp=flow.sbp.broadcast,
        placement=flow.placement("cuda", ranks=[0]),
    )
    # check your model.forward is valid
    # output = t5_graph(
    #     encoder_input_ids,
    #     encoder_attn_mask,
    #     decoder_input_ids,
    #     decoder_attn_mask,
    #     encoder_decoder_attn_mask
    # )
    # print(output)
    print("Compiling the graph which may make some time, please wait for a moment....")
    t5_graph._compile(
        encoder_input_ids,
        encoder_attn_mask,
        decoder_input_ids,
        decoder_attn_mask,
        encoder_decoder_attn_mask,
    )
    convert_to_onnx_and_check(
        t5_graph,
        external_data=False,
        opset=11,
        flow_weight_dir=None,
        onnx_model_path="./",
        dynamic_batch_size=False,
        device="gpu_global",
        input_tensor_range=[0, 10],
    )
| 3,581 | 26.343511 | 99 | py |
libai | libai-main/libai/onnx_export/onnx_inference/gpt2_onnx_infer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from typing import List
import numpy as np
import onnxruntime as ort
class OnnxModel:
    """Thin wrapper around an ONNX Runtime ``InferenceSession``.

    Args:
        onnx_filename: Path to the ``.onnx`` model file.
        providers: Execution providers to use; when ``None``, chosen from the
            installed onnxruntime version (GPU providers for >= 1.9.x).
        ort_optimize: Whether to enable ONNX Runtime graph optimizations.
    """

    @staticmethod
    def _version_tuple(version):
        """Parse ``"X.Y.Z..."`` into a tuple of leading integers for safe comparison."""
        parts = []
        for piece in version.split("."):
            digits = ""
            for ch in piece:
                if not ch.isdigit():
                    break
                digits += ch
            parts.append(int(digits) if digits else 0)
        return tuple(parts)

    def __init__(
        self,
        onnx_filename,
        providers: List[str] = None,
        ort_optimize: bool = True,
    ):
        ort_sess_opt = ort.SessionOptions()
        ort_sess_opt.graph_optimization_level = (
            ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
            if ort_optimize
            else ort.GraphOptimizationLevel.ORT_DISABLE_ALL
        )
        if providers is None:
            # Bug fix: the version check previously compared version strings
            # lexicographically (`ort.__version__ > "1.9.0"`), which wrongly
            # classifies e.g. "1.10.0" as older than "1.9.0". Compare numeric
            # tuples instead.
            if self._version_tuple(ort.__version__) > (1, 9, 0):
                providers = [
                    "TensorrtExecutionProvider",
                    "CUDAExecutionProvider",
                    "CPUExecutionProvider",
                ]
            else:
                providers = ["CPUExecutionProvider"]
        self.sess = ort.InferenceSession(
            onnx_filename, sess_options=ort_sess_opt, providers=providers
        )

    def forward(self, input_list):
        """Run inference, feeding ``input_list`` positionally to the session inputs.

        Args:
            input_list: Arrays, in the same order as ``self.sess.get_inputs()``.

        Returns:
            list: All model outputs, as returned by ``InferenceSession.run``.
        """
        ipt_dict = OrderedDict()
        for idx, ipt in enumerate(self.sess.get_inputs()):
            ipt_dict[ipt.name] = input_list[idx]
        onnx_res = self.sess.run([], ipt_dict)
        return onnx_res
if __name__ == "__main__":
    onnx_model = OnnxModel("model.onnx")
    # Token ids must be int64 for the exported graph; the original code
    # applied the same astype(np.int64) cast twice, which was redundant.
    input_list = [
        np.ones((1, 5)).astype(np.int64),
    ]
    print(onnx_model.forward(input_list))
| 2,032 | 30.276923 | 74 | py |
libai | libai-main/libai/onnx_export/onnx_inference/t5_onnx_infer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from typing import List
import numpy as np
import onnxruntime as ort
class OnnxModel:
    """Thin wrapper around an ONNX Runtime ``InferenceSession``.

    Arguments:
        onnx_filename: path of the serialized ONNX model to load.
        providers: execution providers handed to ONNX Runtime. When ``None``,
            TensorRT/CUDA/CPU providers are requested on onnxruntime newer
            than 1.9.0, otherwise only the CPU provider.
        ort_optimize: if True, enable ONNX Runtime's extended graph
            optimizations; otherwise disable all optimizations.
    """

    @staticmethod
    def _parse_version(version: str):
        """Parse a version string into a 3-tuple of ints so versions compare
        numerically. Non-numeric suffixes (e.g. ``"0+cu118"``) are truncated
        and missing components default to 0.

        This fixes a bug where ``ort.__version__ > "1.9.0"`` compared strings
        lexicographically, so e.g. ``"1.10.0" < "1.9.0"`` and the GPU
        providers were never selected on onnxruntime >= 1.10.
        """
        parts = []
        for piece in version.split(".")[:3]:
            digits = ""
            for ch in piece:
                if not ch.isdigit():
                    break
                digits += ch
            parts.append(int(digits) if digits else 0)
        while len(parts) < 3:
            parts.append(0)
        return tuple(parts)

    def __init__(
        self,
        onnx_filename,
        providers: List[str] = None,
        ort_optimize: bool = True,
    ):
        ort_sess_opt = ort.SessionOptions()
        ort_sess_opt.graph_optimization_level = (
            ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
            if ort_optimize
            else ort.GraphOptimizationLevel.ORT_DISABLE_ALL
        )
        if providers is None:
            # Numeric comparison (was a lexicographic string comparison).
            if self._parse_version(ort.__version__) > (1, 9, 0):
                providers = [
                    "TensorrtExecutionProvider",
                    "CUDAExecutionProvider",
                    "CPUExecutionProvider",
                ]
            else:
                providers = ["CPUExecutionProvider"]
        self.sess = ort.InferenceSession(
            onnx_filename, sess_options=ort_sess_opt, providers=providers
        )

    def forward(self, input_list):
        """Run one inference pass; ``input_list`` is matched positionally to
        the session's declared inputs. Returns the list of all model outputs."""
        ipt_dict = OrderedDict()
        for idx, ipt in enumerate(self.sess.get_inputs()):
            ipt_dict[ipt.name] = input_list[idx]
        # Empty output-name list means "fetch every model output".
        onnx_res = self.sess.run([], ipt_dict)
        return onnx_res
if __name__ == "__main__":
    onnx_model = OnnxModel("model.onnx")
    # Dummy T5 inputs, in the order the graph was exported with:
    # encoder/decoder token ids (int64) followed by the encoder self-attention,
    # decoder self-attention and encoder-decoder cross-attention masks (bool).
    input_list = [
        np.ones((1, 5), dtype=np.int64),
        np.ones((1, 3), dtype=np.int64),
        np.ones((1, 5, 5), dtype=bool),
        np.ones((1, 3, 3), dtype=bool),
        np.ones((1, 3, 5), dtype=bool),
    ]
    print(onnx_model.forward(input_list))
| 2,180 | 30.608696 | 74 | py |
libai | libai-main/libai/optim/__init__.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .build import build_optimizer, get_default_optimizer_params
| 686 | 39.411765 | 74 | py |
libai | libai-main/libai/optim/build.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from collections import defaultdict
from typing import Any, Dict, List
import oneflow as flow
from libai.config import instantiate
from libai.layers import LayerNorm
# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/detectron2/blob/main/detectron2/solver/build.py
# --------------------------------------------------------
def build_optimizer(cfg, model):
    """Build an optimizer from a (lazy) config node.

    The model is attached to ``cfg.params`` before instantiation so that the
    parameter-group builder referenced by the config receives it.
    """
    cfg.params.model = model
    return instantiate(cfg)
def get_default_optimizer_params(
    model,
    base_lr=None,
    weight_decay=None,
    weight_decay_norm=None,
    weight_decay_bias=None,
    clip_grad_max_norm=None,
    clip_grad_norm_type=None,
    overrides=None,
):
    """
    Get default param list for optimizer, with support for a few types of overrides.
    If no overrides are needed, it is equivalent to `model.parameters()`.
    Arguments:
        model: the model whose parameters are grouped.
        base_lr: lr for every group by default. Can be omitted to use the one in optimizer.
        weight_decay: weight decay for every group by default. Can be omitted to use the one
            in optimizer.
        weight_decay_norm: override weight decay for params in normalization layers
        weight_decay_bias: override weight decay for bias parameters
        clip_grad_max_norm: max norm for gradient clipping; only attached to the groups
            when ``clip_grad_norm_type`` is also given.
        clip_grad_norm_type: norm type (e.g. 2.0) for gradient clipping.
        overrides: if not `None`, provides values for optimizer hyperparameters
            (LR, weight decay) for module parameters with a given name; e.g.
            ``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and
            weight decay values for all module parameters named `embedding`.
    For common transformer models, ``weight_decay_norm`` and ``weight_decay_bias``
    are usually set to 0.
    Example:
    ::
        flow.optim.AdamW(
            get_default_optimizer_params(model, weight_decay_norm=0, weight_decay_bias=0),
            lr=0.01,
            weight_decay=1e-4
        )
    """
    if overrides is None:
        overrides = {}
    defaults = {}
    if base_lr is not None:
        defaults["lr"] = base_lr
    if weight_decay is not None:
        defaults["weight_decay"] = weight_decay
    # Gradient clipping options are only attached when both values are given.
    if clip_grad_max_norm is not None and clip_grad_norm_type is not None:
        defaults["clip_grad_max_norm"] = clip_grad_max_norm
        defaults["clip_grad_norm_type"] = clip_grad_norm_type
    # weight_decay_bias is expressed as an override for parameters named "bias".
    bias_overrides = {}
    if weight_decay_bias is not None:
        bias_overrides["weight_decay"] = weight_decay_bias
    if len(bias_overrides):
        if "bias" in overrides:
            raise ValueError("Conflicting overrides for 'bias'")
        overrides["bias"] = bias_overrides
    # Module types whose parameters get weight_decay_norm instead of weight_decay.
    norm_module_types = (
        LayerNorm,
        flow.nn.BatchNorm1d,
        flow.nn.BatchNorm2d,
        flow.nn.BatchNorm3d,
        flow.nn.GroupNorm,
        flow.nn.InstanceNorm1d,
        flow.nn.InstanceNorm2d,
        flow.nn.InstanceNorm3d,
        flow.nn.FusedBatchNorm1d,
        flow.nn.FusedBatchNorm2d,
        flow.nn.FusedBatchNorm3d,
    )
    params = []
    memo = set()
    for module in model.modules():
        # recurse=False so each parameter is seen on the module that directly
        # owns it, which is what the isinstance check below relies on.
        for model_param_name, value in module.named_parameters(recurse=False):
            if not value.requires_grad:
                continue
            # Avoid duplicating parameters
            if value in memo:
                continue
            memo.add(value)
            hyperparams = copy.copy(defaults)
            if isinstance(module, norm_module_types) and weight_decay_norm is not None:
                hyperparams["weight_decay"] = weight_decay_norm
            # Per-name overrides win over defaults and the norm rule above.
            hyperparams.update(overrides.get(model_param_name, {}))
            params.append({"params": [value], **hyperparams})
    return reduce_param_groups(params)
def _expand_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Transform parameter groups into per-parameter structure.
Later items in `params` can overwrite parameters set in previous items.
"""
ret = defaultdict(dict)
for item in params:
assert "params" in item
cur_params = {x: y for x, y in item.items() if x != "params"}
for param in item["params"]:
ret[param].update({"params": [param], **cur_params})
return list(ret.values())
def reduce_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Reorganize the parameter groups and merge duplicated groups.
    The number of parameter groups needs to be as small as possible in order
    to efficiently use the OneFlow multi-tensor optimizer. Therefore instead
    of using a parameter_group per single parameter, we reorganize the
    parameter groups and merge duplicated groups. This approach speeds
    up multi-tensor optimizer significantly.
    """
    # First expand to one group per parameter so later overrides are applied.
    expanded = _expand_param_groups(params)
    # Bucket parameters by their (hashable) hyperparameter settings.
    buckets = defaultdict(list)
    for entry in expanded:
        key = tuple((name, value) for name, value in entry.items() if name != "params")
        buckets[key].extend(entry["params"])
    # Re-materialize one merged group per distinct hyperparameter set.
    merged = []
    for key, grouped_params in buckets.items():
        group = dict(key)
        group["params"] = grouped_params
        merged.append(group)
    return merged
| 5,901 | 35.208589 | 92 | py |
libai | libai-main/libai/layers/embedding.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import oneflow as flow
from oneflow import nn
from oneflow.nn import init
from libai.utils import distributed as dist
class Embedding(nn.Module):
    """Construct the trainable embedding module, which does not support parallelization.
    This can be used for positional embedding and token type embedding.

    Arguments:
        num_embeddings: size of vocabulary.
        embedding_dim: dimension of embeddings.
        padding_idx: pad index. Defaults to None.
        init_method: method to initialize weights. Defaults to ``flow.nn.init.xavier_normal_``.
        amp_enabled: fp16 option for embedding weight. Defaults to False.
        dtype: dtype of the embedding weight. Defaults to ``flow.float32``.
        layer_idx: layer index deciding the pipeline-parallel placement of the
            weight. Defaults to 0.
    """
    def __init__(
        self,
        num_embeddings,
        embedding_dim,
        padding_idx=None,
        init_method=init.xavier_normal_,
        amp_enabled=False,
        dtype=flow.float32,
        layer_idx=0,
    ):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        if padding_idx is not None:
            # Validate padding_idx and normalize a negative index to the
            # equivalent non-negative row index.
            if padding_idx > 0:
                assert (
                    padding_idx < self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
            elif padding_idx < 0:
                assert (
                    padding_idx >= -self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.init_method = init_method
        self.amp_enabled = amp_enabled
        assert num_embeddings > 0
        # Weight is broadcast on every rank (no tensor parallelism); only the
        # placement follows the pipeline stage given by layer_idx.
        self.weight = nn.Parameter(
            flow.empty(
                (num_embeddings, embedding_dim),
                dtype=dtype,
                placement=dist.get_layer_placement(layer_idx),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            )
        )
        # Env switch to skip random init, e.g. when a checkpoint will be
        # loaded anyway.
        if os.getenv("ONEFLOW_LINEAR_EMBEDDING_SKIP_INIT", "0") != "1":
            self.init_method(self.weight)
        # FIXME(lxy): Fill padding_idx is not supported in nd_sbp right now.
        # self._fill_padding_idx_with_zero()
    def forward(self, input_ids):
        """Look up the embedding rows for ``input_ids`` (gather on axis 0)."""
        # amp_white_identity marks the weight as fp16-eligible for AMP
        # (see ``amp_enabled`` in the class docstring).
        weight = flow._C.amp_white_identity(self.weight) if self.amp_enabled else self.weight
        # embeddings with sbp sign: [B, B]
        #   [B, B] x [S(0), B] --> [S(0), B]
        #     ↑         ↑            ↑
        #   embed    pos_ids       pos_embed
        input_embeds = flow._C.gather(weight, input_ids, axis=0)
        return input_embeds
    def _fill_padding_idx_with_zero(self) -> None:
        # Currently unused (see FIXME above): would zero out the padding row.
        if self.padding_idx is not None:
            with flow.no_grad():
                self.weight[self.padding_idx] = flow.zeros(
                    self.embedding_dim,
                    placement=dist.get_layer_placement(0),
                    sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                )
    def extra_repr(self) -> str:
        s = "num_embeddings={num_embeddings}, embedding_dim={embedding_dim}"
        if self.padding_idx is not None:
            s += ", padding_idx={padding_idx}"
        return s.format(**self.__dict__)
class VocabEmbedding(nn.Module):
    """Construct the word embeddings, which may be split along vocabulary dimension.

    Arguments:
        num_embeddings: size of vocabulary.
        embedding_dim: dimension of embeddings.
        padding_idx: pad index. Defaults to None.
        init_method: method to initialize weights. Defaults to ``flow.nn.init.xavier_normal_``.
        amp_enabled: fp16 option for embedding weight. Defaults to False.
    """
    def __init__(
        self,
        num_embeddings,
        embedding_dim,
        padding_idx=None,
        init_method=init.xavier_normal_,
        amp_enabled=False,
    ):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        if padding_idx is not None:
            # Validate padding_idx and normalize a negative index to the
            # equivalent non-negative row index.
            if padding_idx > 0:
                assert (
                    padding_idx < self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
            elif padding_idx < 0:
                assert (
                    padding_idx >= -self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.init_method = init_method
        self.amp_enabled = amp_enabled
        # Word token embedding shape with (vocab_size, hidden_size)
        # sbp: [B, S(0)] — the table is split along the vocab (row) dimension
        # for tensor parallelism; each rank holds a slice of the rows.
        self.weight = nn.Parameter(
            flow.empty(
                (num_embeddings, embedding_dim),
                dtype=flow.float32,
                placement=dist.get_layer_placement(0),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)]),
            )
        )
        # Initialize the word embedding
        if os.getenv("ONEFLOW_LINEAR_EMBEDDING_SKIP_INIT", "0") != "1":
            self.init_method(self.weight)
        # FIXME(Lxy): Fill padding_idx is not supported in nd_sbp right now.
        # self._fill_padding_idx_with_zero()
    def forward(self, input_ids):
        """Look up embeddings from the row-split table and combine the
        partial results into complete embeddings."""
        # amp_white_identity marks the weight as fp16-eligible for AMP
        # (see ``amp_enabled`` in the class docstring).
        weight = flow._C.amp_white_identity(self.weight) if self.amp_enabled else self.weight
        # input_ids with shape (batch_size, seq_len), and sbp sign: [S(0), B]
        # Gather forward sbp sign
        # [B, S(0)] x [S(0), B] --> [S(0), P]
        #     ↑           ↑             ↑
        #   embed     input_ids    input_embeds
        input_embeds = flow._C.gather(weight, input_ids, axis=0)
        # Set the embeds sbp from [S(0), P] --> [S(0), B] to get complete embedding results.
        input_embeds = input_embeds.to_global(sbp=dist.get_hidden_sbp())
        return input_embeds
    def _fill_padding_idx_with_zero(self) -> None:
        # Currently unused (see FIXME above): would zero out the padding row.
        if self.padding_idx is not None:
            with flow.no_grad():
                self.weight[self.padding_idx] = flow.zeros(
                    self.embedding_dim,
                    placement=dist.get_layer_placement(0),
                    sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                )
    def extra_repr(self) -> str:
        s = "num_embeddings={num_embeddings}, embedding_dim={embedding_dim}"
        if self.padding_idx is not None:
            s += ", padding_idx={padding_idx}"
        return s.format(**self.__dict__)
class SinePositionalEmbedding(nn.Module):
    """Construct the sinusoidal positional embeddings.

    Even feature columns hold ``sin(pos * 10000^(-2i/d))`` and odd columns the
    matching ``cos`` terms; the table is registered as a (non-trainable) buffer.

    Arguments:
        num_embeddings: size of vocabulary.
        embedding_dim: dimension of embeddings.
    """
    def __init__(self, num_embeddings, embedding_dim):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.num_embeddings = num_embeddings
        # Full (num_embeddings, embedding_dim) table, broadcast on every rank.
        position_embedding = flow.zeros(
            num_embeddings,
            embedding_dim,
            dtype=flow.float32,
            placement=dist.get_layer_placement(0),
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        )
        # Column vector of positions 0..num_embeddings-1, shape (N, 1).
        position = flow._C.global_arange(
            start=0,
            end=num_embeddings,
            placement=dist.get_layer_placement(0),
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            dtype=flow.float32,
        ).unsqueeze(1)
        # Even feature indices 0, 2, 4, ... used to build the frequencies.
        position_range = flow._C.global_arange(
            start=0,
            end=embedding_dim,
            step=2,
            placement=dist.get_layer_placement(0),
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            dtype=flow.float32,
        )
        # div_term[i] = 10000^(-2i / embedding_dim)
        div_term = flow.exp(position_range * (-math.log(10000.0) / embedding_dim))
        # NOTE(review): with an odd embedding_dim the cos assignment below looks
        # like a shape mismatch (fewer odd columns than cos terms) — confirm
        # callers always use an even embedding_dim.
        position_embedding[:, 0::2] = flow.sin(position * div_term)
        position_embedding[:, 1::2] = flow.cos(position * div_term)
        self.register_buffer("position_embedding", position_embedding)
    def forward(self, position_ids):
        """Gather the sinusoidal rows selected by ``position_ids``."""
        position_embeds = flow._C.gather(self.position_embedding, position_ids, axis=0)
        return position_embeds
    def extra_repr(self) -> str:
        s = "num_embeddings={num_embeddings}, embedding_dim={embedding_dim}"
        return s.format(**self.__dict__)
class PatchEmbedding(nn.Module):
    """2D Image to Patch Embedding

    Arguments:
        img_size: size of input image. Default to 224.
        patch_size: embedded patch size. Default to 16.
        in_chans: input channel's size. Default to 3.
        embed_dim: dimension of embedded patch. Default to 768.
        norm_layer: normalization patch embedding or not. Default to None.
        flatten: flatten patch embedding or keep the 2-D shape. Default to True.
        layer_idx: A layer_idx sign which determines the placement. It will be used in pipeline
            parallelism. Default to 0.
    """
    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        norm_layer=None,
        flatten=True,
        *,
        layer_idx=0,
    ):
        super().__init__()
        # Accept either an int (square) or an explicit (height, width) tuple.
        img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        patch_size = patch_size if isinstance(patch_size, tuple) else (patch_size, patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        # Number of patches along each spatial dimension, and in total.
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten
        # Non-overlapping patch projection: a conv whose kernel and stride both
        # equal the patch size. The weight is broadcast (no tensor parallelism)
        # and placed on the pipeline stage given by layer_idx.
        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
        ).to_global(
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(layer_idx),
        )
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
    def forward(self, x):
        """Project an image batch into patch embeddings.

        Input is (B, C, H, W); output is (B, num_patches, embed_dim) when
        ``flatten`` is True, otherwise the conv's (B, embed_dim, H', W')."""
        B, C, H, W = x.shape
        assert (
            H == self.img_size[0]
        ), f"Input image height ({H}) doesn't match model ({self.img_size[0]})."
        assert (
            W == self.img_size[1]
        ), f"Input image width ({W}) doesn't match model ({self.img_size[1]})."
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        x = self.norm(x)
        return x
| 10,963 | 36.676976 | 95 | py |
libai | libai-main/libai/layers/activation.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from typing import Optional
import oneflow as flow
from oneflow import nn
class Activation(str, Enum):
    """String-valued names of the activation functions supported by
    :func:`build_activation`."""

    SquaredReLU = "squared_relu"
    GeLU = "gelu"
    GeLUTanh = "gelu_tanh"
    LeakyReLU = "leaky_relu"
    ReLU = "relu"
    Tanh = "tanh"
    QuickGELU = "quick_gelu"
# Reference implementation for unit testing / parity comparisons;
# probably not the fastest way.
class SquaredReLU(nn.Module):
    """Squared ReLU activation: ``relu(x) * relu(x)``."""

    def __init__(self) -> None:
        super().__init__()

    def forward(self, x: flow.Tensor) -> flow.Tensor:
        rectified = flow._C.relu(x)
        return rectified * rectified
class Passthrough(nn.Module):
    """Identity activation: ``forward`` returns its input unchanged. Used by
    :func:`build_activation` when no activation name is given."""
    def __init__(self) -> None:
        super().__init__()
    def forward(self, x: flow.Tensor) -> flow.Tensor:
        return x
class GeLUTanh(nn.Module):
    """GELU activation using the tanh approximation."""
    def __init__(self) -> None:
        super().__init__()
    def forward(self, x: flow.Tensor) -> flow.Tensor:
        """When the approximate argument is 'tanh', Gelu is estimated with:
        0.5 * x * (1.0 + flow.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * flow.pow(x, 3.0))))
        """
        return flow.nn.functional.gelu(x, approximate="tanh")
class QuickGELU(nn.Module):
    """Cheap sigmoid-based approximation of GELU."""
    def __init__(self) -> None:
        super().__init__()
    def forward(self, x: flow.Tensor) -> flow.Tensor:
        """QuickGELU is estimated with: x * flow.sigmoid(1.702 * x)"""
        return flow._C.quick_gelu(x)
def build_activation(activation: Optional[Activation]):
    """
    Fetching activation layers by name, e.g.,
    ``build_activation("gelu")`` returns ``nn.GELU()`` module.
    """
    # A falsy/missing name means "no activation": return the identity module.
    if not activation:
        return Passthrough()
    activation_classes = {
        Activation.ReLU: nn.ReLU,
        Activation.GeLU: nn.GELU,
        Activation.GeLUTanh: GeLUTanh,
        Activation.LeakyReLU: nn.LeakyReLU,
        Activation.SquaredReLU: SquaredReLU,
        Activation.Tanh: nn.Tanh,
        Activation.QuickGELU: QuickGELU,
    }
    # An unknown name raises KeyError, same as the original mapping lookup.
    return activation_classes[activation]()
| 2,535 | 27.818182 | 97 | py |
libai | libai-main/libai/layers/lm_logits.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.utils import distributed as dist
class LMLogits(nn.Module):
    """Compute language-model logits by multiplying hidden states with the
    transposed word-embedding matrix, optionally adding a trainable bias.

    Arguments:
        vocab_size: vocabulary size (length of the optional bias vector).
        bias: whether to add a trainable bias to the logits. Defaults to False.
    """
    def __init__(self, vocab_size, bias=False):
        super().__init__()
        # Bias lives on the last pipeline stage; it is split along the vocab
        # dimension, matching the vocab-split logits produced in forward.
        self.bias = (
            nn.Parameter(
                flow.zeros(
                    (vocab_size,),
                    dtype=flow.float32,
                    placement=dist.get_layer_placement(-1),
                    sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)]),
                )
            )
            if bias
            else None
        )
    def forward(self, input, word_embeddings):
        """LM logits using word embedding weights"""
        # input with sbp sign [S(0), B] and word_embeddings with sbp sign [S(0), B]
        # NOTE(l1aoxingyu): This is for pipeline parallelism
        # change word embedding placement from stage(0) to stage(-1)
        w = word_embeddings.to_global(placement=input.placement)
        # NOTE(l1aoxingyu): input x embed^T = logits with sbp sign
        # [S(0), B] x [B, S(1)] --> [S(0), S(1)]
        #     ↑           ↑              ↑
        #   input      embed^T        logits
        # Backward pass input.grad = logits.grad x embed with sbp sign
        # [S(0), S(1)] x [B, S(0)] --> [S(0), P]
        #      ↑             ↑              ↑
        # logits.grad      embed       input.grad
        # When use input.grad as head node for backward pass, need to convert
        # its sbp sign fromm [S(0), P] --> [S(0), B]
        input = input.to_global(grad_sbp=input.sbp)
        logits = flow._C.matmul(input, w, transpose_b=True)
        if self.bias is not None:
            logits = logits + self.bias
        return logits
| 2,336 | 36.693548 | 83 | py |
libai | libai-main/libai/layers/mlp.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.layers import Linear, build_activation
class MLP(nn.Module):
    """MLP
    MLP will take the input with h hidden state, project it to intermediate
    hidden dimension, perform gelu transformation, and project the
    state back into h hidden dimension.

    Arguments:
        hidden_size: size of each input and output sample.
        ffn_hidden_size: size of each intermediate sample.
        output_dropout_prob: Output dropout probability. Defaults to 0.0.
        init_method: method to initialize the first linear weight.
            Defaults to :func:`nn.init.xavier_normal_`.
        output_layer_init_method: method to initialize the second linear weight. If set to None,
            it will use ``init_method`` instead. Defaults to None.
        bias_gelu_fusion: If set to ``True``, it will fuse bias adding and elementwise
            gelu activation. Defaults to ``False``.
        bias_dropout_fusion: If set to ``True``, it will fuse bias adding and dropout.
            Defaults to ``False``.
        layer_idx: A layer_idx sign which determines the placement. It will be used in
            pipeline parallelism. Defaults to 0.
    """
    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        output_dropout_prob=0.0,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        *,
        layer_idx=0,
    ):
        super().__init__()
        self.output_dropout_prob = output_dropout_prob
        self.bias_gelu_fusion = bias_gelu_fusion
        self.bias_dropout_fusion = bias_dropout_fusion
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        # First projection (h -> 4h), column-parallel. With bias_gelu_fusion
        # the bias-add is skipped here and fused with gelu in forward.
        self.dense_h_to_4h = Linear(
            hidden_size,
            ffn_hidden_size,
            bias=True,
            parallel="col",
            skip_bias_add=bias_gelu_fusion,
            init_method=init_method,
            layer_idx=layer_idx,
        )
        if not bias_gelu_fusion:
            self.activation_func = build_activation("gelu")
        # Second projection (4h -> h), row-parallel. With bias_dropout_fusion
        # the bias-add is skipped here and fused with dropout in forward.
        self.dense_4h_to_h = Linear(
            ffn_hidden_size,
            hidden_size,
            bias=True,
            parallel="row",
            skip_bias_add=bias_dropout_fusion,
            init_method=output_layer_init_method,
            layer_idx=layer_idx,
        )
        if not bias_dropout_fusion:
            self.dropout = nn.Dropout(self.output_dropout_prob)
    def forward(self, hidden_states):
        intermediate = self.dense_h_to_4h(hidden_states)
        if self.bias_gelu_fusion:
            # skip_bias_add made the Linear return (output, bias); fuse the
            # bias-add with gelu in a single kernel.
            intermediate, bias = intermediate
            intermediate = flow._C.fused_bias_add_gelu(
                intermediate, bias, axis=intermediate.ndim - 1
            )
        else:
            intermediate = self.activation_func(intermediate)
        output = self.dense_4h_to_h(intermediate)
        if self.bias_dropout_fusion:
            # Same pattern: fuse the second bias-add with dropout.
            output, bias = output
            output = flow._C.fused_bias_add_dropout(
                output, bias, p=self.output_dropout_prob, axis=output.ndim - 1
            )
        else:
            output = self.dropout(output)
        return output
    def extra_repr(self) -> str:
        return "bias_gelu_fusion={}, bias_dropout_fusion={}, dropout={}".format(
            self.bias_gelu_fusion, self.bias_dropout_fusion, self.output_dropout_prob
        )
| 4,108 | 34.730435 | 96 | py |
libai | libai-main/libai/layers/cross_entropy.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
class ParallelCrossEntropyLoss(nn.Module):
    """This criterion acts like :class:`~flow.nn.CrossEntropyLoss` except it will
    execute distributed cross entropy loss computation cross different GPUs.
    """
    def forward(self, logits: flow.Tensor, target: flow.Tensor):
        """Function for the distributed cross entropy.

        Args:
            logits (flow.Tensor): vocab_parallel_logits with shape
                (batch_size, seq_length, vocab_size) and sbp signature is [S(0), S(2)].
            target (flow.Tensor): target with shape (batch_size, seq_length) and
                sbp signature is [S(0), B].

        Returns:
            flow.Tensor: per-token (unreduced) loss over the flattened
            batch*seq positions.
        """
        assert logits.ndim == 3
        assert target.ndim == 2
        assert logits.shape[0:2] == target.shape
        # Move target onto logits' placement (they may start on different
        # pipeline stages).
        target = target.to_global(placement=logits.placement)
        # Change -1 in target to 0 because sparse_softmax_cross_entropy don't accept -1
        # NOTE(review): positions that held -1 still contribute a loss term for
        # class 0 here — presumably masked out by the caller; confirm.
        target = target * (target >= 0)
        lm_loss = flow._C.sparse_softmax_cross_entropy(
            logits.view(-1, logits.shape[-1]),
            target.view(-1),
        )
        return lm_loss
| 1,779 | 35.326531 | 87 | py |
libai | libai-main/libai/layers/linear.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import oneflow as flow
from oneflow import nn
from libai.utils import distributed as dist
class Linear1D(nn.Module):
    r"""Linear layer with 1D parallelism which includes column parallelism and row parallelism.
    The linear layer is defined as :math:`y = xA^T + b`.
    In column parallelism, A^T is parallelized along the second dimension
    as :math:`A^T = [A_1, ..., A_p]`.
    In row parallelism, A^T is parallelized along the first dimension and X along its second
    dimension as:
    .. math::
        A^T = \begin{bmatrix}
        A\_1 \\
        . \\
        . \\
        . \\
        A\_p
        \end{bmatrix}
        x = \begin{bmatrix}
        x\_1 & ... & x\_p
        \end{bmatrix}
    Arguments:
        in_features: size of each input sample.
        out_features: size of each output sample.
        bias: If set to ``False``, the layer will not learn an additive bias. Defaults to ``True``.
        parallel: Parallel mode. Defaults to "data".
        init_method: method to initialize weight. Defaults to :func:`nn.init.xavier_normal_`.
        skip_bias_add: skip adding bias but instead return it, so that adding bias can be fused with
            other elementwise operations. Defaults to ``False``.
        layer_idx: A layer_idx sign which determines the placement. It will be used in pipeline
            parallelism. Defaults to 0.
        dtype: the dtype of weight. Defaults to ``flow.float32``

    Note:
        When ``bias`` and ``skip_bias_add`` are both True, ``forward`` returns
        the tuple ``(output, bias)`` instead of a single tensor.
    """
    def __init__(
        self,
        in_features,
        out_features,
        bias=True,
        parallel="data",
        init_method=nn.init.xavier_normal_,
        skip_bias_add=False,
        dtype=flow.float32,
        *,
        layer_idx=0,  # enforce layer_idx passed with keyword
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.parallel = parallel
        self.skip_bias_add = skip_bias_add
        # Choose sbp signatures for weight/bias according to the parallel mode.
        if parallel == "col":
            # Column parallel
            # weight sbp sign: [B, S(0)], weight will be transposed when performing matmul
            # so weight sbp sign actually be [B, S(1)]
            # bias sbp sign: [B, S(0)]
            weight_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)])
            bias_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)])
        elif parallel == "row":
            # Row parallel
            # weight sbp sign: [B, S(1)], weight will be transposed when performing matmul
            # so weight sbp sign actually be [B, S(1)]
            # bias sbp sign: [B, B]
            weight_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(1)])
            bias_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
        elif parallel == "data":
            # No tensor parallelism: both weight and bias are broadcast.
            weight_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
            bias_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
        else:
            raise KeyError(f"{parallel} is not supported! Only support ('data', 'row' and 'col')")
        self.weight = flow.nn.Parameter(
            flow.empty(
                (out_features, in_features),
                dtype=dtype,
                placement=dist.get_layer_placement(layer_idx),  # for pipeline parallelism placement
                sbp=weight_sbp,
            )
        )
        # Env switch to skip random init, e.g. when a checkpoint will be
        # loaded anyway.
        if os.getenv("ONEFLOW_LINEAR_EMBEDDING_SKIP_INIT", "0") != "1":
            init_method(self.weight)
        self.bias = (
            flow.nn.Parameter(
                flow.zeros(
                    (out_features,),
                    dtype=dtype,
                    placement=dist.get_layer_placement(layer_idx),
                    sbp=bias_sbp,
                )
            )
            if bias
            else None
        )
    def forward(self, x):
        # Each branch below handles one weight sbp layout; the input is
        # converted to the sbp that makes the matmul legal, and (for the row
        # case) the output is converted back for the next layer.
        if dist.same_sbp(self.weight.sbp, dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)])):
            # Column-parallel weight.
            # If the last dim of weight sbp sign is S(0), then last dim of weight.t sbp
            # sign is S(1), so the last dim of x sbp sign must be B.
            if self.weight.sbp[-1] == flow.sbp.split(0):
                x_sbp = x.sbp[:-1] + (flow.sbp.broadcast,)
                x = x.to_global(sbp=x_sbp)
            # x.grad sbp must be x.sbp, otherwise backward pass cannot be performed correctly.
            x = x.to_global(grad_sbp=x.sbp)
            x = flow.matmul(x, self.weight, transpose_b=True)
        elif dist.same_sbp(
            self.weight.sbp, dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(1)])
        ):
            # Row-parallel weight.
            # If the last dim of weight sbp sign is S(1), then last dim of weight.t sbp
            # sign is S(0), so the last dim of x sbp sign must be S(ndim-1).
            if self.weight.sbp[-1] == flow.sbp.split(1):
                x_sbp = x.sbp[:-1] + (flow.sbp.split(x.ndim - 1),)
                x = x.to_global(sbp=x_sbp)
                out_sbp = x.sbp[:-1] + (flow.sbp.broadcast,)
            else:
                out_sbp = x.sbp
            x = flow.matmul(x, self.weight, transpose_b=True)
            # Change x.sbp for followup forward pass.
            # This line can be removed when sbp can be auto inferred.
            x = x.to_global(sbp=out_sbp)
        elif dist.same_sbp(
            self.weight.sbp, dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
        ):
            # Data-parallel (broadcast) weight.
            # x.grad sbp must be x.sbp, otherwise backward pass cannot be performed correctly.
            x = x.to_global(grad_sbp=x.sbp)
            # NOTE(chengcheng): when input x is [S(0), B], there is no need to change sbp for x.
            # x = x.to_global(sbp=dist.get_nd_sbp([flow.sbp.split(0), flow.sbp.split(0)]))
            x = flow.matmul(x, self.weight, transpose_b=True)
        else:
            # Not supported weight_sbp, deduce sbp and communicate with nccl automatically.
            x = flow.matmul(x, self.weight, transpose_b=True)
        if self.bias is not None:
            if self.skip_bias_add:
                # Caller fuses the bias-add itself (see class docstring).
                return x, self.bias
            else:
                return x + self.bias
        else:
            return x
    def extra_repr(self) -> str:
        return "in_features={}, out_features={}, bias={}, parallel={}".format(
            self.in_features,
            self.out_features,
            self.bias is not None,
            self.parallel,
        )
# Give an alias for Linear1d
Linear = Linear1D
| 7,098 | 38.220994 | 100 | py |
libai | libai-main/libai/layers/transformer_layer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow.nn as nn
from libai.utils import distributed as dist
from .attention import AttnMaskType, MultiheadAttention
from .droppath import DropPath
from .layer_norm import LayerNorm
from .mlp import MLP
class TransformerLayer(nn.Module):
    """A single transformer layer.
    Transformer layer takes input with size [bsz, seq_length, hidden size] and returns an
    output of the same size.
    The input and output has same sbp sign, (S(0), B).
    Arguments:
        hidden_size: size of hidden state.
        ffn_hidden_size: size of feed forward neural network.
        num_attention_heads: number of attention heads.
        is_decoder: used to specify whether this is transformer encoder layer or transformer
            decoder layer. Default: ``False``.
        attention_dropout_prob: dropout probability of attention weights.
        output_dropout_prob: dropout probability of output.
        layernorm_epsilon: epsilon used in layernorm layer. Default: `1e-5`.
        init_method: method to initialize the input layer weights.
        output_layer_init_method: method to initialize the output layer weights.
            If None, use `init_method`.
        bias_gelu_fusion: whether fuse add bias and gelu. Default: ``False``.
        bias_dropout_fusion: whether fuse add bias and dropout. Default: ``False``.
        scale_mask_softmax_fusion: whether to fuse scale, mask and softmax. Default: ``False``.
        apply_query_key_layer_scaling: if `true`, scaling the attention score by layer index.
            Default: ``False``.
        apply_residual_post_layernorm: if ``true``, use original BERT residual
            connection ordering. Otherwise, use Megatron BERT residual connection which
            is more stable when scaling model size introduced in
            https://arxiv.org/pdf/1909.08053.pdf.
            Default: ``False``.
        layer_idx: the layer index, which determines the placement.
    """
    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        is_decoder=False,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        drop_path_prob=0.0,
        layernorm_epsilon=1e-5,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        apply_residual_post_layernorm=False,
        attn_mask_type=AttnMaskType.padding,
        *,
        layer_idx=0
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.ffn_hidden_size = ffn_hidden_size
        self.num_attention_heads = num_attention_heads
        self.attention_dropout_prob = attention_dropout_prob
        self.output_dropout_prob = output_dropout_prob
        self.layernorm_epsilon = layernorm_epsilon
        self.attn_mask_type = attn_mask_type
        self.layer_idx = layer_idx
        self.is_decoder = is_decoder
        self.bias_gelu_fusion = bias_gelu_fusion
        self.bias_dropout_fusion = bias_dropout_fusion
        self.scale_mask_softmax_fusion = scale_mask_softmax_fusion
        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
        self.apply_residual_post_layernorm = apply_residual_post_layernorm
        self.init_method = init_method
        if output_layer_init_method is None:
            # Fall back to the input-layer initializer when none is given.
            output_layer_init_method = init_method
        self.output_layer_init_method = output_layer_init_method
        # Stochastic depth on the residual branches; Identity when disabled.
        self.drop_path = DropPath(drop_path_prob) if drop_path_prob > 0.0 else nn.Identity()
        self.input_layernorm = LayerNorm(
            self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
        )
        self.self_attention = self.build_attention(is_cross_attention=False)
        self.post_attention_layernorm = LayerNorm(
            self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
        )
        if self.is_decoder:
            # Decoder layers additionally attend over the encoder output.
            self.cross_attention = self.build_attention(is_cross_attention=True)
            self.post_cross_attention_layernorm = LayerNorm(
                self.hidden_size, eps=self.layernorm_epsilon, layer_idx=self.layer_idx
            )
        self.mlp = MLP(
            self.hidden_size,
            self.ffn_hidden_size,
            self.output_dropout_prob,
            self.init_method,
            output_layer_init_method=self.output_layer_init_method,
            bias_gelu_fusion=self.bias_gelu_fusion,
            bias_dropout_fusion=self.bias_dropout_fusion,
            layer_idx=self.layer_idx,
        )
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        use_cache=False,
    ):
        """
        Args:
            hidden_states: shape is (batch_size, seq_length, hidden_size),
                sbp signature is (S(0), B).
            attention_mask: the combination of key padding mask and casual mask of hidden states
                with shape (batch_size, 1, seq_length, seq_length) and the sbp
                signature is (S(0), B),
            encoder_states: encoder output with shape (batch_size, seq_length, hidden_size)
                and the sbp signature is (S(0), B), which will be used in cross attention.
            encoder_attention_mask: key padding mask of encoder states with shape
                (batch_size, 1, seq_length, seq_length) and the sbp signature is (S(0), B).
            past_key_value: tuple of key and value, each shape is
                (seq_length, bsz, num_heads, head_size), For decoder layer,
                the past_key_value contains the states both from self attention
                and cross attention.
            use_cache: it will be set to `True` when the model is in the inference phase and
                used for incremental decoding.
        """
        # Change placement for pipeline parallelsim
        hidden_states = hidden_states.to_global(placement=dist.get_layer_placement(self.layer_idx))
        # hidden_states shape: (batch_size, seq_length, hidden_size)
        if attention_mask is not None:
            attention_mask = attention_mask.to_global(
                placement=dist.get_layer_placement(self.layer_idx)
            )
        if past_key_value is not None:
            if self.is_decoder:
                # Decoder caches hold (self_k, self_v, cross_k, cross_v);
                # split them for the two attention modules below.
                assert len(past_key_value) == 4
                self_attn_past_key_value = past_key_value[:2]
                cross_attn_past_key_value = past_key_value[2:]
            else:
                self_attn_past_key_value = past_key_value
                cross_attn_past_key_value = None
        else:
            self_attn_past_key_value, cross_attn_past_key_value = None, None
        # Pre-LN block: normalize, attend, then add the residual.
        layernorm_output = self.input_layernorm(hidden_states)
        attention_output = self.self_attention(
            layernorm_output,
            attention_mask=attention_mask,
            past_key_value=self_attn_past_key_value,
            use_cache=use_cache,
        )
        attention_output = self.drop_path(attention_output)
        if use_cache:
            # With caching enabled, attention returns (output, (key, value)).
            attention_output, presents = attention_output
        if self.apply_residual_post_layernorm:
            # Original-BERT ordering: residual taken after layernorm.
            residual = layernorm_output
        else:
            residual = hidden_states
        hidden_states = residual + attention_output
        layernorm_output = self.post_attention_layernorm(hidden_states)
        if self.is_decoder:
            attention_output = self.cross_attention(
                layernorm_output,
                encoder_states,
                attention_mask=encoder_attention_mask,
                past_key_value=cross_attn_past_key_value,
                use_cache=use_cache,
            )
            if use_cache:
                # Append the cross-attention cache so presents has 4 entries.
                attention_output, decoder_presents = attention_output
                presents += decoder_presents
            attention_output = self.drop_path(attention_output)
            if self.apply_residual_post_layernorm:
                residual = layernorm_output
            else:
                residual = hidden_states
            hidden_states = residual + attention_output
            layernorm_output = self.post_cross_attention_layernorm(hidden_states)
        mlp_output = self.mlp(layernorm_output)
        mlp_output = self.drop_path(mlp_output)
        if self.apply_residual_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states
        output = residual + mlp_output
        if use_cache:
            output = (output, presents)
        return output
    def build_attention(self, is_cross_attention=False):
        """Construct the self- or cross-attention module for this layer."""
        return MultiheadAttention(
            self.hidden_size,
            self.num_attention_heads,
            is_cross_attention=is_cross_attention,
            attention_dropout_prob=self.attention_dropout_prob,
            output_dropout_prob=self.output_dropout_prob,
            init_method=self.init_method,
            output_layer_init_method=self.output_layer_init_method,
            bias_dropout_fusion=self.bias_dropout_fusion,
            scale_mask_softmax_fusion=self.scale_mask_softmax_fusion,
            apply_query_key_layer_scaling=self.apply_query_key_layer_scaling,
            attn_mask_type=self.attn_mask_type,
            layer_idx=self.layer_idx,
        )
| 10,084 | 39.502008 | 99 | py |
libai | libai-main/libai/layers/__init__.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .activation import build_activation
from .cross_entropy import ParallelCrossEntropyLoss
from .embedding import Embedding, SinePositionalEmbedding, VocabEmbedding, PatchEmbedding
from .layer_norm import LayerNorm, RMSLayerNorm
from .linear import Linear, Linear1D
from .conv import Conv1D
from .lm_logits import LMLogits
from .mlp import MLP
from .transformer_layer import TransformerLayer
from .attention import MultiheadAttention
from .droppath import DropPath, drop_path
# Public API of ``libai.layers``: these are the names re-exported by
# ``from libai.layers import *``. Keep in sync with the imports above.
__all__ = [
    "Embedding",
    "VocabEmbedding",
    "SinePositionalEmbedding",
    "PatchEmbedding",
    "build_activation",
    "Linear",
    "Linear1D",
    "Conv1D",
    "MLP",
    "LayerNorm",
    "RMSLayerNorm",
    "TransformerLayer",
    "MultiheadAttention",
    "ParallelCrossEntropyLoss",
    "LMLogits",
    "drop_path",
    "DropPath",
]
| 1,453 | 29.93617 | 89 | py |
libai | libai-main/libai/layers/conv.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import oneflow as flow
from oneflow import nn
from libai.utils import distributed as dist
class Conv1D(nn.Module):
    """Linear transformation whose weight is stored as ``(in_features, out_features)``,
    i.e. the matmul runs without ``transpose_b``, with 1D tensor-parallel support.

    Args:
        in_features: size of each input sample.
        out_features: size of each output sample.
        bias: if ``False``, no additive bias is created. Defaults to ``True``.
        parallel: one of ``"data"`` (replicated weight), ``"col"`` (weight split
            along the output-feature dim), or ``"row"`` (weight split along the
            input-feature dim).
        init_method: initializer applied to the weight.
        skip_bias_add: if ``True``, ``forward`` returns ``(x @ W, bias)`` instead
            of adding the bias, so a later fused kernel can apply it.
        dtype: parameter dtype. Defaults to ``flow.float32``.
        layer_idx: layer index used to pick the pipeline-parallel placement.
    """
    def __init__(
        self,
        in_features,
        out_features,
        bias=True,
        parallel="data",
        init_method=nn.init.xavier_normal_,
        skip_bias_add=False,
        dtype=flow.float32,
        *,
        layer_idx=0,
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.parallel = parallel
        self.skip_bias_add = skip_bias_add
        if parallel == "col":
            # Weight (in, out) split on dim 1 = output features; bias replicated.
            weight_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(1)])
            bias_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
        elif parallel == "row":
            # Weight split on dim 0 = input features.
            # NOTE(review): bias here is S(0) over (out_features,) while the
            # matmul output over a split reduction dim is partial-sum —
            # confirm this matches Linear1D's row-parallel convention.
            weight_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)])
            bias_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)])
        elif parallel == "data":
            weight_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
            bias_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
        else:
            raise KeyError(f"{parallel} is not supported! Only support ('data', 'row' and 'col')")
        self.weight = flow.nn.Parameter(
            flow.empty(
                (in_features, out_features),
                dtype=dtype,
                placement=dist.get_layer_placement(layer_idx),  # for pipeline parallelism placement
                sbp=weight_sbp,
            )
        )
        # Allow a loader to skip the (possibly expensive) random init when the
        # weights will be overwritten from a checkpoint anyway.
        if os.getenv("ONEFLOW_LINEAR_EMBEDDING_SKIP_INIT", "0") != "1":
            init_method(self.weight)
        self.bias = (
            flow.nn.Parameter(
                flow.zeros(
                    (out_features,),
                    dtype=dtype,
                    placement=dist.get_layer_placement(layer_idx),
                    sbp=bias_sbp,
                )
            )
            if bias
            else None
        )
    def forward(self, x):
        """Compute ``x @ weight (+ bias)``; branch on the weight's SBP signature."""
        if dist.same_sbp(self.weight.sbp, dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(1)])):
            # Weight split on output-feature dim: x's last dim must be broadcast.
            if self.weight.sbp[-1] == flow.sbp.split(1):
                x_sbp = x.sbp[:-1] + (flow.sbp.broadcast,)
                x = x.to_global(sbp=x_sbp)
            # x.grad sbp must be x.sbp, otherwise backward cannot run correctly.
            x = x.to_global(grad_sbp=x.sbp)
            x = flow.matmul(x, self.weight)
        elif dist.same_sbp(
            self.weight.sbp, dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)])
        ):
            # Weight split on input-feature dim: x must be split on its last dim.
            if self.weight.sbp[-1] == flow.sbp.split(0):
                x_sbp = x.sbp[:-1] + (flow.sbp.split(x.ndim - 1),)
                x = x.to_global(sbp=x_sbp)
                out_sbp = x.sbp[:-1] + (flow.sbp.broadcast,)
            else:
                out_sbp = x.sbp
            x = flow.matmul(x, self.weight)
            # Restore the expected output SBP for the following layers.
            x = x.to_global(sbp=out_sbp)
        elif dist.same_sbp(
            self.weight.sbp, dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
        ):
            # Replicated weight (data-parallel): no relayout needed.
            x = x.to_global(grad_sbp=x.sbp)
            x = flow.matmul(x, self.weight)
        else:
            # Unsupported weight SBP: let OneFlow infer and communicate automatically.
            x = flow.matmul(x, self.weight)
        if self.bias is not None:
            if self.skip_bias_add:
                return x, self.bias
            else:
                return x + self.bias
        else:
            return x
    def extra_repr(self) -> str:
        return "in_features={}, out_features={}, bias={}, parallel={}".format(
            self.in_features,
            self.out_features,
            self.bias is not None,
            self.parallel,
        )
| 4,218 | 31.960938 | 100 | py |
libai | libai-main/libai/layers/layer_norm.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.utils import distributed as dist
class LayerNorm(nn.Module):
    """Applies Layer Normalization over a mini-batch of inputs in 1D parallelism.
    Args:
        normalized_shape: input shape from an expected input of size.
        eps: a value added to the denominator for numerical stability. Defaults to 1e-5.
        elementwise_affine: a boolean value that when set to ``True``, this module
            has learnable per-element affine parameters initialized to ones (for weights)
            and zeros (for biases). Default: ``True``.
        bias: when ``False`` (and ``elementwise_affine=True``) the bias parameter
            is still allocated — keeping the parameter layout unchanged — but it is
            frozen at zero via ``requires_grad=False``. Defaults to ``True``.
        layer_idx: a layer_idx sign which determines the placement. It will be used in pipeline
            parallelism. Defaults to 0.
    """
    def __init__(
        self, normalized_shape, eps=1e-5, elementwise_affine=True, bias=True, *, layer_idx=0
    ):
        super().__init__()
        if isinstance(normalized_shape, int):
            normalized_shape = (normalized_shape,)
        self.normalized_shape = tuple(normalized_shape)
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        self.layer_idx = layer_idx
        if elementwise_affine:
            self.weight = nn.Parameter(
                flow.ones(
                    normalized_shape,
                    dtype=flow.float32,
                    placement=dist.get_layer_placement(layer_idx),
                    sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                )
            )
            # Bias is always created so checkpoints keep a consistent layout;
            # ``bias=False`` merely freezes it at zero.
            self.bias = nn.Parameter(
                flow.zeros(
                    normalized_shape,
                    dtype=flow.float32,
                    placement=dist.get_layer_placement(layer_idx),
                    sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                ),
                requires_grad=bias,
            )
        else:
            self.weight = None
            self.bias = None
    def forward(self, x):
        # BUGFIX: with elementwise_affine=False there is no weight to read the
        # placement from (self.weight is None); fall back to the placement
        # derived from layer_idx instead of raising AttributeError.
        if self.elementwise_affine:
            x = x.to_global(placement=self.weight.placement)
        else:
            x = x.to_global(placement=dist.get_layer_placement(self.layer_idx))
        assert x.shape[-len(self.normalized_shape) :] == self.normalized_shape
        # Normalize (and apply affine params) over the trailing normalized dims.
        begin_norm_axis = x.ndim - len(self.normalized_shape)
        begin_params_axis = x.ndim - len(self.normalized_shape)
        if self.elementwise_affine:
            y = flow._C.layer_norm_affine(
                x,
                self.weight,
                self.bias,
                begin_norm_axis=begin_norm_axis,
                begin_params_axis=begin_params_axis,
                epsilon=self.eps,
            )
        else:
            y = flow._C.layer_norm(
                x,
                begin_norm_axis=begin_norm_axis,
                begin_params_axis=begin_params_axis,
                epsilon=self.eps,
            )
        return y
    def extra_repr(self) -> str:
        return "{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}".format(
            **self.__dict__
        )
class RMSLayerNorm(nn.Module):
    """T5 uses a layer_norm which only scales and doesn't shift, which is also known as
    Root Mean Square Layer Normalization thus varience is calculated w/o mean and
    there is no bias. More details see: https://arxiv.org/abs/1910.07467.
    Args:
        normalized_shape: input shape from an expected input of size.
        eps: a value added to the denominator for numerical stability. Defaults to 1e-6.
        layer_idx: a layer_idx sign which determines the placement. It will be used in pipeline
            parallelism. Defaults to 0.
    """
    def __init__(self, normalized_shape, eps=1e-6, layer_idx=0):
        super().__init__()
        self.layer_idx = layer_idx
        # Only a scale parameter — RMSNorm has no shift/bias.
        self.weight = flow.nn.Parameter(
            flow.ones(
                normalized_shape,
                dtype=flow.float32,
                placement=dist.get_layer_placement(layer_idx),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            )
        )
        self.l2norm_epsilon = eps
    def forward(self, hidden_states):
        # Move the input onto this layer's pipeline stage before the fused kernel.
        hidden_states = hidden_states.to_global(placement=self.weight.placement)
        return flow._C.rms_norm(hidden_states, self.weight, self.weight.shape, self.l2norm_epsilon)
| 5,406 | 39.962121 | 99 | py |
libai | libai-main/libai/layers/droppath.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
import oneflow.nn as nn
def drop_path(x, drop_prob: float = 0.5, training: bool = False, scale_by_keep: bool = True):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
    # No-op at eval time or when dropping is disabled: return the input as-is.
    if not training or drop_prob == 0.0:
        return x
    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per sample: mask shape (batch, 1, 1, ...) broadcasts
    # over every non-batch dimension, so this works for any tensor rank.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    # similar operation to new_tensor(shape).bernoulli_(keep_prob)
    mask = flow.rand(*mask_shape, dtype=x.dtype, sbp=x.sbp, placement=x.placement)
    mask = (mask < keep_prob).to(flow.float32)
    if scale_by_keep and keep_prob > 0.0:
        # Rescale so the expected value of the output matches the input.
        mask = mask / keep_prob
    return x * mask
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True):
        super().__init__()
        self.scale_by_keep = scale_by_keep
        self.drop_prob = drop_prob

    def forward(self, x):
        # Delegate to the functional form; nn.Module's ``self.training`` flag
        # decides whether dropping is active.
        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
| 1,826 | 37.87234 | 99 | py |
libai | libai-main/libai/layers/attention.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import math
from typing import Tuple
import oneflow as flow
from oneflow import nn
from .linear import Linear
class AttnMaskType(enum.Enum):
    """Kinds of attention mask selected by ``MultiheadAttention`` when picking
    a fused scale/mask/softmax kernel."""

    padding = 1  # mask out padded key positions
    causal = 2  # lower-triangular mask for autoregressive decoding
class MultiheadAttention(nn.Module):
    """Multi-head attention layer, support self attention and cross attention.
    Args:
        hidden_size: size of hidden state.
        num_attention_heads: number of attention heads.
        is_cross_attention: used to specify whether it is self attention or cross attention.
            Defaults to False.
        attention_dropout_prob: dropout probability of attention weights.
            Defaults to 0.0.
        output_dropout_prob: dropout probability of output. Defaults to 0.0.
        init_method: method to initialize the input layer weights.
            Defaults to ``init.xavier_normal_``.
        output_layer_init_method: method to initialize the output layer weights.
            If None, use ``init_method``.
        bias_dropout_fusion: whether to fuse add bias and dropout.
            Defaults to False.
        scale_mask_softmax_fusion: whether to fuse scale, mask and softmax.
            Defaults to False.
        apply_query_key_layer_scaling: if `True`, scaling the attention score by layer index.
            Defaults to False.
        layer_idx: a layer_idx sign which determines the placements.
            It will be used in pipeline parallelism. Defaults to 0.
    """
    def __init__(
        self,
        hidden_size,
        num_attention_heads,
        is_cross_attention=False,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        attn_mask_type=AttnMaskType.padding,
        *,
        layer_idx=0
    ):
        super().__init__()
        self.hidden_size = hidden_size
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        assert (
            hidden_size % num_attention_heads == 0
        ), "hidden_size must be divisible by num_attention_heads."
        self.num_heads = num_attention_heads
        self.head_size = hidden_size // num_attention_heads
        self.attn_mask_type = attn_mask_type
        self.attention_dropout_prob = attention_dropout_prob
        self.dropout = nn.Dropout(p=attention_dropout_prob)
        # Standard 1/sqrt(d_k) scaling of the attention scores.
        self.norm_factor = 1.0 / math.sqrt(float(self.head_size))
        self.coeff = None
        if apply_query_key_layer_scaling:
            # Per-layer scaling: divide scores by (layer_idx + 1); the fused
            # kernels below multiply the coeff back in after masking.
            self.coeff = layer_idx + 1
            self.norm_factor /= self.coeff
        self.is_cross_attention = is_cross_attention
        self.scale_mask_softmax_fusion = scale_mask_softmax_fusion
        self.bias_dropout_fusion = bias_dropout_fusion
        if self.bias_dropout_fusion:
            # Bias add + dropout are fused after self.dense; keep only the prob.
            self.output_dropout_prob = output_dropout_prob
        else:
            self.output_dropout = nn.Dropout(p=output_dropout_prob)
        if self.is_cross_attention:
            # Cross attention: query from decoder states, key/value from encoder.
            self.query = Linear(
                self.hidden_size,
                self.hidden_size,
                parallel="col",
                init_method=init_method,
                layer_idx=layer_idx,
            )
            self.key_value = Linear(
                self.hidden_size,
                self.hidden_size * 2,
                parallel="col",
                init_method=init_method,
                layer_idx=layer_idx,
            )
        else:
            # Self attention: q, k, v projected by one fused linear.
            self.query_key_value = Linear(
                self.hidden_size,
                self.hidden_size * 3,
                parallel="col",
                init_method=init_method,
                layer_idx=layer_idx,
            )
        self.dense = Linear(
            self.hidden_size,
            self.hidden_size,
            parallel="row",
            init_method=output_layer_init_method,
            skip_bias_add=self.bias_dropout_fusion,
            layer_idx=layer_idx,
        )
    def forward(
        self,
        hidden_states: flow.Tensor,
        encoder_states: flow.Tensor = None,
        attention_mask: flow.Tensor = None,
        past_key_value: Tuple[flow.Tensor, flow.Tensor] = None,
        use_cache: bool = False,
    ):
        """
        Args:
            hidden_states (flow.Tensor): shape is [bsz, tgt_len, hidden_size].
            encoder_states (flow.Tensor, optional): shape is [bsz, src_len, hidden_size].
                Defaults to None.
            attention_mask (flow.Tensor, optional): shape is [bsz, 1, tgt_len, src_len].
                It should be the combination of padding mask and casual mask.
                It is the padding mask of source input when used with self-attention in encoder.
                And it is the combination of padding mask of target input and casual mask when
                used with self-attention in decoder. It is the padding mask of source input when
                used with cross-attention in decoder.
                Defaults to None.
            past_key_value (Tuple[flow.Tensor, flow.Tensor], optional): tuple of key and value,
                each shape is [bsz, num_heads, src_len, head_size]. Defaults to None.
            use_cache (bool, optional): it will be set to True, when the model is in the inference
                phase and used for incremental decoding. Defaults to False.
        """
        # hidden_states, encoder_states: [S(0), B]
        # attention_mask: [S(0), B]
        if encoder_states is not None:
            encoder_states = encoder_states.to_global(placement=hidden_states.placement)
        if attention_mask is not None:
            attention_mask = attention_mask.to_global(placement=hidden_states.placement)
        bsz, tgt_len = hidden_states.size()[:2]
        if self.is_cross_attention:
            # if it is cross attention, key and value should be calculated only once, and the
            # result can be reused.
            query = self.query(hidden_states)
            query = query.view(bsz, -1, self.num_heads, self.head_size)
            query = query.permute(0, 2, 1, 3)
            if past_key_value is not None:
                key, value = past_key_value
            elif encoder_states is not None:
                key_value = self.key_value(encoder_states)
                key_value = key_value.view(bsz, -1, self.num_heads, 2 * self.head_size)
                key_value = key_value.permute(0, 2, 1, 3)
                key, value = flow.chunk(key_value, chunks=2, dim=-1)
            else:
                raise ValueError(
                    "past_key_value and encoder_states cannot be None at the same time."
                )
        else:
            # if it is self attention, query, key, and value are all obtained from hidden_states.
            # when in the inference phase of an incremental decoder,
            # hidden_states is the last-added state,
            # the full key and value could be obtained by concatenating with past_key_value.
            query_key_value = self.query_key_value(hidden_states)
            query_key_value = query_key_value.view(bsz, -1, self.num_heads, 3 * self.head_size)
            query_key_value = query_key_value.permute(
                0, 2, 1, 3
            )  # [bsz, num_heads, src_len, 3 * head_size]
            query, key, value = flow.chunk(query_key_value, chunks=3, dim=-1)
            if past_key_value is not None:
                # Incremental decoding: extend the cache along the seq dim.
                past_key, past_value = past_key_value
                key = flow.cat((past_key.type_as(key), key), dim=2)
                value = flow.cat((past_value.type_as(value), value), dim=2)
        # query, key, value: [S(0), S(1)], shape: [bsz, num_heads, seq_length, head_size]
        if use_cache:
            past_key_value = (key, value)
        # [bsz, num_heads, tgt_len, src_len] with [S(0), S(1)]
        attention_scores = flow.matmul(query, key, transpose_b=True, alpha=self.norm_factor)
        # [S(0), S(1)] x [S(0), B] = [S(0), S(1)]
        if attention_mask is not None:
            if self.scale_mask_softmax_fusion:
                if self.attn_mask_type == AttnMaskType.padding:
                    attention_mask = (
                        attention_mask.expand_as(attention_scores) if use_cache else attention_mask
                    )
                    # Fused mask + softmax + dropout in a single kernel.
                    attention_weights = flow._C.fused_scale_mask_softmax_dropout(
                        attention_scores,
                        attention_mask,
                        fill_value=-10000.0,
                        scale=self.coeff,
                        p=self.attention_dropout_prob,
                    )[0]
            else:
                if self.coeff is not None:
                    # Undo the per-layer scaling folded into norm_factor.
                    attention_scores *= self.coeff
                attention_scores = flow.mul(attention_scores, attention_mask)
                attention_scores = attention_scores - 10000.0 * (1 - attention_mask)
                # TODO(xingyu.liao): graph will occur `where_scalar` errors
                # when using `masked_fill`
                # attention_scores = attention_scores.masked_fill(1 - attention_mask, -10000.0)
                attention_weights = flow.softmax(attention_scores, dim=-1)
                # [bsz, num_heads, tgt_len, src_len]
                attention_weights = self.dropout(attention_weights)
        else:
            if self.scale_mask_softmax_fusion and self.attn_mask_type == AttnMaskType.causal:
                # Fused causal (lower-triangular) mask + softmax + dropout.
                attention_weights = flow._C.fused_scale_tril_softmax_mask_scale(
                    attention_scores,
                    p=self.attention_dropout_prob,
                    diagonal=0,
                    tril_scale_value=self.coeff,
                    tril_fill_value=-10000.0,
                )[0]
            else:
                attention_weights = flow.softmax(attention_scores, dim=-1)
                # [bsz, num_heads, tgt_len, src_len]
                attention_weights = self.dropout(attention_weights)
        # Context shape: [bsz, num_heads, tgt_len, head_size] with [S(0), S(1)]
        context = flow.matmul(attention_weights, value)
        # Change shape: [bsz, num_heads, tgt_len, head_size] -> [bsz, tgt_len, num_heads, head_size]
        context = context.transpose(1, 2)
        # Concat multi-head results from
        # [bsz, tgt_len, num_heads, head_size] -> [bsz, tgt_len, num_heads * head_size]
        # SBP sign: [S(0), S(2)]
        # [S(0), S(2)] x [B, S(0)] = [S(0), P] -> [S(0), B]
        output = self.dense(context.flatten(2))
        if self.bias_dropout_fusion:
            # dense returned (out, bias) because skip_bias_add was set.
            output, bias = output
            output = flow._C.fused_bias_add_dropout(
                output, bias, p=self.output_dropout_prob, axis=output.ndim - 1
            )
        else:
            output = self.output_dropout(output)
        if use_cache:
            output = (output, past_key_value)
        return output
    def extra_repr(self) -> str:
        return "hidden_size={}, num_heads={}, is_cross_attention={}".format(
            self.hidden_size,
            self.num_heads,
            self.is_cross_attention,
        )
| 11,794 | 40.826241 | 100 | py |
libai | libai-main/libai/engine/hooks.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import math
import operator
import time
from collections import Counter
import oneflow as flow
from libai.evaluation import flatten_results_dict
from libai.utils import distributed as dist
from libai.utils.checkpoint import Checkpointer
from libai.utils.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
from libai.utils.events import EventWriter
from libai.utils.timer import Timer
from .trainer import HookBase
# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/hooks.py
# --------------------------------------------------------
"""
Implement some common hooks.
"""
logger = logging.getLogger(__name__)
class CallbackHook(HookBase):
    """
    Create a hook using callback functions provided by the user.
    """

    def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):
        """
        Each argument is a function that takes one argument: the trainer.
        """
        self._before_train = before_train
        self._after_train = after_train
        self._before_step = before_step
        self._after_step = after_step

    def before_train(self):
        if self._before_train is not None:
            self._before_train(self.trainer)

    def after_train(self):
        if self._after_train is not None:
            self._after_train(self.trainer)
        # The callbacks may be closures that hold a reference to the trainer;
        # drop them here to break the potential circular reference.
        del self._before_train, self._after_train
        del self._before_step, self._after_step

    def before_step(self):
        if self._before_step is not None:
            self._before_step(self.trainer)

    def after_step(self):
        if self._after_step is not None:
            self._after_step(self.trainer)
class IterationTimer(HookBase):
    """
    Track the time spent for each iteration (each run_step call in the trainer).
    Print a summary in the end of training.
    This hook uses the time between the call to its :meth:`before_step`
    and :meth:`after_step` methods.
    Under the convention that :meth:`before_step` of all hooks should only
    take negligible amount of time, the :class:`IterationTimer` hook should be
    placed at the beginning of the list of hooks to obtain accurate timing.
    """
    def __init__(self, warmup_iter=3):
        """
        Args:
            warmup_iter (int): the number of iterations at the beginning to exclude
                from timing.
        """
        self._warmup_iter = warmup_iter
        self._step_timer = Timer()
    def before_train(self):
        self._start_time = time.perf_counter()
        # Total timer accumulates only the time spent inside steps (it is
        # resumed in before_step and measured again in after_train).
        self._total_timer = Timer()
        self._total_timer.pause()
    def after_train(self):
        total_time = time.perf_counter() - self._start_time
        total_time_minus_hooks = self._total_timer.seconds()
        # Everything not covered by the step timer is attributed to hooks.
        hook_time = total_time - total_time_minus_hooks
        num_iter = self.trainer.iter + 1 - self.trainer.start_iter - self._warmup_iter
        if num_iter > 0 and total_time_minus_hooks > 0:
            # Speed is meaningful only after warmup
            # NOTE this format is parsed by grep in some scripts
            logger.info(
                "Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
                    num_iter,
                    str(datetime.timedelta(seconds=int(total_time_minus_hooks))),
                    total_time_minus_hooks / num_iter,
                )
            )
            logger.info(
                "Total training time: {} ({} on hooks)".format(
                    str(datetime.timedelta(seconds=int(total_time))),
                    str(datetime.timedelta(seconds=int(hook_time))),
                )
            )
    def before_step(self):
        self._step_timer.reset()
        self._total_timer.resume()
    def after_step(self):
        # +1 because we're in after_step
        iter_done = self.trainer.iter - self.trainer.start_iter + 1
        if iter_done >= self._warmup_iter:
            sec = self._step_timer.seconds()
            self.trainer.storage.put_scalars(time=sec)
        else:
            # Still warming up: restart the clocks so warmup iterations are
            # excluded from the totals reported in after_train.
            self._start_time = time.perf_counter()
            self._total_timer.reset()
        self._total_timer.pause()
class PeriodicWriter(HookBase):
    """
    Flush a set of EventWriters to their backends periodically.
    Runs every ``period`` iterations and once more after the final iteration.
    """
    def __init__(self, writers, period=20):
        """
        Args:
            writers (list[EventWriter]): a list of EventWriter objects
            period (int):
        """
        self._writers = writers
        for writer in writers:
            assert isinstance(writer, EventWriter), writer
        self._period = period
    def after_step(self):
        cur_iter = self.trainer.iter
        hit_period = (cur_iter + 1) % self._period == 0
        is_final_iter = cur_iter == self.trainer.max_iter - 1
        if hit_period or is_final_iter:
            for writer in self._writers:
                writer.write()
    def after_train(self):
        for writer in self._writers:
            writer.close()
class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
    """
    Same as :class:`libai.utils.checkpoint.PeriodicCheckpointer`, but as a hook.
    Note that when used as a hook,
    it is unable to save additional data other than what's defined
    by the given `checkpointer`.
    It is executed every ``period`` iterations and after the last iteration.
    """
    def before_train(self):
        # max_iter is only known once training starts; presumably read by
        # _PeriodicCheckpointer.step() to detect the final iteration — confirm
        # against libai.utils.checkpoint.
        self.max_iter = self.trainer.max_iter
    def after_step(self):
        # Delegate the periodic-save decision to _PeriodicCheckpointer.step().
        self.step(self.trainer.iter)
class BestCheckpointer(HookBase):
    """
    Checkpoints best weights based off given metric.
    This hook should be used in conjunction to and executed after the hook
    that produces the metric, e.g. `EvalHook`.
    """
    def __init__(
        self,
        eval_period: int,
        checkpointer: Checkpointer,
        val_metric: str,
        mode: str = "max",
        file_prefix: str = "model_best",
    ) -> None:
        """
        Args:
            eval_period (int): the period `EvalHook` is set to run.
            checkpointer: the checkpointer object used to save checkpoints.
            val_metric (str): validation metric to track for best checkpoint, e.g. "acc@1"
            mode (str): one of {'max', 'min'}. controls whether the chosen val metric should be
                maximized or minimized, e.g. for "acc@1" it should be "max"
            file_prefix (str): the prefix of checkpoint's filename, defaults to "model_best"
        """
        self._period = eval_period
        self._val_metric = val_metric
        assert mode in [
            "max",
            "min",
        ], f'Mode "{mode}" to `BestCheckpointer` is unknown. It should be one of {"max", "min"}.'
        # Choose the comparison so that `_compare(new, best)` is True exactly
        # when `new` is an improvement.
        if mode == "max":
            self._compare = operator.gt
        else:
            self._compare = operator.lt
        self._checkpointer = checkpointer
        self._file_prefix = file_prefix
        # Best metric seen so far and the iteration it was produced at;
        # None until the first valid evaluation result arrives.
        self.best_metric = None
        self.best_iter = None
    def _update_best(self, val, iteration):
        # Reject NaN/inf so a broken evaluation can never become the "best" model.
        if math.isnan(val) or math.isinf(val):
            return False
        self.best_metric = val
        self.best_iter = iteration
        return True
    def _best_checking(self):
        metric_tuple = self.trainer.storage.latest().get(self._val_metric)
        # `flag` carries rank 0's save/skip decision to all ranks: rank 0 sets
        # it to 1 when a new best is found, then it is broadcast below so every
        # rank calls save() in lockstep.
        flag = flow.zeros(1)
        if dist.is_main_process():
            if metric_tuple is None:
                logger.warning(
                    f"Given val metric {self._val_metric} does not seem to be computed/stored. "
                    "Will not be checkpointed based on that."
                )
            else:
                latest_metric, metric_iter = metric_tuple
                if self.best_metric is None:
                    # First evaluation: any finite value becomes the initial best.
                    if self._update_best(latest_metric, metric_iter):
                        flag = flag + 1
                        logger.info(
                            f"Saved first model at {self.best_metric:0.5f} @ {self.best_iter} steps"
                        )
                elif self._compare(latest_metric, self.best_metric):
                    flag = flag + 1
                    logger.info(
                        f"Saved best model as latest eval score for {self._val_metric} is "
                        f"{latest_metric:0.5f}, better than last best score "
                        f"{self.best_metric:0.5f} @ iteration {self.best_iter}."
                    )
                    self._update_best(latest_metric, metric_iter)
                else:
                    logger.info(
                        f"Not saving as latest eval score for "
                        f"{self._val_metric} is {latest_metric:0.5f}, "
                        f"not better than best score {self.best_metric:0.5f} "
                        f"@ iteration {self.best_iter}."
                    )
        dist.synchronize()
        # Broadcast rank 0's decision to every rank before saving.
        flag = flag.to_global(
            sbp=flow.sbp.broadcast, placement=flow.env.all_device_placement("cpu")
        )
        if flag.to_local().item() == 1:
            self._checkpointer.save(f"{self._file_prefix}")
    def after_step(self):
        # same conditions as `EvalHook`
        next_iter = self.trainer.iter + 1
        if (
            self._period > 0
            and next_iter % self._period == 0
            and next_iter != self.trainer.max_iter
        ):
            self._best_checking()
    def after_train(self):
        # same conditions as `EvalHook`
        if self.trainer.iter + 1 >= self.trainer.max_iter:
            self._best_checking()
def flatten_results_dict(results):
    """
    Expand a hierarchical dict of scalars into a flat dict whose keys join the
    nesting levels with "/", e.g. ``{"a": {"b": 1}}`` becomes ``{"a/b": 1}``.
    Args:
        results (dict): possibly-nested dict of metrics.
    Returns:
        dict: flat mapping from "outer/inner" key paths to leaf values.
    """
    flat = {}
    for key, value in results.items():
        if isinstance(value, dict):
            for sub_key, sub_value in flatten_results_dict(value).items():
                flat[key + "/" + sub_key] = sub_value
        else:
            flat[key] = value
    return flat
class EvalHook(HookBase):
    """
    Run an evaluation function periodically, and at the end of training.
    It is executed every ``eval_period`` iterations and after the last iteration.
    """
    def __init__(self, eval_period, eval_function):
        """
        Args:
            eval_period (int): the period to run `eval_function`.
            eval_function (callable): a function which takes no arguments, and
                returns a nested dict of evaluation metrics.
        Note:
            This hook must be enabled in all or none workers.
            If you would like only certain workers to perform evaluation,
            give other workers a no-op function (`eval_function=lambda: None`).
        """
        self._period = eval_period
        self._func = eval_function
    def _do_eval(self):
        results = self._func()
        if results:
            assert isinstance(
                results, dict
            ), "Eval function must return a dict. Got {} instead.".format(results)
            # Fix: `flatten_results_dict` was referenced here but never defined or
            # imported, so any non-empty eval result raised NameError. It is now
            # defined above (mirroring detectron2's evaluation.testing helper).
            flattened_results = flatten_results_dict(results)
            # Validate that every leaf is float-convertible before logging.
            for k, v in flattened_results.items():
                try:
                    v = float(v)
                except Exception:
                    raise ValueError(
                        "[EvalHook] eval_function should return a nested dict of float. "
                        "Got '{}: {}' instead.".format(k, v)
                    )
            self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
        # Evaluation may take different time among workers.
        # A barrier makes them start the next iteration together.
        dist.synchronize()
    def after_step(self):
        next_iter = self.trainer.iter + 1
        if self._period > 0 and next_iter % self._period == 0:
            # do the last eval in after_train
            if next_iter != self.trainer.max_iter:
                self._do_eval()
    def after_train(self):
        # This condition is to prevent the eval from running after a failed training
        if self.trainer.iter + 1 >= self.trainer.max_iter:
            self._do_eval()
        # func is likely a closure that holds reference to the trainer
        # therefore we clean it to avoid circular reference in the end
        del self._func
class LRScheduler(HookBase):
    """
    A hook which executes a oneflow builtin LR scheduler and summarizes the LR.
    It is executed after every iteration.
    """
    def __init__(self, optimizer=None, scheduler=None):
        """
        Args:
            optimizer (flow.optim.Optimizer):
            scheduler (flow.optim.LRScheduler):
                if a :class:`ParamScheduler` object, it defines the multiplier over the base LR
                in the optimizer.
                If any argument is not given, will try to obtain it from the trainer.
        """
        self._optimizer = optimizer
        self._scheduler = scheduler
    def before_train(self):
        # Fall back to the trainer's optimizer when none was supplied.
        self._optimizer = self._optimizer or self.trainer.optimizer
        self._best_param_group_id = LRScheduler.get_best_param_group_id(self._optimizer)
    @staticmethod
    def get_best_param_group_id(optimizer):
        # NOTE: some heuristics on what LR to summarize
        # summarize the param group with most parameters
        largest_group = max(len(g["params"]) for g in optimizer.state_dict()["param_groups"])
        if largest_group == 1:
            # If all groups have one parameter,
            # then find the most common initial LR, and use it for summary
            # NOTE(review): "_options" is presumably OneFlow's key for per-group
            # hyperparameters in optimizer.state_dict() — confirm against flow.optim.
            lr_count = Counter(
                [g["_options"]["lr"] for g in optimizer.state_dict()["param_groups"]]
            )
            lr = lr_count.most_common()[0][0]
            for i, g in enumerate(optimizer.state_dict()["param_groups"]):
                if g["_options"]["lr"] == lr:
                    return i
        else:
            for i, g in enumerate(optimizer.state_dict()["param_groups"]):
                if len(g["params"]) == largest_group:
                    return i
    def after_step(self):
        # Log the LR of the representative param group, then advance the
        # scheduler so the next iteration runs with the updated LR.
        lr = self.scheduler.get_last_lr()[self._best_param_group_id]
        self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False)
        self.scheduler.step()
    @property
    def scheduler(self):
        # Lazily resolve to the trainer's lr_scheduler when none was given.
        return self._scheduler or self.trainer.lr_scheduler
    def state_dict(self):
        # Only oneflow's built-in _LRScheduler carries resumable state here.
        if isinstance(self.scheduler, flow.optim.lr_scheduler._LRScheduler):
            return self.scheduler.state_dict()
        return {}
    def load_state_dict(self, state_dict):
        if isinstance(self.scheduler, flow.optim.lr_scheduler._LRScheduler):
            logger.info("Loading scheduler from state_dict ...")
            self.scheduler.load_state_dict(state_dict)
| 15,141 | 35.22488 | 100 | py |
libai | libai-main/libai/engine/__init__.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .default import DefaultTrainer, default_setup
| 672 | 38.588235 | 74 | py |
libai | libai-main/libai/engine/trainer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import weakref
from typing import Callable, List, Mapping
import oneflow as flow
from libai.utils import distributed as dist
from libai.utils.events import EventStorage, get_event_storage
# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/train_loop.py
# --------------------------------------------------------
class HookBase:
    """
    Base class for hooks registered with :class:`TrainerBase`.
    A hook may override any of the four callbacks below; the trainer invokes
    them around the training loop like this:
    ::
        hook.before_train()
        for iter in range(start_iter, max_iter):
            hook.before_step()
            trainer.run_step()
            hook.after_step()
            iter += 1
        hook.after_train()
    Notes:
        1. Inside a callback, ``self.trainer`` gives access to the surrounding
           context (e.g., model, current iteration, or config when using
           :class:`DefaultTrainer`).
        2. Logic that could live in either :meth:`before_step` or
           :meth:`after_step` should usually go in :meth:`after_step`.
           By convention :meth:`before_step` takes negligible time, so hooks
           that rely on the distinction (e.g., timers) behave correctly.
    """
    # Weak reference to the owning trainer; assigned when the hook is
    # registered via TrainerBase.register_hooks.
    trainer: "TrainerBase" = None
    def before_train(self):
        """Called once, before the first iteration."""
    def after_train(self):
        """Called once, after the last iteration."""
    def before_step(self):
        """Called before each iteration."""
    def after_step(self):
        """Called after each iteration."""
class TrainerBase:
    """
    Base class for iterative trainer with hooks.
    The only assumption we made here is: the training runs in a loop.
    A subclass can implement what the loop is.
    We made no assumptions about the existence of dataloader, optimizer, model, etc.
    Attributes:
        iter(int): The current iteration.
        start_iter(int): The iteration to start with.
            By convention the minimum possible value is 0.
        max_iter(int): The iteration to end training.
        storage(EventStorage): An EventStorage that's opened during the course of training.
    """
    def __init__(self):
        self._hooks: List[HookBase] = []
        self.iter: int = 0
        self.start_iter: int = 0
        self.max_iter: int
        self.storage: EventStorage
    def register_hooks(self, hooks):
        """
        Register hooks to the trainer. The hooks are executed in the order
        they are registered.
        Args:
            hooks (list[Optional[HookBase]]): list of hooks
        """
        hooks = [h for h in hooks if h is not None]
        for h in hooks:
            assert isinstance(h, HookBase)
            # To avoid circular reference, hooks and trainer cannot own each other.
            # This normally does not matter, but will cause memory leak if the
            # involved objects contain __del__:
            # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
            h.trainer = weakref.proxy(self)
        self._hooks.extend(hooks)
    def train(self, start_iter: int, max_iter: int):
        """
        Run the training loop from ``start_iter`` (inclusive) to ``max_iter``
        (exclusive), calling hooks around each step.
        Args:
            start_iter, max_iter (int): See docs above
        """
        logger = logging.getLogger(__name__)
        logger.info("Starting training from iteration {}".format(start_iter))
        self.iter = self.start_iter = start_iter
        self.max_iter = max_iter
        with EventStorage(self.start_iter) as self.storage:
            try:
                self.before_train()
                for self.iter in range(start_iter, max_iter):
                    self.before_step()
                    self.run_step()
                    self.after_step()
                # self.iter == max_iter can be used by `after_train` to
                # tell whether the training successfully finished or failed
                # due to exceptions.
                self.iter += 1
            except Exception:
                logger.exception("Exception during training:")
                raise
            finally:
                # after_train runs even on failure so hooks can flush/clean up.
                self.after_train()
    def before_train(self):
        for h in self._hooks:
            h.before_train()
    def after_train(self):
        for h in self._hooks:
            h.after_train()
    def before_step(self):
        # Keep the storage's iteration counter in sync before hooks run.
        self.storage.iter = self.iter
        for h in self._hooks:
            h.before_step()
    def after_step(self):
        # NOTE(review): relies on the concrete trainer providing `self.cfg`
        # (DefaultTrainer sets it in __init__); TrainerBase itself never
        # defines cfg, so bare TrainerBase subclasses must supply it.
        self.storage.samples = (self.iter + 1) * self.cfg.train.global_batch_size
        for h in self._hooks:
            h.after_step()
    def run_step(self):
        raise NotImplementedError
    @staticmethod
    def write_metrics(
        loss_dict: Mapping[str, flow.Tensor],
        data_time: float,
        prefix: str = "",
    ) -> None:
        """
        Log losses and timing to the event storage (rank 0 only).
        Args:
            loss_dict (dict): dict of scalar losses
            data_time (float): time taken by the dataloader iteration
            prefix (str): prefix for logging keys
        """
        # Get metric values and move them to rank 0, because logging/storage
        # below only happens on the main process.
        metrics_dict = {
            k: dist.tensor_to_rank0(v, device="cpu", to_local=True) for k, v in loss_dict.items()
        }
        metrics_dict["data_time"] = data_time
        # TODO: Gather metrics among all workers for logging
        # all_metrics_dict = dist.gather(metrics_dict)
        all_metrics_dict = metrics_dict
        if dist.is_main_process():
            storage = get_event_storage()
            # data_time among workers can have high variance. The actual latency
            # caused by data_time is the maximum among workers.
            # data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
            data_time = all_metrics_dict.pop("data_time")
            storage.put_scalar("data_time", data_time)
            # average the rest metrics
            # metrics_dict = {
            #     k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
            # }
            metrics_dict = all_metrics_dict
            # Only entries whose key contains "loss" contribute to the total.
            total_losses_reduced = sum(v for k, v in metrics_dict.items() if "loss" in k)
            storage.put_scalar("{}total_loss".format(prefix), total_losses_reduced)
            if len(metrics_dict) > 1:
                storage.put_scalars(**metrics_dict)
class EagerTrainer(TrainerBase):
    """
    A simple eager trainer for the most common type of task:
    single-cost single-optimizer single-data-source iterative optimization,
    optionally using data-parallelism.
    It assumes that in every step, you:
    1. Compute the loss with a data from the data_loader.
    2. Compute the gradients with the above loss.
    3. Update the model with the optimizer.
    All other tasks during training (checkpointing, logging, evaluation, LR schedule)
    are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.
    If you want to do anything fancier than this,
    either subclass TrainerBase and implement your own `run_step`,
    or write your own training loop.
    """
    def __init__(self, model, data_loader, optimizer, grad_acc_steps=1):
        """
        Args:
            model: a flow.nn.Module. Takes a data from data_loader and returns a
                dict of losses.
            data_loader: an iterable. Contains data to be used to call model.
            optimizer: a flow optimizer.
            grad_acc_steps (int): number of micro-steps whose gradients are
                accumulated before a single optimizer update.
        """
        super().__init__()
        # We set the model to training mode in the trainer.
        # However it's valid to train a model that's in eval mode.
        # If you want your model (or a submodule of it) to behave
        # like evaluation during training, you can overwrite its train() method.
        model.train()
        self.model = model
        self.data_loader = data_loader
        self._data_loader_iter = iter(data_loader)
        self.optimizer = optimizer
        self.grad_acc_steps = grad_acc_steps
    def run_step(self, get_batch: Callable, input_placement_device: str = "cuda"):
        """
        Implement the standard training logic described above.
        Args:
            get_batch (Callable): converts raw dataloader output into model inputs.
            input_placement_device (str): device to place the batch on.
        """
        assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
        start = time.perf_counter()
        # If you want to do something with the data, you can wrap the dataloader.
        data = next(self._data_loader_iter)
        data = get_batch(
            data, input_placement_device, getattr(self.data_loader, "mixup_func", None)
        )
        data_time = time.perf_counter() - start
        loss_dict = self.model(**data)
        # Divide by grad_acc_steps so the accumulated gradient is the average
        # (not the sum) over the micro-batches in one accumulation window.
        losses = sum(v for k, v in loss_dict.items() if "loss" in k) / self.grad_acc_steps
        losses.backward()
        self.write_metrics(loss_dict, data_time)
        # Step/zero the optimizer once per accumulation window; relies on
        # self.iter being advanced by the TrainerBase training loop.
        if (self.iter + 1) % self.grad_acc_steps == 0:
            self.optimizer.clip_grad()
            self.optimizer.step()
            self.optimizer.zero_grad()
class GraphTrainer(TrainerBase):
    """
    A simple graph trainer for training and evaluating models in a static graph mode.
    """
    def __init__(self, graph, data_loader, grad_acc_steps=1):
        """
        Args:
            graph: an ``nn.Graph`` whose ``model`` attribute is the module to train.
            data_loader: an iterable yielding training batches.
            grad_acc_steps (int): number of micro-batches concatenated into one
                graph invocation for gradient accumulation.
        """
        super().__init__()
        graph.model.train()
        self.data_loader = data_loader
        self._data_loader_iter = iter(data_loader)
        self.graph = graph
        self.grad_acc_steps = grad_acc_steps
        # Scratch state used to assemble `grad_acc_steps` micro-batches into a
        # single mini-batch inside run_step.
        self._temp_data = None
        self._temp_count = 0
    def run_step(self, get_batch: Callable, input_placement_device: str = "cuda"):
        """
        Implement the standard training logic described above.
        Args:
            get_batch (Callable): converts raw dataloader output into graph inputs.
            input_placement_device (str): device to place the batch on.
        """
        assert self.graph.model.training, "[SimpleTrainer] model was changed to eval mode!"
        start = time.perf_counter()
        while self._temp_count != self.grad_acc_steps:
            # If you want to do something with the data, you can wrap the dataloader.
            data = next(self._data_loader_iter)
            self._temp_count += 1
            if self._temp_data is None:
                self._temp_data = data
            else:
                # In static graph mode, data will be sliced in nn.Graph automatically,
                # so to get a full mini-batch we concat local tensors field by field.
                for key, value in data.get_fields().items():
                    temp_value = self._temp_data.get(key)
                    self._temp_data.get(key).tensor = flow.cat(
                        (temp_value.tensor, value.tensor), dim=0
                    )
        data = self._temp_data
        # Reset the scratch buffers for the next accumulation window.
        self._temp_count = 0
        self._temp_data = None
        data = get_batch(
            data, input_placement_device, getattr(self.data_loader, "mixup_func", None)
        )
        data_time = time.perf_counter() - start
        # If you want to do something with the losses, you can wrap the model.
        loss_dict = self.graph(**data)
        # Add this because when set up gradient accumulations, graph will return
        # an unpacked n-d tensor whose size is accumulation step
        for key, value in loss_dict.items():
            if "loss" in key:
                loss_dict[key] = value.mean()
            else:
                # NOTE: only support scalar tensor currently
                loss_dict[key] = value.sum()
        self.write_metrics(loss_dict, data_time)
| 12,550 | 34.86 | 98 | py |
libai | libai-main/libai/engine/default.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import time
from collections import OrderedDict
from typing import Callable, Optional
import oneflow as flow
from omegaconf import OmegaConf
from termcolor import colored
from libai.config import LazyConfig, instantiate, try_get_key
from libai.data import Instance
from libai.engine import hooks
from libai.engine.trainer import EagerTrainer, GraphTrainer, TrainerBase
from libai.evaluation import inference_on_dataset, print_csv_format
from libai.models import build_graph, build_model
from libai.optim import build_optimizer
from libai.scheduler import build_lr_scheduler
from libai.tokenizer import build_tokenizer
from libai.utils import distributed as dist
from libai.utils.checkpoint import Checkpointer
from libai.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from libai.utils.logger import setup_logger
# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/defaults.py
# --------------------------------------------------------
def _highlight(code, filename):
try:
import pygments
except ImportError:
return code
from pygments.formatters import Terminal256Formatter
from pygments.lexers import Python3Lexer, YamlLexer
lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer()
code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
return code
def _check_batch_size(cfg):
    """
    Reconcile ``cfg.train.train_micro_batch_size``, ``cfg.train.global_batch_size``
    and ``cfg.train.num_accumulation_steps``, enforcing the invariant
    ``global_batch_size == train_micro_batch_size * data_parallel_size * num_accumulation_steps``.
    Whichever value is missing is derived from the others
    (``num_accumulation_steps`` defaults to 1). Also sets ``cfg.train.samples``,
    the total number of training samples.
    Raises:
        ValueError: if the provided values are inconsistent, or neither batch
            size is set.
    """
    train_micro_batch_size = try_get_key(cfg, "train.train_micro_batch_size", default=None)
    global_batch_size = try_get_key(cfg, "train.global_batch_size", default=None)
    num_accumulation_steps = try_get_key(cfg, "train.num_accumulation_steps", default=None)
    if train_micro_batch_size is not None and global_batch_size is not None:
        # Both batch sizes given: derive or verify num_accumulation_steps.
        if num_accumulation_steps is None:
            if global_batch_size % (train_micro_batch_size * dist.get_data_parallel_size()) != 0:
                raise ValueError(
                    f"global_batch_size {global_batch_size} must be divisible by "
                    "train_micro_batch_size * data_parallel_size "
                    f"({train_micro_batch_size} * {dist.get_data_parallel_size()})"
                )
            cfg.train.num_accumulation_steps = global_batch_size // (
                train_micro_batch_size * dist.get_data_parallel_size()
            )
        else:
            if (
                global_batch_size
                != train_micro_batch_size * dist.get_data_parallel_size() * num_accumulation_steps
            ):
                raise ValueError(
                    f"global_batch_size {global_batch_size} must equal to "
                    "train_micro_batch_size * data_parallel_size * num_accumulation_steps "
                    f"({train_micro_batch_size} * {dist.get_data_parallel_size()} * {num_accumulation_steps})"  # noqa
                )
    elif train_micro_batch_size is not None and global_batch_size is None:
        # Only the micro batch size given: compute the global batch size.
        if num_accumulation_steps is None:
            cfg.train.num_accumulation_steps = 1
        cfg.train.global_batch_size = (
            train_micro_batch_size
            * dist.get_data_parallel_size()
            * cfg.train.num_accumulation_steps
        )
    elif train_micro_batch_size is None and global_batch_size is not None:
        # Only the global batch size given: compute the micro batch size.
        if num_accumulation_steps is None:
            cfg.train.num_accumulation_steps = 1
        if (
            global_batch_size % (dist.get_data_parallel_size() * cfg.train.num_accumulation_steps)
            != 0
        ):
            raise ValueError(
                f"global_batch_size {global_batch_size} must be divisible by "
                "data_parallel_size * num_accumulation_steps "
                f"({dist.get_data_parallel_size()} * {cfg.train.num_accumulation_steps})"
            )
        cfg.train.train_micro_batch_size = global_batch_size // (
            dist.get_data_parallel_size() * cfg.train.num_accumulation_steps
        )
    else:
        # Fix: the previous message ("... must be set either") was garbled.
        raise ValueError(
            "At least one of train.train_micro_batch_size and train.global_batch_size must be set"
        )
    # Set total training samples.
    cfg.train.samples = cfg.train.train_iter * cfg.train.global_batch_size
def _compile_dependencies():
    """Compile the dataset index builder's C++ helper, once per node."""
    logger = logging.getLogger(__name__)
    # =========================
    # Compile dataset C++ code.
    # =========================
    # TODO: move this to ninja
    if dist.get_local_rank() == 0:
        start_time = time.time()
        logger.info("> compiling dataset index builder ...")
        from libai.data.data_utils import compile_helper
        compile_helper()
        logger.info(
            ">>> done with dataset index builder. Compilation time: {:.3f} "
            "seconds".format(time.time() - start_time)
        )
    # Barrier: other local ranks wait here until compilation has finished.
    dist.synchronize()
    if dist.get_local_rank() == 0:
        # `start_time` is defined above under the same local-rank-0 guard, so
        # this second use is safe; the reported time also includes any wait
        # spent in the barrier.
        logger.info(
            ">>> done with compiling. "
            "Compilation time: {:.3f} seconds".format(time.time() - start_time)
        )
def default_setup(cfg, args):
    """
    Perform some basic common setups at the beginning of a job, including:
    1. Set up the libai logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Setup the distributed environment
    4. Setup tokenizer if it's an NLP related task
    5. Check batch_size
    6. Backup the config to the output directory
    7. Compile dependencies
    Args:
        cfg (omegaconf.dictconfig.DictConfig): the full LazyConfig of the job.
        args (argparse.NameSpace): the command line arguments to be logged
    """
    output_dir = try_get_key(cfg, "train.output_dir")
    if dist.is_main_process() and output_dir:
        os.makedirs(output_dir, exist_ok=True)
    cfg.train.resume = args.resume
    rank = dist.get_rank()
    logger = setup_logger(output_dir, distributed_rank=rank)
    logger.info("Rank of current process: {}. World size: {}".format(rank, dist.get_world_size()))
    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        # Fix: read the config through a context manager instead of leaving an
        # unclosed file handle for the GC to collect.
        with open(args.config_file, "r") as f:
            config_contents = f.read()
        logger.info(
            "Contents of args.config_file={}:\n{}".format(
                args.config_file,
                _highlight(config_contents, args.config_file),
            )
        )
    # Configure the distributed layout before batch sizes are validated, since
    # _check_batch_size reads dist.get_data_parallel_size().
    dist.setup_dist_util(cfg.train.dist)
    _check_batch_size(cfg)
    if dist.is_main_process() and output_dir:
        # Note: some of our scripts may expect the existence of
        # config.yaml in output directory
        path = os.path.join(output_dir, "config.yaml")
        LazyConfig.save(cfg, path)
        logger.info("Full config saved to {}".format(path))
    # NCCL fusion knobs (defaults: 16 MB threshold, 24 ops).
    flow.boxing.nccl.set_fusion_threshold_mbytes(
        try_get_key(cfg, "train.nccl_fusion_threshold_mb", default=16)
    )
    flow.boxing.nccl.set_fusion_max_ops_num(
        try_get_key(cfg, "train.nccl_fusion_max_ops", default=24)
    )
    _compile_dependencies()
class DefaultTrainer(TrainerBase):
"""
A trainer with default training logic. Compared to `TrainerBase`, it
also contains the following logic:
1. Create model, optimizer, scheduler, dataloader from the given config.
2. Load a checkpoint or `cfg.MODEL.WEIGHTS`, if exists.
3. Register a few common hooks defined by the config.
With standard features, it is created to simplify the **standard model training workflow** and
reduce code boilerplate for users who only need the standard training workflow.
It means this class makes **many assumptions** about your training logic that
may easily become invalid in a new research. In fact, any assumptions beyond those made in the
:class:`TrainerBase` are too much for research.
The code of this class has been annotated about restrictive assumptions it made.
When they do not work for you, you're encouraged to:
1. Overwrite methods of this class, OR:
2. Use :class:`TrainerBase`, which only does minimal SGD training and
nothing else. You can then add your own hooks if needed. OR:
3. Write your own training loop similar to ``tools/train_net.py``.
Also note that the behavior of this class, like other functions/classes in
this file, is not stable, since it is meant to represent the "common default behavior".
It is only guaranteed to work well with the standard models and training workflow in libai.
To obtain more stable behavior, write your own training logic with other public APIs.
Examples:
.. code-block:: python
trainer = DefaultTrainer(cfg)
trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
trainer.train()
Attributes:
scheduler:
checkpointer (Checkpointer):
cfg (omegaconf.dictconfig.DictConfig):
"""
    def __init__(self, cfg):
        """
        Build tokenizer, dataloaders, model, optimizer, scheduler, (optionally)
        static graphs and checkpointer from the config, then register hooks.
        Args:
            cfg (omegaconf.dictconfig.DictConfig): the full LazyConfig; reads
                ``cfg.train.*``, ``cfg.graph.enabled`` and ``cfg.dataloader``.
        """
        super().__init__()
        self.cfg = cfg
        logger = logging.getLogger("libai")
        # setup_logger is not called for LiBai
        if not logger.isEnabledFor(logging.INFO):
            setup_logger()
        # Initialize tokenizer
        self.tokenizer = self.build_tokenizer(cfg)
        self.start_iter = 0
        if cfg.train.resume:
            # Recover the iteration to resume from by parsing the
            # "last_checkpoint" marker file written by the checkpointer.
            save_file = os.path.join(cfg.train.output_dir, "last_checkpoint")
            try:
                with open(save_file, "r") as f:
                    last_saved = f.read().strip()
                assert (
                    last_saved != "model_final"
                ), "model training has finished, check your model in train.output_dir"
                self.start_iter = int(last_saved.split("_")[-1]) + 1
            except IOError:
                # If file doesn't exist, maybe because it has just been deleted.
                # We just set start_iter to 0.
                self.start_iter = 0
        # Record samples already consumed so the dataloaders (built below,
        # presumably reading cfg.dataloader.consumed_samples) can skip them.
        # In eager mode each iteration only consumes
        # global_batch_size / num_accumulation_steps samples.
        if cfg.graph.enabled:
            cfg.dataloader.consumed_samples = self.start_iter * cfg.train.global_batch_size
        else:
            cfg.dataloader.consumed_samples = (
                self.start_iter * cfg.train.global_batch_size // cfg.train.num_accumulation_steps
            )
        self.train_loader = None
        self.test_loader = []
        train_loader, val_loader, test_loader = self.build_train_loader(cfg, self.tokenizer)
        self.train_loader = train_loader
        if val_loader is not None:
            self.test_loader.append(val_loader)
        if test_loader is not None:
            self.test_loader.append(test_loader)
        self.test_loader.extend(self.build_test_loader(cfg, self.tokenizer))
        if cfg.train.rdma_enabled:
            # set rdma
            flow.env.init_rdma()
        # Automatically scale the hyperparams
        self.auto_scale_hyperparams(cfg, self.train_loader)
        # Assume these objects must be constructed in this order.
        dist.synchronize()
        start_time = time.time()
        logger.info("> Start building model...")
        self.model = self.build_model(cfg)
        dist.synchronize()
        logger.info(
            ">>> done with building model. "
            "Building time: {:.3f} seconds".format(time.time() - start_time)
        )
        self.optimizer = self.build_optimizer(cfg, self.model)
        self.lr_scheduler = self.build_lr_scheduler(cfg, self.optimizer)
        if cfg.graph.enabled:
            # Static graph mode: compile separate train/eval graphs.
            self.graph_train = self.build_graph(
                cfg, self.model, self.optimizer, self.lr_scheduler, is_train=True
            )
            self.graph_eval = self.build_graph(cfg, self.model, is_train=False)
            self._trainer = GraphTrainer(
                self.graph_train, self.train_loader, cfg.train.num_accumulation_steps
            )
        else:
            self._trainer = EagerTrainer(
                self.model, self.train_loader, self.optimizer, cfg.train.num_accumulation_steps
            )
        # Assume no other objects need to be checkpointed.
        # We can later make it checkpoint the stateful hooks
        if cfg.graph.enabled:
            self.checkpointer = Checkpointer(
                # Assume you want to save checkpoints together with logs/statistics
                self.model,
                cfg.train.output_dir,
                # In static graph mode, optimizer and scheduler state_dict will
                # be saved with graph.state_dict().
                graph=self.graph_train,
                # We print lr by `LRScheduler` hook, so we need to save/load eager lr_scheduler,
                # otherwise, lr will be reset to initial state when resuming training.
                lr_scheduler=self.lr_scheduler,
            )
        else:
            self.checkpointer = Checkpointer(
                # Assume you want to save checkpoints together with logs/statistics
                self.model,
                cfg.train.output_dir,
                optimizer=self.optimizer,
                lr_scheduler=self.lr_scheduler,
            )
        # Loading checkpoint before dataloader construction, because
        # dataloader needs to know the consumed iterations from
        # the last breakpoint.
        # NOTE(review): the comment above looks stale — the dataloaders are
        # already built earlier in this method; consumed samples are passed
        # through cfg.dataloader.consumed_samples instead.
        self.resume_or_load(cfg.train.resume)
        cfg.train.start_iter = self.start_iter
        # global_batch_size = micro_batch_size * num_gpus * num_accumulation_steps
        # When using gradient accumulation in graph mode, each run_step
        # handle `global_batch_size` samples.
        # When using gradient accumulation in eager mode, each run_step just handle
        # `micro_batch_size * num_gpus` samples, so we need to divide `num_accumulation_steps`
        # to get the actual `batch_size` for computing `throughput` and `consumed_samples`
        self.global_batch_size = (
            cfg.train.global_batch_size
            if cfg.graph.enabled
            else cfg.train.global_batch_size // cfg.train.num_accumulation_steps
        )
        self.max_iter = cfg.train.train_iter
        self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True` and `cfg.train.output_dir` contains the last checkpoint (defined by
a `last_checkpoint` file), resume from the file. Resuming means loading all
available states (eg. optimizer and scheduler) and update iteration counter
from the checkpoint. ``cfg.train.load_weight`` will not be used.
Otherwise, this is considered as an independent training. The method will load model
weights from the file ``cfg.train.load_weight`` (but will not load other states) and start
from iteration 0.
Args:
resume (bool): whether to do resume or not
"""
weight_path = self.cfg.train.load_weight
assert isinstance(
weight_path, str
), f"cfg.train.load_weight:{self.cfg.train.load_weight} must be string"
if resume:
assert self.checkpointer.has_checkpoint()
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration (or iter zero if there's no checkpoint).
assert self.start_iter == (
self.checkpointer.resume_or_load(None, resume=True).get("iter", -1) + 1
)
elif len(weight_path) != 0:
assert os.path.isdir(
weight_path
), f"cfg.train.load_weight:{self.cfg.train.load_weight} must be directory"
self.checkpointer.load(weight_path, checkpointables=[])
    def build_hooks(self):
        """
        Build a list of default hooks, including timing, evaluation,
        checkpointing, lr scheduling, precise BN, writing events.
        Returns:
            list[HookBase]:
        """
        ret = [
            hooks.IterationTimer(),
            hooks.LRScheduler(),  # for beauty lr scheduler printer in `nn.Graph` mode
            hooks.PeriodicCheckpointer(
                self.checkpointer,
                self.cfg.train.checkpointer.period,
                max_to_keep=self.cfg.train.checkpointer.max_to_keep,
            ),
        ]
        if self.cfg.train.evaluation.enabled:
            assert self.cfg.train.evaluation.eval_iter > 0, "run_iter must be positive number"
            # Closure captured by EvalHook; runs periodic evaluation and caches the
            # last results on `self` so other hooks/writers can read them.
            def test_and_save_results():
                # Evaluate through the compiled eval graph in graph mode,
                # otherwise use the eager module directly.
                model = self.graph_eval if self.cfg.graph.enabled else self.model
                self._last_eval_results = self.test(self.cfg, self.test_loader, model)
                return self._last_eval_results
            ret.append(hooks.EvalHook(self.cfg.train.evaluation.eval_period, test_and_save_results))
            # Track the best metric seen so far and snapshot it as "model_best".
            ret.append(
                hooks.BestCheckpointer(
                    self.cfg.train.evaluation.eval_period,
                    self.checkpointer,
                    val_metric=try_get_key(
                        self.cfg, "train.evaluation.eval_metric", default="Acc@1"
                    ),
                    mode=try_get_key(self.cfg, "train.evaluation.eval_mode", default="max"),
                )
            )
        if dist.is_main_process():
            # run writers in the end, so that evaluation metrics are written
            ret.append(hooks.PeriodicWriter(self.build_writers(), self.cfg.train.log_period))
        return ret
def build_writers(self):
"""
Build a list of writers to be used. By default it contains
writers that write metrics to the screen,
a json file, and a tensorboard event file respectively.
If you'd like a different list of writers, you can overwrite it in
your trainer.
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
It is now implemented by:
.. code-block:: python
return [
CommonMetricPrinter(self.global_batch_size, self.max_iter),
JSONWriter(os.path.join(self.cfg.train.output_dir, "metrics.json")),
TensorboardXWriter(self.cfg.train.output_dir),
]
"""
# Assume the default print/log frequency.
return [
# It may not always print what you want to see, since it prints "common" metrics only.
CommonMetricPrinter(self.global_batch_size, self.max_iter),
JSONWriter(os.path.join(self.cfg.train.output_dir, "metrics.json")),
TensorboardXWriter(self.cfg.train.output_dir),
]
    def train(self):
        """
        Run training.
        Returns:
            OrderedDict of results, if evaluation is enabled. Otherwise None.
        """
        # NOTE(review): the docstring promises results, but the value returned by
        # the base-class `train` is discarded here — confirm whether callers rely
        # on a return value (eval results are also cached in `_last_eval_results`).
        super().train(self.start_iter, self.max_iter)
    def run_step(self):
        """Run a single training step by delegating to the underlying trainer
        (eager or graph), keeping its iteration counter in sync with ours."""
        self._trainer.iter = self.iter
        self._trainer.run_step(self.get_batch, self.cfg.train.input_placement_device)
    @classmethod
    def get_batch(
        cls,
        data: Instance,
        input_placement_device: str = "cuda",
        mixup_func: Optional[Callable] = None,
    ):
        """
        Convert batched local tensor to distributed tensor for model step running.
        If you want to do something with batched data before model, (e.g. mixup),
        you can rewrite this function.
        Args:
            data (Instance): batch whose fields hold local tensors.
            input_placement_device (str): device type used when converting fields
                to global (distributed) tensors.
            mixup_func (Callable, optional): applied to the ``images``/``labels``
                fields (on GPU) before conversion.
        Returns:
            dict: mapping field name -> global tensor, ready to feed the model.
        """
        # A dataloader worker that crashed wraps its exception; re-raise it here
        # so the failure surfaces in the main process.
        if isinstance(data, flow.utils.data._utils.worker.ExceptionWrapper):
            data.reraise()
        if mixup_func is not None:
            images, labels = mixup_func(
                data.get("images").tensor.cuda(),
                data.get("labels").tensor.cuda(),
            )
            data.get("images").tensor = images
            data.get("labels").tensor = labels
        ret_dict = {}
        for key, value in data.get_fields().items():
            # NOTE(review): assumes the field's `to_global` converts its tensor
            # in place, since only the (mutated) `value.tensor` is collected
            # below — confirm against the field type's implementation.
            value.to_global(device_type=input_placement_device)
            ret_dict[key] = value.tensor
        return ret_dict
    @classmethod
    def build_tokenizer(cls, cfg):
        """
        Returns:
            libai.tokenizer.PreTrainedTokenizer:
        It now calls :func:`libai.tokenizer.build_tokenizer`.
        Returns None when no ``cfg.tokenization`` namespace is configured.
        May also rewrite ``cfg.model.cfg.vocab_size`` to the padded vocab size.
        """
        tokenizer = None
        if try_get_key(cfg, "tokenization") is not None:
            tokenizer = build_tokenizer(cfg.tokenization)
            # FIXME(lxy): In case model is not defined with cfg, the `vocab_size` can be
            # accessed by `model.vocab_size`.
            if try_get_key(cfg, "model.cfg.vocab_size", default=None) is not None:
                # In case the model does not need vocab_size as argument
                # Pad the vocab so it divides evenly across tensor-parallel
                # shards of the embedding table.
                multiple = (
                    cfg.tokenization.make_vocab_size_divisible_by
                    * cfg.train.dist.tensor_parallel_size
                )
                if hasattr(tokenizer, "padded_vocab_size"):
                    cfg.model.cfg.vocab_size = tokenizer.padded_vocab_size(multiple)
        return tokenizer
    @classmethod
    def build_model(cls, cfg):
        """
        Returns:
            flow.nn.Module:
        It now calls :func:`libai.models.build_model`.
        Overwrite it if you'd like a different model.
        """
        assert try_get_key(cfg, "model") is not None, "cfg must contain `model` namespace"
        # Set model fp16 option because of embedding layer `white_identity` manual
        # insert for amp training if provided.
        # amp is only effective when static-graph execution is also enabled.
        if try_get_key(cfg.model, "cfg.amp_enabled") is not None:
            cfg.model.cfg.amp_enabled = cfg.train.amp.enabled and cfg.graph.enabled
        # In case some model define without cfg keyword.
        elif try_get_key(cfg.model, "amp_enabled") is not None:
            cfg.model.amp_enabled = cfg.train.amp.enabled and cfg.graph.enabled
        model = build_model(cfg.model)
        logger = logging.getLogger(__name__)
        logger.info("Model:\n{}".format(model))
        # Place parameters/buffers according to the global distributed setting
        # (data/tensor/pipeline parallel placement).
        model._apply(dist.convert_to_distributed_default_setting)
        return model
    @classmethod
    def build_graph(cls, cfg, model, optimizer=None, lr_scheduler=None, is_train=True):
        """
        Build a static ``nn.Graph`` wrapping ``model`` via the module-level
        ``build_graph`` helper; the optimizer and lr_scheduler are only used
        for the training graph (``is_train=True``).
        Returns:
            the compiled graph object.
        """
        assert try_get_key(cfg, "graph") is not None, "cfg must contain `graph` namespace"
        graph = build_graph(cfg, model, optimizer, lr_scheduler, is_train)
        # cfg.graph.debug >= 0 turns on oneflow's graph debug verbosity level.
        debug_graph = try_get_key(cfg, "graph.debug", default=-1)
        if debug_graph >= 0:
            logger = logging.getLogger(__name__)
            logger.info("Graph debug mode on, automatically output debug info.")
            graph.debug(cfg.graph.debug)
        return graph
@classmethod
def build_optimizer(cls, cfg, model):
"""
Returns:
flow.optim.Optimizer:
It now calls :func:`libai.optim.build_optimizer`.
Overwrite it if you'd like a different optimizer.
"""
assert try_get_key(cfg, "optim") is not None, "cfg must contain `optim` namespace"
return build_optimizer(cfg.optim, model)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`libai.scheduler.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
assert (
try_get_key(cfg, "train.scheduler") is not None
), "cfg.train must contain `scheduler` namespace"
return build_lr_scheduler(cfg.train.scheduler, optimizer)
@classmethod
def build_train_loader(cls, cfg, tokenizer=None):
"""
Returns:
iterable
It now calls :func:`libai.data.build_train_valid_test_loader`.
Overwrite it if you'd like a different data loader.
"""
assert (
try_get_key(cfg, "dataloader.train") is not None
), "cfg must contain `dataloader.train` namespace"
logger = logging.getLogger(__name__)
logger.info("Prepare training, validating, testing set")
if cfg.graph.enabled:
# In static graph mode, data will be sliced in nn.Graph automatically,
# dataloader will get micro-batch-size and data will be concated
# in graph_trainer.run_step to get mini-batch-size.
cfg.dataloader.train.train_batch_size = cfg.train.train_micro_batch_size
else:
# In eager mode, gradient accumulation will act like PyTorch, so dataloader
# will get micro-batch-size
cfg.dataloader.train.train_batch_size = cfg.train.train_micro_batch_size
cfg.dataloader.train.test_batch_size = cfg.train.test_micro_batch_size
cfg.dataloader.train.seed = cfg.train.seed
# used by nlp dataloader
if hasattr(cfg.dataloader.train, "train_val_test_num_samples"):
eval_iter = (
(cfg.train.train_iter // cfg.train.evaluation.eval_period + 1)
* cfg.train.evaluation.eval_iter
if cfg.train.evaluation.enabled
# samples for test_dataset must be larger than 0 even if there is no evaluation
else 1
)
test_iter = cfg.train.evaluation.eval_iter if cfg.train.evaluation.enabled else 1
cfg.dataloader.train.train_val_test_num_samples = [
int(cfg.train.samples),
int(eval_iter * cfg.train.test_micro_batch_size * dist.get_data_parallel_size()),
int(test_iter * cfg.train.test_micro_batch_size * dist.get_data_parallel_size()),
]
if OmegaConf.is_list(cfg.dataloader.train.dataset):
for dataset in cfg.dataloader.train.dataset:
if hasattr(dataset, "seed"):
dataset.seed = cfg.train.seed
else:
dataset = cfg.dataloader.train.dataset
if hasattr(dataset, "seed"):
dataset.seed = cfg.train.seed
# Set tokenizer for each dataset
if tokenizer:
if OmegaConf.is_list(cfg.dataloader.train.dataset):
for dataset in cfg.dataloader.train.dataset:
dataset.tokenizer = tokenizer
else:
cfg.dataloader.train.dataset.tokenizer = tokenizer
train_loader, valid_loader, test_loader = instantiate(
cfg.dataloader.train, _recursive_=False
)
return train_loader, valid_loader, test_loader
    @classmethod
    def build_test_loader(cls, cfg, tokenizer=None):
        """
        Returns:
            iterable
        It now calls :func:`libai.data.build_image_test_loader` for CV tasks
        or :func:`libai.data.build_nlp_test_loader` for NLP tasks.
        Overwrite it if you'd like a different data loader.
        """
        # If there is no test_loader, just return []
        if not try_get_key(cfg, "dataloader.test", default=False):
            return []
        logger = logging.getLogger(__name__)
        logger.info("Prepare testing set")
        assert OmegaConf.is_list(
            cfg.dataloader.test
        ), f"dataloader.test must be list but got type of {type(cfg.dataloader.test)}"
        # Each test dataloader gets the shared batch size, seed, and tokenizer.
        for i in range(len(cfg.dataloader.test)):
            cfg.dataloader.test[i].test_batch_size = cfg.train.test_micro_batch_size
            cfg.dataloader.test[i].seed = cfg.train.seed # set seed
            if tokenizer:
                cfg.dataloader.test[i].dataset.tokenizer = tokenizer
        # list[dataloader1, dataloader2, ...]
        test_loader = instantiate(cfg.dataloader.test, _recursive_=False)
        return test_loader
@classmethod
def auto_scale_hyperparams(cls, cfg, data_loader):
logger = logging.getLogger(__name__)
log_info = ""
# Get or set default iteration cfg
train_iter = try_get_key(cfg, "train.train_iter", default=0)
train_epoch = try_get_key(cfg, "train.train_epoch", default=0)
warmup_ratio = try_get_key(cfg, "train.warmup_ratio", default=0)
assert (
warmup_ratio < 1 and warmup_ratio >= 0
), "warmup_ratio must be in [0, 1) that presents the ratio of warmup iter to the train iter"
# Automatically scale iteration num depend on the settings
# The total iters in one epoch is `len(dataset) / global_batch_size`
cfg.train.train_iter = max(
math.ceil(len(data_loader.dataset) * train_epoch / cfg.train.global_batch_size),
train_iter,
)
cfg.train.warmup_iter = math.ceil(cfg.train.train_iter * cfg.train.warmup_ratio)
if not cfg.graph.enabled:
# In eager mode, dataloader only get micro-batch-size each iter,
# which is mini-batch-size // num_accumulation, so scale `train_iter`
# and `warmup_iter` to be consistent with static graph mode.
cfg.train.train_iter *= cfg.train.num_accumulation_steps
cfg.train.warmup_iter *= cfg.train.num_accumulation_steps
log_info += "Auto-scaling the config to train.train_iter={}, train.warmup_iter={}".format(
cfg.train.train_iter, cfg.train.warmup_iter
)
# Automatically scale the milestones
if try_get_key(cfg, "train.scheduler.milestones"):
if len(
[
milestone
for milestone in cfg.train.scheduler.milestones
if milestone < 0 or milestone >= 1
]
):
raise ValueError(
"milestones should be a list of increasing ratio in [0, 1), but got {}".format(
cfg.train.scheduler.milestones
)
)
cfg.train.scheduler.milestones = [
int(milestone * cfg.train.train_iter)
for milestone in cfg.train.scheduler.milestones
]
log_info += f", scheduler milestones={cfg.train.scheduler.milestones}"
logger.info(log_info)
# Global scheduler cfg
cfg.train.scheduler.warmup_iter = cfg.train.warmup_iter
cfg.train.scheduler.max_iter = cfg.train.train_iter
# train iter per epoch
iter_per_epoch = len(data_loader.dataset) // cfg.train.global_batch_size
# rescale eval period
if try_get_key(cfg, "train.evaluation.eval_after_n_epoch"):
cfg.train.evaluation.eval_period = (
iter_per_epoch * cfg.train.evaluation.eval_after_n_epoch
)
logger.info(
f"Auto-scaling the config "
f"train.evaluation.eval_after_n_epoch={cfg.train.evaluation.eval_after_n_epoch} "
f"to train.evaluation.eval_period={cfg.train.evaluation.eval_period}"
)
# rescale save model period
if try_get_key(cfg, "train.checkpointer.save_model_after_n_epoch"):
cfg.train.checkpointer.period = (
iter_per_epoch * cfg.train.checkpointer.save_model_after_n_epoch
)
logger.info(
f"Auto-scaling the config "
f"train.checkpointer.save_model_after_n_epoch="
f"{cfg.train.checkpointer.save_model_after_n_epoch} "
f"to train.checkpointer.period={cfg.train.checkpointer.period}"
)
@classmethod
def build_evaluator(cls, cfg):
evaluator = instantiate(cfg.train.evaluation.evaluator)
return evaluator
    @classmethod
    def test(cls, cfg, test_loaders, model, evaluator=None):
        """
        Evaluate the given model. The given model is expected to already contain
        weights to evaluate.
        Args:
            cfg (CfgNode):
            test_loaders: list [dataloader1, dataloader2, ...]
            model (nn.Graph):
            evaluators (list[DatasetEvaluator] or None): if None, will call
                :meth:`build_evaluator`. Otherwise, must have the same length as
                ``cfg.DATASETS.TEST``.
        Returns:
            dict: a dict of result metrics
        """
        logger = logging.getLogger(__name__)
        # TODO: support multi evaluator
        # if isinstance(evaluators, DatasetEvaluator):
        #     evaluators = [evaluators]
        # Effective batch size across all data-parallel ranks.
        test_batch_size = cfg.train.test_micro_batch_size * dist.get_data_parallel_size()
        # One evaluator instance is shared across all test dataloaders.
        evaluator = cls.build_evaluator(cfg) if not evaluator else evaluator
        results = OrderedDict()
        for idx, data_loader in enumerate(test_loaders):
            # When evaluators are passed in as arguments,
            # implicitly assume that evaluators can be created before data_loader.
            # Results are keyed by the dataset's class name.
            dataset_name = type(data_loader.dataset).__name__
            # TODO: support multi evaluator
            # if evaluators is not None:
            #     evaluator = evaluators[idx]
            # else:
            #     try:
            #         evaluator = cls.build_evaluator(cfg)
            #     except NotImplementedError:
            #         logger.warn(
            #             "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
            #             "or implement its `build_evaluator` method."
            #         )
            #         results[dataset_name] = {}
            #         continue
            results_i = inference_on_dataset(
                model,
                data_loader,
                test_batch_size,
                cfg.train.evaluation.eval_iter,
                cls.get_batch,
                cfg.train.input_placement_device,
                evaluator,
            )
            results[dataset_name] = results_i
            if dist.is_main_process():
                assert isinstance(
                    results_i, dict
                ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                    results_i
                )
                logger.info(
                    "Evaluation results for {} in csv format:".format(
                        colored(dataset_name, "green")
                    )
                )
                print_csv_format(results_i)
        # With a single test set, return its metrics directly instead of a
        # one-entry mapping keyed by dataset name.
        if len(results) == 1:
            results = list(results.values())[0]
        return results
| 34,511 | 39.650177 | 118 | py |
libai | libai-main/libai/utils/checkpoint.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import os
import shutil
from collections import defaultdict
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Tuple
import numpy as np
import oneflow as flow
from oneflow import nn
from termcolor import colored
import libai.utils.distributed as dist
from libai.utils.file_io import HTTPURLHandler, PathManagerBase
class _IncompatibleKeys(
    NamedTuple(
        # pyre-fixme[10]: Name `IncompatibleKeys` is used but not defined.
        "IncompatibleKeys",
        [
            ("missing_keys", List[str]),
            ("unexpected_keys", List[str]),
            # pyre-fixme[24]: Generic type `tuple` expects at least 1 type parameter.
            # pyre-fixme[24]: Generic type `tuple` expects at least 1 type parameter.
            # pyre-fixme[24]: Generic type `tuple` expects at least 1 type parameter.
            ("incorrect_shapes", List[Tuple]),
        ],
    )
):
    """Outcome of loading a state_dict into a model: keys in the model but absent
    from the checkpoint, keys in the checkpoint unused by the model, and
    ``(key, checkpoint_shape, model_shape)`` triples for shape mismatches."""
    pass
class Checkpointer(object):
    """
    A checkpointer that can save/load model as well as extra checkpointable
    objects.
    Checkpoints are directories (one sub-directory per checkpointable) written
    with ``flow.save``; the latest one is tracked in a ``last_checkpoint`` file.
    """
    # NOTE: only support data_parallel for saving model
    # TODO: save model: support model_parallel and pipeline parallel
    def __init__(
        self,
        model: nn.Module,
        save_dir: str = "",
        *,
        save_to_disk: bool = True,
        **checkpointables: object,
    ):
        """
        Args:
            model (nn.Module): model.
            save_dir (str): a directory to save and find checkpoints.
            save_to_disk (bool): if True, save checkpoint to disk, otherwise
                disable saving for this checkpointer.
            checkpointables (object): any checkpointable objects, i.e., objects
                that have the `state_dict()` and `load_state_dict()` method. For
                example, it can be used like
                `Checkpointer(model, "dir", optimizer=optimizer)`.
        """
        self.model = model
        # Shallow copy so later mutation of the kwargs dict doesn't leak in.
        self.checkpointables = copy.copy(checkpointables)
        self.logger = logging.getLogger(__name__)
        self.save_dir = save_dir
        self.save_to_disk = save_to_disk
        # Default PathManager, support HTTP URLs
        # A user may want to use a different project-specific PathManagerBase'
        self.path_manager: PathManagerBase = PathManagerBase()
        self.path_manager.register_handler(HTTPURLHandler())
    def save(self, name: str, **kwargs: Dict[str, str]):
        """
        Dump model and checkpointables to a file.
        Args:
            name (str): name of the file.
            kwargs (dict): extra arbitrary data to save.
        """
        data = {}
        data["model"] = self.model.state_dict()
        for key, obj in self.checkpointables.items():
            data[key] = obj.state_dict()
        data.update(kwargs)
        basename = name
        save_dir = os.path.join(self.save_dir, basename)
        assert os.path.basename(save_dir) == basename, basename
        if not self.path_manager.exists(save_dir):
            self.path_manager.mkdirs(save_dir)
        self.logger.info("Saving checkpoint to {}".format(save_dir))
        for save_name in data:
            # "iteration" is encoded in the directory name, not saved as a file.
            if save_name == "iteration":
                continue
            save_file = os.path.join(save_dir, save_name)
            # If directory existing, remove it for saving
            # NOTE(review): despite the comment above, this calls `mkdirs` on an
            # existing path and never removes anything — confirm whether stale
            # checkpoint files should actually be deleted here.
            if self.path_manager.exists(save_file):
                self.path_manager.mkdirs(save_file)
            # Gathers global tensors to rank 0 before writing.
            flow.save(data[save_name], save_file, global_dst_rank=0)
        # "model_best" snapshots (from BestCheckpointer) must not hijack the
        # "last_checkpoint" pointer used for resuming.
        if basename != "model_best":
            self.tag_last_checkpoint(basename)
    def load(self, path: str, checkpointables: Optional[List[str]] = None) -> object:
        """
        Load from the given checkpoint. When path points to network file, this
        function has to be called on all ranks.
        Args:
            path (str): path or url to the checkpoint. If empty, will not load
                anything.
            checkpointables (list): List of checkpointable names to load. If not
                specified (None), will load all the possible checkpointables.
        Returns:
            dict:
                extra data loaded from the checkpoint that has not been
                processed. For example, those saved with
                :meth:`.save(**extra_data)`.
        """
        if not path:
            # no checkpoint provided
            self.logger.info("No checkpoint found. Training model from scratch")
            return {}
        self.logger.info("Loading checkpoint from {}".format(path))
        checkpoint = self._load_file(path)
        incompatible = self._load_model(checkpoint)
        if incompatible is not None:  # handle some existing subclasses that returns None
            self._log_incompatible_keys(incompatible)
        for key in self.checkpointables if checkpointables is None else checkpointables:
            if key in checkpoint:  # pyre-ignore
                self.logger.info("Loading {} from {}".format(key, path))
                obj = self.checkpointables[key]
                obj.load_state_dict(checkpoint.pop(key))  # pyre-ignore
        # return any further checkpoint data
        return checkpoint
    def has_checkpoint(self):
        """
        Returns:
            bool: whether a checkpoint exists in the target directory.
        """
        save_file = os.path.join(self.save_dir, "last_checkpoint")
        return self.path_manager.exists(save_file)
    def get_checkpoint_file(self):
        """
        Returns:
            str: The latest checkpoint file in target directory.
        """
        save_file = os.path.join(self.save_dir, "last_checkpoint")
        try:
            # load checkpoint file in rank0
            if flow.env.get_rank() == 0:
                with open(save_file, "r") as f:
                    last_saved = f.read().strip()
            else:
                last_saved = None
            # broadcast checkpoint file to other ranks
            last_saved = dist.broadcast_py_object(last_saved, src=0)
        except IOError:
            # if file doesn't exist, maybe because it has just been
            # deleted by a separate process
            return ""
        return os.path.join(self.save_dir, last_saved)
    def resume_or_load(self, path: str, *, resume: bool = True):
        """
        If `resume` is True, this method attempts to resume from the last
        checkpoint (if exists). Otherwise, load checkpoint from the given path.
        This is useful when restarting an interrupted training job.
        Args:
            path (str): path to the checkpoint.
            resume (bool): if True, resume from the last checkpoint if it exists.
        Returns:
            same as :meth:`load`.
        """
        if resume and self.has_checkpoint():
            path = self.get_checkpoint_file()
            return self.load(path)
        else:
            # Not resuming: load model weights only, skip optimizer/scheduler.
            return self.load(path, checkpointables=[])
    def tag_last_checkpoint(self, last_filename_basename: str):
        """
        Tag the last checkpoint.
        Args:
            last_filename_basename (str): the basename of the last filename.
        """
        save_file = os.path.join(self.save_dir, "last_checkpoint")
        with self.path_manager.open(save_file, "w") as f:
            f.write(last_filename_basename)  # pyre-ignore
    def _load_file(self, f: str):
        """
        Load a checkpoint file. Can be overwritten by subclasses to support
        different formats.
        Args:
            f (str): a locally mounted file path.
        Returns:
            dict: with keys "model" and optionally others that are saved by
                the checkpointer dict["model"] must be a dict which maps strings
                to flow.Tensor or numpy arrays.
        """
        data = {}
        # Only rank 0 lists the directory; the key list is then shared.
        keys = self.path_manager.ls(f)
        # broadcast checkpointer keys to other ranks
        keys = dist.broadcast_py_object(keys, src=0)
        for key in keys:
            data[key] = flow.load(os.path.join(f, key), global_src_rank=0)
        # Directory names look like "model_0000999"; recover the iteration
        # from the trailing "_<number>" suffix.
        try:
            data["iter"] = int(f.split("_")[-1])
        except:  # noqa
            self.logger.info(f"iter info in {f} not found, set iter to 0")
            data["iter"] = 0
        return data
    def _load_model(self, checkpoint: Any):
        """
        Load weights from a checkpoint.
        Args:
            checkpoint (Any): checkpoint contains the weights.
        Returns:
            _IncompatibleKeys: missing/unexpected keys and shape mismatches.
        """
        checkpoint_state_dict = checkpoint.pop("model")
        self._convert_ndarray_to_tensor(checkpoint_state_dict)
        # if the state_dict comes from a model that was wrapped in a
        # DataParallel or DistributedDataParallel during serialization,
        # remove the "module" prefix before performing the matching.
        _strip_prefix_if_present(checkpoint_state_dict, "module.")
        model_state_dict = self.model.state_dict()
        incorrect_shapes = []
        # Drop checkpoint entries whose shape disagrees with the model so that
        # load_state_dict doesn't fail; they are reported instead.
        for k in list(checkpoint_state_dict.keys()):
            if k in model_state_dict:
                shape_model = tuple(model_state_dict[k].shape)
                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
                if shape_model != shape_checkpoint:
                    incorrect_shapes.append((k, shape_checkpoint, shape_model))
                    checkpoint_state_dict.pop(k)
        incompatible = self.model.load_state_dict(checkpoint_state_dict, strict=False)
        return _IncompatibleKeys(
            missing_keys=incompatible.missing_keys,
            unexpected_keys=incompatible.unexpected_keys,
            incorrect_shapes=incorrect_shapes,
        )
    def _log_incompatible_keys(self, incompatible: _IncompatibleKeys) -> None:
        """
        Log information about the incompatible keys returned by ``_load_model``.
        """
        for k, shape_checkpoint, shape_model in incompatible.incorrect_shapes:
            self.logger.warning(
                "Skip loading parameter '{}' to the model due to incompatible "
                "shapes: {} in the checkpoint but {} in the "
                "model! You might want to double check if this is expected.".format(
                    k, shape_checkpoint, shape_model
                )
            )
        if incompatible.missing_keys:
            # Keys loaded under an alias (shared parameters) are not "missing".
            missing_keys = _filter_reused_missing_keys(self.model, incompatible.missing_keys)
            if missing_keys:
                self.logger.info(get_missing_parameters_message(missing_keys))
        if incompatible.unexpected_keys:
            self.logger.info(get_unexpected_parameters_message(incompatible.unexpected_keys))
    def _convert_ndarray_to_tensor(self, state_dict: dict):
        """
        In-place convert all numpy arrays in the state_dict to flow tensor.
        Args:
            state_dict (dict): a state-dict to be loaded to the model.
        """
        # model could be an OrderedDict with _metadata attribute
        # (as returned by oneflow's state_dict()). We should preserve these
        # properties.
        for k in list(state_dict.keys()):
            v = state_dict[k]
            if not isinstance(v, np.ndarray) and not isinstance(v, flow.Tensor):
                raise ValueError("Unsupported type found in checkpoint! {}: {}".format(k, type(v)))
            # If it's local tensor, convert it to global tensor.
            # NOTE(review): despite the docstring, a numpy ndarray reaching here
            # has no `.is_global` attribute and would raise — confirm whether
            # ndarray inputs ever occur in practice.
            if not v.is_global:
                if k in self.model.state_dict():
                    model_v = self.model.state_dict()[k]
                    state_dict[k] = v.to_global(sbp=model_v.sbp, placement=model_v.placement)
class PeriodicCheckpointer:
    """
    Save checkpoints periodically. When `.step(iteration)` is called, it will
    execute `checkpointer.save` on the given checkpointer, if iteration is a
    multiple of period or if `max_iter` is reached.
    """
    def __init__(
        self,
        checkpointer: Checkpointer,
        period: int,
        max_iter: Optional[int] = None,
        max_to_keep: Optional[int] = None,
        file_prefix: str = "model",
    ):
        """
        Args:
            checkpointer (Checkpointer): the checkpointer object used to save
                checkpoints.
            period (int): save a checkpoint every ``period`` iterations.
            max_iter (int, optional): maximum number of iterations. When it is
                reached, a checkpoint named "{file_prefix}_final" will be saved.
            max_to_keep (int, optional): if set, keep only the most recent
                ``max_to_keep`` periodic checkpoints and delete older ones.
            file_prefix (str): prefix for checkpoint directory names.
        """
        self.checkpointer = checkpointer
        self.period = int(period)
        self.max_iter = max_iter
        if max_to_keep is not None:
            assert max_to_keep > 0
        self.max_to_keep = max_to_keep
        self.recent_checkpoints: List[str] = []
        self.file_prefix = file_prefix
        self.path_manager: PathManagerBase = checkpointer.path_manager
    def step(self, iteration: int, **kwargs: Any):
        """
        Perform the appropriate action at the given iteration.
        Args:
            iteration (int): the current iteration, ranged in [0, max_iter-1].
            kwargs (Any): extra data to save, same as in
                :meth:`Checkpointer.save`.
        """
        iteration = int(iteration)
        additional_state = {"iteration": iteration}
        additional_state.update(kwargs)
        if (iteration + 1) % self.period == 0:
            self.checkpointer.save(
                "{}_{:07d}".format(self.file_prefix, iteration), **additional_state
            )
            if self.max_to_keep is not None:
                self.recent_checkpoints.append(self.checkpointer.get_checkpoint_file())
                if len(self.recent_checkpoints) > self.max_to_keep:
                    # Evict the oldest checkpoint; only rank 0 deletes, and the
                    # one just written is never deleted.
                    file_to_delete = self.recent_checkpoints.pop(0)
                    if (
                        dist.is_main_process()
                        and self.path_manager.exists(file_to_delete)
                        and not file_to_delete.endswith(
                            "{}_{:07d}".format(self.file_prefix, iteration)
                        )
                    ):
                        # Checkpoints are directories; fall back to rmtree.
                        if not self.path_manager.isfile(file_to_delete):
                            shutil.rmtree(file_to_delete)
                        else:
                            self.path_manager.rm(file_to_delete)
        if self.max_iter is not None:
            if iteration >= self.max_iter - 1:
                self.checkpointer.save(f"{self.file_prefix}_final", **additional_state)
    def save(self, name: str, **kwargs: Any):
        """
        Same argument as :meth:`Checkpointer.save`.
        Use this method to manually save checkpoints outside the schedule.
        Args:
            name (str): file name.
            kwargs (Any): extra data to save, same as in
                :meth:`Checkpointer.save`.
        """
        self.checkpointer.save(name, **kwargs)
def _filter_reused_missing_keys(model: nn.Module, keys: List[str]) -> List[str]:
    """
    Filter "missing keys" to not include keys whose parameter was actually
    loaded under another (alias) name pointing at the same tensor.
    """
    remaining = set(keys)
    # Map each parameter/buffer object to every qualified name that reaches it.
    aliases = defaultdict(set)
    for prefix, module in _named_modules_with_dup(model):
        scope = prefix + "." if prefix else ""
        entries = list(module.named_parameters(recurse=False))
        entries += list(module.named_buffers(recurse=False))  # pyre-ignore
        for local_name, tensor in entries:
            aliases[tensor].add(scope + local_name)
    for names in aliases.values():
        # A name is only truly missing if every alias of its tensor is missing.
        if (names & remaining) and (names - remaining):
            remaining -= names
    return list(remaining)
def get_missing_parameters_message(keys: List[str]) -> str:
    """
    Build a logging-friendly message listing parameter names (keys) that exist
    in the model but were not found in a checkpoint.
    Args:
        keys (list[str]): List of keys that were not found in the checkpoint.
    Returns:
        str: message.
    """
    grouped = _group_checkpoint_keys(keys)
    lines = [
        "  " + colored(prefix + _group_to_str(suffixes), "blue")
        for prefix, suffixes in grouped.items()
    ]
    return (
        "Some model parameters or buffers are not found in the checkpoint:\n"
        + "\n".join(lines)
    )
def get_unexpected_parameters_message(keys: List[str]) -> str:
    """
    Build a logging-friendly message listing checkpoint keys that the model
    does not use.
    Args:
        keys (list[str]): List of keys that were not found in the model.
    Returns:
        str: message.
    """
    grouped = _group_checkpoint_keys(keys)
    lines = [
        "  " + colored(prefix + _group_to_str(suffixes), "magenta")
        for prefix, suffixes in grouped.items()
    ]
    return (
        "The checkpoint state_dict contains keys that are not used by the model:\n"
        + "\n".join(lines)
    )
def _strip_prefix_if_present(state_dict: Dict[str, Any], prefix: str) -> None:
"""
Strip the prefix in metadata, if any.
Args:
state_dict (OrderedDict): a state-dict to be loaded to the model.
prefix (str): prefix.
"""
keys = sorted(state_dict.keys())
if not all(len(key) == 0 or key.startswith(prefix) for key in keys):
return
for key in keys:
newkey = key[len(prefix) :]
state_dict[newkey] = state_dict.pop(key)
# also strip the prefix in metadata, if any..
try:
metadata = state_dict._metadata # pyre-ignore
except AttributeError:
pass
else:
for key in list(metadata.keys()):
# for the metadata dict, the key can be:
# '': for the DDP module, which we want to remove.
# 'module': for the actual model.
# 'module.xx.xx': for the rest.
if len(key) == 0:
continue
newkey = key[len(prefix) :]
metadata[newkey] = metadata.pop(key)
def _group_checkpoint_keys(keys: List[str]) -> Dict[str, List[str]]:
"""
Group keys based on common prefixes. A prefix is the string up to the final
"." in each key.
Args:
keys (list[str]): list of parameter names, i.e. keys in the model
checkpoint dict.
Returns:
dict[list]: keys with common prefixes are grouped into lists.
"""
groups = defaultdict(list)
for key in keys:
pos = key.rfind(".")
if pos >= 0:
head, tail = key[:pos], [key[pos + 1 :]]
else:
head, tail = key, []
groups[head].extend(tail)
return groups
def _group_to_str(group: List[str]) -> str:
"""
Format a group of parameter name suffixes into a loggable string.
Args:
group (list[str]): list of parameter name suffixes.
Returns:
str: formated string.
"""
if len(group) == 0:
return ""
if len(group) == 1:
return "." + group[0]
return ".{" + ", ".join(group) + "}"
def _named_modules_with_dup(model: nn.Module, prefix: str = "") -> Iterable[Tuple[str, nn.Module]]:
    """
    Like ``model.named_modules()``, except that modules reachable under more
    than one name are yielded once per name instead of being de-duplicated.
    """
    yield prefix, model
    for child_name, child in model._modules.items():  # pyre-ignore
        if child is not None:
            child_prefix = prefix + ("." if prefix else "") + child_name
            yield from _named_modules_with_dup(child, child_prefix)
| 20,051 | 37.194286 | 99 | py |
libai | libai-main/libai/utils/timer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import perf_counter
from typing import Optional
# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/fvcore/blob/main/fvcore/common/timer.py
# --------------------------------------------------------
class Timer:
    """
    Measures wall-clock time elapsed since the last start/reset, with support
    for pausing and resuming.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """
        Restart the timer from zero and clear any pause state.
        """
        self._start = perf_counter()
        self._paused: Optional[float] = None
        self._total_paused = 0
        self._count_start = 1

    def pause(self):
        """
        Pause the timer. Raises ValueError if it is already paused.
        """
        if self._paused is not None:
            raise ValueError("Trying to pause a Timer that is already paused!")
        self._paused = perf_counter()

    def is_paused(self) -> bool:
        """
        Returns:
            bool: whether the timer is currently paused
        """
        return self._paused is not None

    def resume(self):
        """
        Resume the timer. Raises ValueError if it is not paused.
        """
        if self._paused is None:
            raise ValueError("Trying to resume a Timer that is not paused!")
        # Time spent paused does not count towards the elapsed total.
        self._total_paused += perf_counter() - self._paused
        self._paused = None
        self._count_start += 1

    def seconds(self) -> float:
        """
        Returns:
            (float): total seconds since the start/reset of the timer,
                excluding time spent while paused.
        """
        end = self._paused if self._paused is not None else perf_counter()
        return end - self._start - self._total_paused

    def avg_seconds(self) -> float:
        """
        Returns:
            (float): average number of seconds between every start/reset
                and pause.
        """
        return self.seconds() / self._count_start
| 2,616 | 29.08046 | 80 | py |
libai | libai-main/libai/utils/events.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import logging
import os
import time
from collections import defaultdict
from contextlib import contextmanager
from libai.utils.file_io import PathManager
from libai.utils.history_buffer import HistoryBuffer
# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/events.py
# --------------------------------------------------------
__all__ = [
"get_event_storage",
"JSONWriter",
"CommonMetricPrinter",
"EventStorage",
]
# Stack of active EventStorage objects; the top entry is what
# `get_event_storage()` returns. Pushed/popped by `EventStorage.__enter__`
# and `EventStorage.__exit__`.
_CURRENT_STORAGE_STACK = []
def get_event_storage():
    """
    Returns:
        The :class:`EventStorage` object that's currently being used.
        Throw an error if no :class:`EventStorage` is currently enabled.
    """
    assert (
        _CURRENT_STORAGE_STACK
    ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!"
    # The most recently entered storage wins.
    return _CURRENT_STORAGE_STACK[-1]
class EventWriter:
    """
    Abstract base for writers that obtain events from :class:`EventStorage`
    and process them (print, serialize, ...).
    """

    def write(self):
        """Process pending events. Subclasses must override."""
        raise NotImplementedError

    def close(self):
        """Release any resources held by the writer. No-op by default."""
        pass
class JSONWriter(EventWriter):
    """
    Write scalars to a json file, one json object per line (instead of one
    big json) for easy parsing, e.g. with jq:

    ::

        $ cat metrics.json | jq -s '.[0:2]'
        $ cat metrics.json | jq '.loss_mask'
    """

    def __init__(self, json_file, window_size=20):
        """
        Args:
            json_file (str): path to the json file. New data will be appended if the file exists.
            window_size (int): the window size of median smoothing for the scalars whose
                `smoothing_hint` are True.
        """
        self._file_handle = PathManager.open(json_file, "a")
        self._window_size = window_size
        self._last_write = -1

    def write(self):
        storage = get_event_storage()
        # Group every scalar newer than the last write by its iteration.
        pending = defaultdict(dict)
        for name, (value, it) in storage.latest_with_smoothing_hint(self._window_size).items():
            if it > self._last_write:
                pending[it][name] = value

        if pending:
            self._last_write = max(pending.keys())

        # One json object per iteration, keys sorted for stable output.
        for it, scalars in pending.items():
            scalars["iteration"] = it
            self._file_handle.write(json.dumps(scalars, sort_keys=True) + "\n")
        self._file_handle.flush()
        try:
            os.fsync(self._file_handle.fileno())
        except AttributeError:
            # The underlying stream may not expose an OS-level file descriptor.
            pass

    def close(self):
        self._file_handle.close()
class TensorboardXWriter(EventWriter):
    """
    Write all scalars (and raw histograms) from the storage to a tensorboard
    event file.
    """

    def __init__(self, log_dir: str, window_size: int = 20, **kwargs):
        """
        Args:
            log_dir (str): the directory to save the output events
            window_size (int): the scalars will be median-smoothed by this window size
            kwargs: other arguments passed to `tensorboardX.SummaryWriter(...)`
        """
        self._window_size = window_size
        from tensorboardX import SummaryWriter

        self._writer = SummaryWriter(log_dir=log_dir, **kwargs)
        self._last_write = -1

    def write(self):
        storage = get_event_storage()
        latest_iter = self._last_write
        for name, (value, it) in storage.latest_with_smoothing_hint(self._window_size).items():
            if it > self._last_write:
                self._writer.add_scalar(name, value, it)
                if it > latest_iter:
                    latest_iter = it
        self._last_write = latest_iter

        # TODO: add write image
        if storage._histograms:
            for params in storage._histograms:
                self._writer.add_histogram_raw(**params)
            storage.clear_histograms()

    def close(self):
        if hasattr(self, "_writer"):  # doesn't exist when the code fails at import
            self._writer.close()
class CommonMetricPrinter(EventWriter):
    """
    Print **common** metrics to the terminal, including
    iteration time, ETA, memory, all losses, and the learning rate.
    It also applies smoothing using a window of 20 elements.
    It's meant to print common metrics in common ways.
    To print something in more customized ways, please implement a similar printer by yourself.
    """

    def __init__(self, batch_size, max_iter):
        """
        Args:
            batch_size (int): global batch size; used in :meth:`write` to report
                throughput as ``batch_size / iter_time`` samples/s.
            max_iter (int): the maximum number of iterations to train.
                Used to compute ETA.
        """
        self.logger = logging.getLogger(__name__)
        self._batch_size = batch_size
        self._max_iter = max_iter
        # (iteration, perf_counter) of the last write; used as a fallback
        # to estimate ETA when no "time" history exists yet.
        self._last_write = None

    def write(self):
        storage = get_event_storage()
        iteration = storage.iter
        # NOTE(review): requires the trainer to have set `storage.samples`
        # beforehand; `EventStorage.__init__` does not initialize it.
        consumed_samples = storage.samples
        if iteration == self._max_iter:
            # This hook only reports training progress (loss, ETA, etc) but not other data,
            # therefore do not write anything after training succeeds, even if this method
            # is called.
            return

        try:
            data_time = storage.history("data_time").avg(20)
        except KeyError:
            # they may not exist in the first few iterations (due to warmup)
            # or when SimpleTrainer is not used
            data_time = None

        eta_string = None
        try:
            # Primary ETA estimate: median of the recorded per-iteration times.
            iter_time = storage.history("time").global_avg()
            eta_seconds = storage.history("time").median(1000) * (self._max_iter - iteration - 1)
            storage.put_scalar("eta_seconds", eta_seconds, smoothing_hint=False)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
        except KeyError:
            iter_time = None
            # estimate eta on our own - more noisy
            if self._last_write is not None:
                estimate_iter_time = (time.perf_counter() - self._last_write[1]) / (
                    iteration - self._last_write[0]
                )
                eta_seconds = estimate_iter_time * (self._max_iter - iteration - 1)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            self._last_write = (iteration, time.perf_counter())

        try:
            lr = "{:.2e}".format(storage.history("lr").latest())
        except KeyError:
            lr = "N/A"

        # Never set in this implementation, so the "memory" field below is
        # always empty.
        max_mem_mb = None

        # NOTE: max_mem is parsed by grep in "dev/parse_results.sh"
        self.logger.info(
            " {eta}{iter} {sample} {losses} {time}{data_time} {tpt} lr: {lr} {memory}".format(
                eta=f"eta: {eta_string} " if eta_string else "",
                iter=f"iteration: {iteration}/{self._max_iter}",
                sample=f"consumed_samples: {consumed_samples}",
                losses=" ".join(
                    [
                        "{}: {:.4g}".format(k, v.median(200))
                        for k, v in storage.histories().items()
                        if "loss" in k
                    ]
                ),
                time="time: {:.4f} s/iter ".format(iter_time) if iter_time is not None else "",
                data_time="data_time: {:.4f} s/iter".format(data_time)
                if data_time is not None
                else "",
                tpt="total_throughput: {:.2f} samples/s".format(self._batch_size / iter_time)
                if iter_time is not None
                else "",
                lr=lr,
                memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "",
            )
        )
class EventStorage:
    """
    The user-facing class that provides metric storage functionalities.

    Scalars are accumulated into per-name ``HistoryBuffer`` objects and read
    back by writers (e.g. :class:`JSONWriter`). Use it as a context manager so
    that ``get_event_storage()`` can find the active storage.
    In the future we may add support for storing / logging other types of data if needed.
    """

    def __init__(self, start_iter=0):
        """
        Args:
            start_iter (int): the iteration number to start with
        """
        self._history = defaultdict(HistoryBuffer)
        self._smoothing_hints = {}
        self._latest_scalars = {}
        self._iter = start_iter
        # Bugfix: the `samples` property is backed by `_samples`, but only the
        # unused `_batch_size` was initialized here, so reading
        # `storage.samples` before a trainer assigned it raised AttributeError.
        self._samples = 0
        self._batch_size = 0  # kept for backward compatibility; unused internally
        self._current_prefix = ""
        self._vis_data = []
        self._histograms = []

    def put_image(self, img_name, img_tensor):
        """
        Add an `img_tensor` associated with `img_name` to be shown on
        tensorboard.

        Args:
            img_name (str): The name of the image to put into tensorboard.
            img_tensor (flow.Tensor or numpy.array): An `uint8` or `float`
                Tensor of shape `[channel, height, width]` where `channel` is
                3. The image format should be RGB. The elements in img_tensor
                can either have values in [0, 1] (float32) or [0, 255] (uint8).
                The `img_tensor` will be visualized in tensorboard.
        """
        self._vis_data.append((img_name, img_tensor, self._iter))

    def put_scalar(self, name, value, smoothing_hint=True):
        """
        Add a scalar `value` to the `HistoryBuffer` associated with `name`.

        Args:
            smoothing_hint (bool): a 'hint' on whether this scalar is noisy and should be
                smoothed when logged. The hint will be accessible through
                :meth:`EventStorage.smoothing_hints`. A writer may ignore the hint
                and apply custom smoothing rule.
                It defaults to True because most scalars we save need to be smoothed to
                provide any useful signal.

        Raises:
            AssertionError: if `name` was previously put with a different hint.
        """
        name = self._current_prefix + name
        history = self._history[name]
        value = float(value)
        history.update(value, self._iter)
        self._latest_scalars[name] = (value, self._iter)

        existing_hint = self._smoothing_hints.get(name)
        if existing_hint is not None:
            assert (
                existing_hint == smoothing_hint
            ), "Scalar {} was put with a different smoothing_hint!".format(name)
        else:
            self._smoothing_hints[name] = smoothing_hint

    def put_scalars(self, *, smoothing_hint=True, **kwargs):
        """
        Put multiple scalars from keyword arguments.

        Example:

        .. code-block:: python

            storage.put_scalars(loss=my_loss, accuracy=my_accuracy, smoothing_hint=True)
        """
        for k, v in kwargs.items():
            self.put_scalar(k, v, smoothing_hint=smoothing_hint)

    def history(self, name):
        """
        Returns:
            HistoryBuffer: the scalar history for name

        Raises:
            KeyError: if no scalar was ever put under `name`.
        """
        ret = self._history.get(name, None)
        if ret is None:
            raise KeyError("No history metric available for {}!".format(name))
        return ret

    def histories(self):
        """
        Returns:
            dict[name -> HistoryBuffer]: the HistoryBuffer for all scalars
        """
        return self._history

    def latest(self):
        """
        Returns:
            dict[str -> (float, int)]: mapping from the name of each scalar to the most
                recent value and the iteration number its added.
        """
        return self._latest_scalars

    def latest_with_smoothing_hint(self, window_size=20):
        """
        Similar to :meth:`latest`, but the returned values
        are either the un-smoothed original latest value,
        or a median of the given window_size,
        depending on whether the smoothing_hint is True.
        This provides a default behavior that other writers can use.
        """
        result = {}
        for k, (v, itr) in self._latest_scalars.items():
            result[k] = (
                self._history[k].median(window_size) if self._smoothing_hints[k] else v,
                itr,
            )
        return result

    def smoothing_hints(self):
        """
        Returns:
            dict[name -> bool]: the user-provided hint on whether the scalar
                is noisy and needs smoothing.
        """
        return self._smoothing_hints

    def step(self):
        """
        User should either: (1) Call this function to increment storage.iter when needed.
        Or (2) Set `storage.iter` to the correct iteration number before each iteration.
        The storage will then be able to associate the new data with an iteration number.
        """
        self._iter += 1

    @property
    def iter(self):
        """
        Returns the current iteration number. When used together with a trainer,
        this is ensured to be the same as trainer.iter.
        """
        return self._iter

    @iter.setter
    def iter(self, val):
        self._iter = int(val)

    @property
    def samples(self):
        # Number of consumed samples; initialized to 0 and usually updated by
        # the trainer through the setter below.
        return self._samples

    @samples.setter
    def samples(self, val):
        self._samples = int(val)

    def __enter__(self):
        # Make this storage discoverable via `get_event_storage()`.
        _CURRENT_STORAGE_STACK.append(self)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        assert _CURRENT_STORAGE_STACK[-1] == self
        _CURRENT_STORAGE_STACK.pop()

    @contextmanager
    def name_scope(self, name):
        """
        Yields:
            A context within which all the events added to this storage
            will be prefixed by the name scope.
        """
        old_prefix = self._current_prefix
        self._current_prefix = name.rstrip("/") + "/"
        yield
        self._current_prefix = old_prefix

    def clear_images(self):
        """
        Delete all the stored images for visualization. This should be called
        after images are written to tensorboard.
        """
        self._vis_data = []

    def clear_histograms(self):
        """
        Delete all the stored histograms for visualization.
        This should be called after histograms are written to tensorboard.
        """
        self._histograms = []
| 15,226 | 32.762749 | 98 | py |
libai | libai-main/libai/utils/download.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
from typing import Callable, List, Optional
from urllib import request
# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/iopath/blob/main/iopath/common/download.py
# --------------------------------------------------------
def download(url: str, dir: str, *, filename: Optional[str] = None, progress: bool = True) -> str:
    """
    Download a file from a given URL to a directory. If file exists, will not
    overwrite the existing file.

    Args:
        url (str): the URL to download from.
        dir (str): the directory to download the file
        filename (str or None): the basename to save the file.
            Will use the name in the URL if not given.
        progress (bool): whether to use tqdm to draw a progress bar.

    Returns:
        str: the path to the downloaded file or the existing one.

    Raises:
        IOError: if the download fails or yields an empty file.
    """
    os.makedirs(dir, exist_ok=True)
    if filename is None:
        # Fall back to the last URL path component as the file name.
        filename = url.split("/")[-1]
        assert len(filename), "Cannot obtain filename from url {}".format(url)
    fpath = os.path.join(dir, filename)
    logger = logging.getLogger(__name__)

    if os.path.isfile(fpath):
        logger.info("File {} exists! Skipping download.".format(filename))
        return fpath

    tmp = fpath + ".tmp"  # download to a tmp file first, to be more atomic.
    try:
        logger.info("Downloading from {} ...".format(url))
        if progress:
            import tqdm

            # Adapts urlretrieve's reporthook(blocks, block_size, total_size)
            # signature to tqdm's incremental `update` API.
            def hook(t: tqdm.tqdm) -> Callable[[int, int, Optional[int]], None]:
                last_b: List[int] = [0]

                def inner(b: int, bsize: int, tsize: Optional[int] = None) -> None:
                    if tsize is not None:
                        t.total = tsize
                    t.update((b - last_b[0]) * bsize)  # type: ignore
                    last_b[0] = b

                return inner

            with tqdm.tqdm(  # type: ignore
                unit="B", unit_scale=True, miniters=1, desc=filename, leave=True
            ) as t:
                tmp, _ = request.urlretrieve(url, filename=tmp, reporthook=hook(t))

        else:
            tmp, _ = request.urlretrieve(url, filename=tmp)
        statinfo = os.stat(tmp)
        size = statinfo.st_size
        if size == 0:
            raise IOError("Downloaded an empty file from {}!".format(url))
        # download to tmp first and move to fpath, to make this function more
        # atomic.
        shutil.move(tmp, fpath)
    except IOError:
        logger.error("Failed to download {}".format(url))
        raise
    finally:
        # On success `tmp` was already moved, so unlink raises
        # FileNotFoundError (an IOError/OSError alias) which is swallowed;
        # on failure this removes the partial download.
        try:
            os.unlink(tmp)
        except IOError:
            pass

    logger.info("Successfully downloaded " + fpath + ". " + str(size) + " bytes.")
    return fpath
| 3,408 | 34.884211 | 98 | py |
libai | libai-main/libai/utils/logger.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import functools
import logging
import os
import sys
import time
from collections import Counter
from termcolor import colored
from libai.utils.file_io import PathManager
# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/logger.py
# --------------------------------------------------------
class _ColorfulFormatter(logging.Formatter):
def __init__(self, *args, **kwargs):
self._root_name = kwargs.pop("root_name") + "."
self._abbrev_name = kwargs.pop("abbrev_name", "")
if len(self._abbrev_name):
self._abbrev_name = self._abbrev_name + "."
super(_ColorfulFormatter, self).__init__(*args, **kwargs)
def formatMessage(self, record):
record.name = record.name.replace(self._root_name, self._abbrev_name)
log = super(_ColorfulFormatter, self).formatMessage(record)
if record.levelno == logging.WARNING:
prefix = colored("WARNING", "red", attrs=["blink"])
elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
prefix = colored("ERROR", "red", attrs=["blink", "underline"])
else:
return log
return prefix + " " + log
@functools.lru_cache()  # so that calling setup_logger multiple times won't add many handlers
def setup_logger(output=None, distributed_rank=0, *, color=True, name="libai", abbrev_name=None):
    """
    Configure and return the logger named `name`.

    Args:
        output (str): a file name or a directory to save log. If None, will not save log file.
            If ends with ".txt" or ".log", assumed to be a file name.
            Otherwise, logs will be saved to `output/log.txt`.
        name (str): the root module name of this logger
        abbrev_name (str): an abbreviation of the module, to avoid long names in logs.
            Set to "" to not log the root module in logs.
            By default, "libai" is abbreviated to "lb" and other names are kept unchanged.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False

    if abbrev_name is None:
        abbrev_name = "lb" if name == "libai" else name

    plain_formatter = logging.Formatter(
        "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S"
    )

    # Console logging: master rank only.
    if distributed_rank == 0:
        console = logging.StreamHandler(stream=sys.stdout)
        console.setLevel(logging.DEBUG)
        if color:
            console.setFormatter(
                _ColorfulFormatter(
                    colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s",
                    datefmt="%m/%d %H:%M:%S",
                    root_name=name,
                    abbrev_name=str(abbrev_name),
                )
            )
        else:
            console.setFormatter(plain_formatter)
        logger.addHandler(console)

    # File logging: every rank writes to its own file.
    if output is not None:
        filename = output if output.endswith((".txt", ".log")) else os.path.join(output, "log.txt")
        if distributed_rank > 0:
            filename = filename + ".rank{}".format(distributed_rank)
        PathManager.mkdirs(os.path.dirname(filename))

        file_handler = logging.StreamHandler(_cached_log_stream(filename))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(plain_formatter)
        logger.addHandler(file_handler)

    return logger
# cache the opened file object, so that different calls to `setup_logger`
# with the same file name can safely write to the same file.
@functools.lru_cache(maxsize=None)
def _cached_log_stream(filename):
    """Open `filename` for appending, caching one handle per path."""
    # Use a small 1K buffer when writing to cloud storage paths.
    buffer_size = 1024 if "://" in filename else -1
    stream = PathManager.open(filename, "a", buffering=buffer_size)
    atexit.register(stream.close)
    return stream
"""
Below are some other convenient logging methods.
They are mainly adopted from
https://github.com/abseil/abseil-py/blob/master/absl/logging/__init__.py
"""
def _find_caller():
"""
Returns:
str: module name of the caller
tuple: a hashable key to be used to identify different callers
"""
frame = sys._getframe(2)
while frame:
code = frame.f_code
if os.path.join("utils", "logger.") not in code.co_filename:
mod_name = frame.f_globals["__name__"]
if mod_name == "__main__":
mod_name = "libai"
return mod_name, (code.co_filename, frame.f_lineno, code.co_name)
frame = frame.f_back
# Per-call-site message counters, used by `log_first_n` / `log_every_n`.
_LOG_COUNTER = Counter()
# Per-call-site timestamp of the last emitted message, used by `log_every_n_seconds`.
_LOG_TIMER = {}
def log_first_n(lvl, msg, n=1, *, name=None, key="caller"):
    """
    Log only for the first n times.

    Args:
        lvl (int): the logging level
        msg (str):
        n (int):
        name (str): name of the logger to use. Will use the caller's module by default.
        key (str or tuple[str]): one or more of "caller" and "message", defining
            how duplicated logs are identified. With `key="caller"` only the
            first call from a given call site is logged regardless of message;
            with `key="message"` a given message is logged only once no matter
            where it comes from; with both, deduplication requires the same
            caller AND the same message.
    """
    keys = (key,) if isinstance(key, str) else key
    assert len(keys) > 0

    caller_module, caller_key = _find_caller()
    hash_key = ()
    if "caller" in keys:
        hash_key = hash_key + caller_key
    if "message" in keys:
        hash_key = hash_key + (msg,)

    count = _LOG_COUNTER[hash_key] + 1
    _LOG_COUNTER[hash_key] = count
    if count <= n:
        logging.getLogger(name or caller_module).log(lvl, msg)
def log_every_n(lvl, msg, n=1, *, name=None):
    """
    Log once every n calls from the same call site.

    Args:
        lvl (int): the logging level
        msg (str):
        n (int):
        name (str): name of the logger to use. Will use the caller's module by default.
    """
    caller_module, site = _find_caller()
    _LOG_COUNTER[site] += 1
    if n == 1 or _LOG_COUNTER[site] % n == 1:
        logging.getLogger(name or caller_module).log(lvl, msg)
def log_every_n_seconds(lvl, msg, n=1, *, name=None):
    """
    Log no more than once per n seconds from the same call site.

    Args:
        lvl (int): the logging level
        msg (str):
        n (int):
        name (str): name of the logger to use. Will use the caller's module by default.
    """
    caller_module, site = _find_caller()
    last_logged = _LOG_TIMER.get(site)
    now = time.time()
    if last_logged is None or now - last_logged >= n:
        logging.getLogger(name or caller_module).log(lvl, msg)
        _LOG_TIMER[site] = now
| 7,609 | 34.395349 | 97 | py |
libai | libai-main/libai/utils/file_io.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import concurrent.futures
import errno
import logging
import os
import shutil
import tempfile
import traceback
from collections import OrderedDict
from typing import IO, Any, Callable, Dict, Iterable, List, MutableMapping, Optional, Set, Union
from urllib.parse import urlparse
import portalocker
from libai.utils.download import download
from libai.utils.non_blocking_io import NonBlockingIOManager
# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/iopath/blob/main/iopath/common/file_io.py
# https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/file_io.py
# --------------------------------------------------------
__all__ = ["LazyPath", "PathManager", "get_cache_dir", "file_lock"]
def get_cache_dir(cache_dir: Optional[str] = None) -> str:
    """
    Returns a default directory to cache static files
    (usually downloaded from Internet), if None is provided.

    Args:
        cache_dir (None or str): if not None, will be returned as is.
            If None, returns the default cache directory as:

            1) $LIBAI_CACHE, if set
            2) otherwise ~/.oneflow/iopath_cache
    """
    if cache_dir is None:
        default = os.getenv("LIBAI_CACHE", "~/.oneflow/iopath_cache")
        cache_dir = os.path.expanduser(default)
    try:
        g_pathmgr.mkdirs(cache_dir)
        assert os.access(cache_dir, os.W_OK)
    except (OSError, AssertionError):
        # Directory cannot be created or is not writable: fall back to a
        # cache under the system temp directory.
        fallback = os.path.join(tempfile.gettempdir(), "iopath_cache")
        logging.getLogger(__name__).warning(
            f"{cache_dir} is not accessible! Using {fallback} instead!"
        )
        cache_dir = fallback
    return cache_dir
def file_lock(path: str):  # type: ignore
    """
    A file lock. Once entered, it is guaranteed that no one else holds the
    same lock. Others trying to enter the lock will block for 30 minutes and
    raise an exception.

    This is useful to make sure workers don't cache files to the same location.

    Args:
        path (str): a path to be locked. This function will create a lock named
            `path + ".lock"`

    Examples:
        filename = "/path/to/file"
        with file_lock(filename):
            if not os.path.isfile(filename):
                do_create_file()
    """
    parent = os.path.dirname(path)
    try:
        os.makedirs(parent, exist_ok=True)
    except OSError:
        # makedirs is not atomic: concurrent workers may race here even with
        # exist_ok=True. Assume the directory now exists and let Lock()
        # surface any genuine failure.
        pass
    return portalocker.Lock(path + ".lock", timeout=3600)  # type: ignore
class LazyPath(os.PathLike):
    """
    A path whose value is computed lazily, on first use.

    Users should be careful not to treat it as a plain str: until it has been
    materialized (e.g. via ``os.fspath``), attribute access and indexing
    raise. Path functions such as `os.path.*` accept PathLike objects already,
    and it can be materialized to a str with `os.fspath`.
    """

    def __init__(self, func: Callable[[], str]) -> None:
        """
        Args:
            func: a zero-argument callable returning the actual path as a str.
                It will be called at most once; the result is cached.
        """
        self._func = func
        self._value: Optional[str] = None

    def _get_value(self) -> str:
        # Evaluate and cache on first use.
        if self._value is None:
            self._value = self._func()
        return self._value  # pyre-ignore

    def __fspath__(self) -> str:
        return self._get_value()

    # Behaves more like a str once evaluated.
    def __getattr__(self, name: str):  # type: ignore
        if self._value is None:
            raise AttributeError(f"Uninitialized LazyPath has no attribute: {name}.")
        return getattr(self._value, name)

    def __getitem__(self, key):  # type: ignore
        if self._value is None:
            raise TypeError("Uninitialized LazyPath is not subscriptable.")
        return self._value[key]  # type: ignore

    def __str__(self) -> str:
        return self._value if self._value is not None else super().__str__()
class PathHandler:
"""
PathHandler is a base class that defines common I/O functionality for a URI
protocol. It routes I/O for a generic URI which may look like "protocol://*"
or a canonical filepath "/foo/bar/baz".
"""
_strict_kwargs_check = True
    def __init__(
        self,
        async_executor: Optional[concurrent.futures.Executor] = None,
    ) -> None:
        """
        When registering a `PathHandler`, the user can optionally pass in a
        `Executor` to run the asynchronous file operations.
        NOTE: For regular non-async operations of `PathManager`, there is
        no need to pass `async_executor`.

        Args:
            async_executor (optional `Executor`): Used for async file operations.

        Usage:
            ```
            path_handler = NativePathHandler(async_executor=exe)
            path_manager.register_handler(path_handler)
            ```
        """
        # Created lazily on the first `_opena` call.
        self._non_blocking_io_manager = None
        self._non_blocking_io_executor = async_executor
def _check_kwargs(self, kwargs: Dict[str, Any]) -> None:
"""
Checks if the given arguments are empty. Throws a ValueError if strict
kwargs checking is enabled and args are non-empty. If strict kwargs
checking is disabled, only a warning is logged.
Args:
kwargs (Dict[str, Any])
"""
if self._strict_kwargs_check:
if len(kwargs) > 0:
raise ValueError("Unused arguments: {}".format(kwargs))
else:
logger = logging.getLogger(__name__)
for k, v in kwargs.items():
logger.warning("[PathManager] {}={} argument ignored".format(k, v))
    def _get_supported_prefixes(self) -> List[str]:
        """
        Declare which URI prefixes this handler claims.

        Returns:
            List[str]: the list of URI prefixes this PathHandler can support

        Raises:
            NotImplementedError: must be overridden by concrete handlers.
        """
        raise NotImplementedError()
    def _get_local_path(self, path: str, force: bool = False, **kwargs: Any) -> str:
        """
        Get a filepath which is compatible with native Python I/O such as `open`
        and `os.path`.

        If URI points to a remote resource, this function may download and cache
        the resource to local disk. In this case, the cache stays on filesystem
        (under `file_io.get_cache_dir()`) and will be used by a different run.
        Therefore this function is meant to be used with read-only resources.

        Args:
            path (str): A URI supported by this PathHandler
            force(bool): Forces a download from backend if set to True.

        Returns:
            local_path (str): a file path which exists on the local file system

        Raises:
            NotImplementedError: must be overridden by concrete handlers.
        """
        raise NotImplementedError()
    def _copy_from_local(
        self, local_path: str, dst_path: str, overwrite: bool = False, **kwargs: Any
    ) -> None:
        """
        Copies a local file to the specified URI.

        If the URI is another local path, this should be functionally identical
        to copy.

        Args:
            local_path (str): a file path which exists on the local file system
            dst_path (str): A URI supported by this PathHandler
            overwrite (bool): Bool flag for forcing overwrite of existing URI

        Returns:
            status (bool): True on success
            NOTE(review): the annotation says ``None`` while the docstring
            promises a bool — concrete handlers should confirm which applies.

        Raises:
            NotImplementedError: must be overridden by concrete handlers.
        """
        raise NotImplementedError()
    def _opent(
        self, path: str, mode: str = "r", buffering: int = 32, **kwargs: Any
    ) -> Iterable[Any]:
        """
        Open a path as an iterable of items (presumably a tabular / record
        stream — TODO confirm against concrete handlers).

        Raises:
            NotImplementedError: must be overridden by concrete handlers.
        """
        raise NotImplementedError()
    def _open(
        self, path: str, mode: str = "r", buffering: int = -1, **kwargs: Any
    ) -> Union[IO[str], IO[bytes]]:
        """
        Open a stream to a URI, similar to the built-in `open`.

        Args:
            path (str): A URI supported by this PathHandler
            mode (str): Specifies the mode in which the file is opened. It defaults
                to 'r'.
            buffering (int): An optional integer used to set the buffering policy.
                Pass 0 to switch buffering off and an integer >= 1 to indicate the
                size in bytes of a fixed-size chunk buffer. When no buffering
                argument is given, the default buffering policy depends on the
                underlying I/O implementation.

        Returns:
            file: a file-like object.

        Raises:
            NotImplementedError: must be overridden by concrete handlers.
        """
        raise NotImplementedError()
    def _opena(
        self,
        path: str,
        mode: str = "r",
        callback_after_file_close: Optional[Callable[[None], None]] = None,
        buffering: int = -1,
        **kwargs: Any,
    ) -> Union[IO[str], IO[bytes]]:
        """
        Open a stream to a URI with asynchronous permissions.

        NOTE: Writes to the same path are serialized so they are written in
        the same order as they were called but writes to distinct paths can
        happen concurrently.

        Usage (default / without callback function):
            for n in range(50):
                results = run_a_large_task(n)
                with path_manager.opena(uri, "w") as f:
                    f.write(results)  # Runs in separate thread
                # Main process returns immediately and continues to next iteration
            path_manager.async_close()

        Usage (advanced / with callback function):
            # To write local and then copy to Manifold:
            def cb():
                path_manager.copy_from_local(
                    "checkpoint.pt", "manifold://path/to/bucket"
                )
            f = pm.opena("checkpoint.pt", "wb", callback_after_file_close=cb)
            flow.save({...}, f)
            f.close()

        Args:
            ...same args as `_open`...
            callback_after_file_close (Callable): An optional argument that can
                be passed to perform operations that depend on the asynchronous
                writes being completed. The file is first written to the local
                disk and then the callback is executed.
            buffering (int): An optional argument to set the buffer size for
                buffered asynchronous writing.

        Returns:
            file: a file-like object with asynchronous methods.
            NOTE(review): on failure the except-branch below falls through and
            implicitly returns None — callers must handle that.
        """
        # Restrict mode until `NonBlockingIO` has async read feature.
        valid_modes = {"w", "a", "b"}
        if not all(m in valid_modes for m in mode):
            raise ValueError("`opena` mode must be write or append")

        # TODO: Each `PathHandler` should set its own `self._buffered`
        # parameter and pass that in here. Until then, we assume no
        # buffering for any storage backend.
        if not self._non_blocking_io_manager:
            # Lazily create one manager per handler, backed by the executor
            # given at construction time (may be None).
            self._non_blocking_io_manager = NonBlockingIOManager(
                buffered=False,
                executor=self._non_blocking_io_executor,
            )

        try:
            return self._non_blocking_io_manager.get_non_blocking_io(
                path=self._get_path_with_cwd(path),
                io_obj=self._open(path, mode, **kwargs),
                callback_after_file_close=callback_after_file_close,
                buffering=buffering,
            )
        except ValueError:
            # When `_strict_kwargs_check = True`, then `open_callable`
            # will throw a `ValueError`. This generic `_opena` function
            # does not check the kwargs since it may include any `_open`
            # args like `encoding`, `ttl`, `has_user_data`, etc.
            logger = logging.getLogger(__name__)
            logger.exception(
                "An exception occurred in `NonBlockingIOManager`. This "
                "is most likely due to invalid `opena` args. Make sure "
                "they match the `open` args for the `PathHandler`."
            )
            self._async_close()
def _async_join(self, path: Optional[str] = None, **kwargs: Any) -> bool:
"""
Ensures that desired async write threads are properly joined.
Args:
path (str): Pass in a file path to wait until all asynchronous
activity for that path is complete. If no path is passed in,
then this will wait until all asynchronous jobs are complete.
Returns:
status (bool): True on success
"""
if not self._non_blocking_io_manager:
logger = logging.getLogger(__name__)
logger.warning(
"This is an async feature. No threads to join because " "`opena` was not used."
)
self._check_kwargs(kwargs)
return self._non_blocking_io_manager._join(self._get_path_with_cwd(path) if path else None)
def _async_close(self, **kwargs: Any) -> bool:
"""
Closes the thread pool used for the asynchronous operations.
Returns:
status (bool): True on success
"""
if not self._non_blocking_io_manager:
logger = logging.getLogger(__name__)
logger.warning(
"This is an async feature. No threadpool to close because " "`opena` was not used."
)
self._check_kwargs(kwargs)
return self._non_blocking_io_manager._close_thread_pool()
def _copy(self, src_path: str, dst_path: str, overwrite: bool = False, **kwargs: Any) -> bool:
"""
Copies a source path to a destination path.
Args:
src_path (str): A URI supported by this PathHandler
dst_path (str): A URI supported by this PathHandler
overwrite (bool): Bool flag for forcing overwrite of existing file
Returns:
status (bool): True on success
"""
raise NotImplementedError()
def _mv(self, src_path: str, dst_path: str, **kwargs: Any) -> bool:
"""
Moves (renames) a source path to a destination path.
Args:
src_path (str): A URI supported by this PathHandler
dst_path (str): A URI supported by this PathHandler
Returns:
status (bool): True on success
"""
raise NotImplementedError()
def _exists(self, path: str, **kwargs: Any) -> bool:
"""
Checks if there is a resource at the given URI.
Args:
path (str): A URI supported by this PathHandler
Returns:
bool: true if the path exists
"""
raise NotImplementedError()
def _isfile(self, path: str, **kwargs: Any) -> bool:
"""
Checks if the resource at the given URI is a file.
Args:
path (str): A URI supported by this PathHandler
Returns:
bool: true if the path is a file
"""
raise NotImplementedError()
def _isdir(self, path: str, **kwargs: Any) -> bool:
"""
Checks if the resource at the given URI is a directory.
Args:
path (str): A URI supported by this PathHandler
Returns:
bool: true if the path is a directory
"""
raise NotImplementedError()
def _ls(self, path: str, **kwargs: Any) -> List[str]:
"""
List the contents of the directory at the provided URI.
Args:
path (str): A URI supported by this PathHandler
Returns:
List[str]: list of contents in given path
"""
raise NotImplementedError()
def _mkdirs(self, path: str, **kwargs: Any) -> None:
"""
Recursive directory creation function. Like mkdir(), but makes all
intermediate-level directories needed to contain the leaf directory.
Similar to the native `os.makedirs`.
Args:
path (str): A URI supported by this PathHandler
"""
raise NotImplementedError()
def _rm(self, path: str, **kwargs: Any) -> None:
"""
Remove the file (not directory) at the provided URI.
Args:
path (str): A URI supported by this PathHandler
"""
raise NotImplementedError()
def _symlink(self, src_path: str, dst_path: str, **kwargs: Any) -> bool:
"""
Symlink the src_path to the dst_path
Args:
src_path (str): A URI supported by this PathHandler to symlink from
dst_path (str): A URI supported by this PathHandler to symlink to
"""
raise NotImplementedError()
def _set_cwd(self, path: Union[str, None], **kwargs: Any) -> bool:
"""
Set the current working directory. PathHandler classes prepend the cwd
to all URI paths that are handled.
Args:
path (str) or None: A URI supported by this PathHandler. Must be a valid
absolute path or None to set the cwd to None.
Returns:
bool: true if cwd was set without errors
"""
raise NotImplementedError()
def _get_path_with_cwd(self, path: str) -> str:
"""
Default implementation. PathHandler classes that provide a `_set_cwd`
feature should also override this `_get_path_with_cwd` method.
Args:
path (str): A URI supported by this PathHandler.
Returns:
path (str): Full path with the cwd attached.
"""
return path
class NativePathHandler(PathHandler):
    """
    Handles paths that can be accessed using Python native system calls. This
    handler uses `open()` and `os.*` calls on the given path.
    """
    # Per-handler working directory; None means "no cwd set".
    _cwd = None
    def _get_local_path(self, path: str, force: bool = False, **kwargs: Any) -> str:
        """
        Native paths are already local, so simply return the path.
        Args:
            path (str): a local file path
            force (bool): ignored here; there is nothing to (re-)download
        Returns:
            local_path (str): the same file path
        """
        self._check_kwargs(kwargs)
        return os.fspath(path)
    def _copy_from_local(
        self, local_path: str, dst_path: str, overwrite: bool = False, **kwargs: Any
    ) -> None:
        """
        Copy a local file to `dst_path`. For native paths this is a plain
        copy; both paths are resolved against the handler cwd first.
        """
        self._check_kwargs(kwargs)
        local_path = self._get_path_with_cwd(local_path)
        dst_path = self._get_path_with_cwd(dst_path)
        assert self._copy(src_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs)
    def _open(
        self,
        path: str,
        mode: str = "r",
        buffering: int = -1,
        encoding: Optional[str] = None,
        errors: Optional[str] = None,
        newline: Optional[str] = None,
        closefd: bool = True,
        opener: Optional[Callable] = None,
        **kwargs: Any,
    ) -> Union[IO[str], IO[bytes]]:
        """
        Open a path with the built-in `open`.
        Args:
            path (str): a local file path
            mode (str): mode in which the file is opened; defaults to 'r'.
            buffering (int): 0 switches buffering off, an integer >= 1 sets a
                fixed-size chunk buffer, and the default (-1) uses the
                platform's default policy (fixed-size chunks for binary
                files, typically 4096 or 8192 bytes).
            encoding (Optional[str]): encoding used to decode/encode the file
                (text mode only).
            errors (Optional[str]): how encoding/decoding errors are handled
                (cannot be used in binary mode).
            newline (Optional[str]): universal-newline handling (text mode
                only); one of None, '', '\\n', '\\r', '\\r\\n'.
            closefd (bool): when a file descriptor rather than a filename is
                given, False keeps the descriptor open after the file closes.
                With a filename, closefd must be True (the default).
            opener (Optional[Callable]): custom opener called with
                (file, flags); must return an open file descriptor.
            See https://docs.python.org/3/library/functions.html#open for details.
        Returns:
            file: a file-like object.
        """
        self._check_kwargs(kwargs)
        return open(  # type: ignore
            self._get_path_with_cwd(path),
            mode,
            buffering=buffering,
            encoding=encoding,
            errors=errors,
            newline=newline,
            closefd=closefd,
            opener=opener,
        )
    def _copy(self, src_path: str, dst_path: str, overwrite: bool = False, **kwargs: Any) -> bool:
        """
        Copy a source path to a destination path.
        Args:
            src_path (str): A URI supported by this PathHandler
            dst_path (str): A URI supported by this PathHandler
            overwrite (bool): force overwrite of an existing destination file
        Returns:
            status (bool): True on success
        """
        self._check_kwargs(kwargs)
        src_path = self._get_path_with_cwd(src_path)
        dst_path = self._get_path_with_cwd(dst_path)
        if os.path.exists(dst_path) and not overwrite:
            logger = logging.getLogger(__name__)
            logger.error("Destination file {} already exists.".format(dst_path))
            return False
        try:
            shutil.copyfile(src_path, dst_path)
            return True
        except Exception as e:
            logger = logging.getLogger(__name__)
            logger.error("Error in file copy - {}".format(str(e)))
            return False
    def _mv(self, src_path: str, dst_path: str, **kwargs: Any) -> bool:
        """
        Move (rename) a source path to a destination path. Unlike `_copy`,
        an existing destination is never overwritten.
        Args:
            src_path (str): A URI supported by this PathHandler
            dst_path (str): A URI supported by this PathHandler
        Returns:
            status (bool): True on success
        """
        self._check_kwargs(kwargs)
        src_path = self._get_path_with_cwd(src_path)
        dst_path = self._get_path_with_cwd(dst_path)
        if os.path.exists(dst_path):
            logger = logging.getLogger(__name__)
            logger.error("Destination file {} already exists.".format(dst_path))
            return False
        try:
            shutil.move(src_path, dst_path)
            return True
        except Exception as e:
            logger = logging.getLogger(__name__)
            logger.error("Error in move operation - {}".format(str(e)))
            return False
    def _symlink(self, src_path: str, dst_path: str, **kwargs: Any) -> bool:
        """
        Create a symlink at `dst_path` pointing to `src_path`. Fails if the
        source is missing or the destination already exists.
        Args:
            src_path (str): A URI supported by this PathHandler
            dst_path (str): A URI supported by this PathHandler
        Returns:
            status (bool): True on success
        """
        self._check_kwargs(kwargs)
        src_path = self._get_path_with_cwd(src_path)
        dst_path = self._get_path_with_cwd(dst_path)
        logger = logging.getLogger(__name__)
        if not os.path.exists(src_path):
            logger.error("Source path {} does not exist".format(src_path))
            return False
        if os.path.exists(dst_path):
            logger.error("Destination path {} already exists.".format(dst_path))
            return False
        try:
            os.symlink(src_path, dst_path)
            return True
        except Exception as e:
            logger.error("Error in symlink - {}".format(str(e)))
            return False
    def _exists(self, path: str, **kwargs: Any) -> bool:
        """Return True if a cwd-resolved path exists."""
        self._check_kwargs(kwargs)
        return os.path.exists(self._get_path_with_cwd(path))
    def _isfile(self, path: str, **kwargs: Any) -> bool:
        """Return True if a cwd-resolved path is a regular file."""
        self._check_kwargs(kwargs)
        return os.path.isfile(self._get_path_with_cwd(path))
    def _isdir(self, path: str, **kwargs: Any) -> bool:
        """Return True if a cwd-resolved path is a directory."""
        self._check_kwargs(kwargs)
        return os.path.isdir(self._get_path_with_cwd(path))
    def _ls(self, path: str, **kwargs: Any) -> List[str]:
        """Return the directory listing of a cwd-resolved path."""
        self._check_kwargs(kwargs)
        return os.listdir(self._get_path_with_cwd(path))
    def _mkdirs(self, path: str, **kwargs: Any) -> None:
        """Recursively create a directory (like `os.makedirs`)."""
        self._check_kwargs(kwargs)
        try:
            # Resolve against the handler cwd, consistent with `_exists`,
            # `_ls`, `_open`, etc. (previously the raw path was used here).
            os.makedirs(self._get_path_with_cwd(path), exist_ok=True)
        except OSError as e:
            # EEXIST it can still happen if multiple processes are creating the dir
            if e.errno != errno.EEXIST:
                raise
    def _rm(self, path: str, **kwargs: Any) -> None:
        """Remove the file (not directory) at the provided path."""
        self._check_kwargs(kwargs)
        # Resolve against the handler cwd, consistent with the other methods
        # of this class (previously the raw path was used here).
        os.remove(self._get_path_with_cwd(path))
    def _set_cwd(self, path: Union[str, None], **kwargs: Any) -> bool:
        """
        Set (or clear, with None) the working directory that all subsequent
        paths are resolved against. The path must exist and be absolute.
        """
        self._check_kwargs(kwargs)
        # Remove cwd path if None
        if path is None:
            self._cwd = None
            return True
        # Make sure path is a valid Unix path
        if not os.path.exists(path):
            raise ValueError(f"{path} is not a valid Unix path")
        # Make sure path is an absolute path
        if not os.path.isabs(path):
            raise ValueError(f"{path} is not an absolute path")
        self._cwd = path
        return True
    def _get_path_with_cwd(self, path: str) -> str:
        """Join `path` onto the cwd (if set) and normalize the result."""
        return os.path.normpath(path if not self._cwd else os.path.join(self._cwd, path))
class HTTPURLHandler(PathHandler):
    """
    Download URLs and cache them to disk.
    """
    def __init__(self) -> None:
        # Maps a remote URL to the local path of its cached copy.
        self.cache_map: Dict[str, str] = {}
    def _get_supported_prefixes(self) -> List[str]:
        return ["http://", "https://", "ftp://"]
    def _get_local_path(self, path: str, force: bool = False, **kwargs: Any) -> str:
        """
        This implementation downloads the remote resource and caches it locally.
        The resource will only be downloaded if not previously requested.
        Args:
            path (str): a remote URL supported by this handler
            force (bool): re-download even if a cached copy is recorded
        Returns:
            str: local path of the cached file
        """
        self._check_kwargs(kwargs)
        # Download when forced, when the URL was never fetched, or when the
        # previously cached file has disappeared from disk.
        if force or path not in self.cache_map or not os.path.exists(self.cache_map[path]):
            logger = logging.getLogger(__name__)
            parsed_url = urlparse(path)
            # Mirror the URL's directory structure under the cache root.
            dirname = os.path.join(get_cache_dir(), os.path.dirname(parsed_url.path.lstrip("/")))
            filename = path.split("/")[-1]
            cached = os.path.join(dirname, filename)
            # File lock + re-check so that concurrent processes download the
            # same URL only once.
            with file_lock(cached):
                if not os.path.isfile(cached):
                    logger.info("Downloading {} ...".format(path))
                    cached = download(path, dirname, filename=filename)
            logger.info("URL {} cached in {}".format(path, cached))
            self.cache_map[path] = cached
        return self.cache_map[path]
    def _open(
        self, path: str, mode: str = "r", buffering: int = -1, **kwargs: Any
    ) -> Union[IO[str], IO[bytes]]:
        """
        Open a remote HTTP path. The resource is first downloaded and cached
        locally.
        Args:
            path (str): A URI supported by this PathHandler
            mode (str): Specifies the mode in which the file is opened. It defaults
                to 'r'. Only read modes ('r'/'rb') are supported.
            buffering (int): Not used for this PathHandler; must stay -1.
        Returns:
            file: a file-like object.
        """
        self._check_kwargs(kwargs)
        assert mode in ("r", "rb"), "{} does not support open with {} mode".format(
            self.__class__.__name__, mode
        )
        assert (
            buffering == -1
        ), f"{self.__class__.__name__} does not support the `buffering` argument"
        # Fetch (or reuse) the cached local copy, then open it natively.
        local_path = self._get_local_path(path, force=False)
        return open(local_path, mode)
class OneDrivePathHandler(HTTPURLHandler):
    """
    Map OneDrive (short) URLs to direct download links
    """
    ONE_DRIVE_PREFIX = "https://1drv.ms/u/s!"
    def create_one_drive_direct_download(self, one_drive_url: str) -> str:
        """
        Convert a short OneDrive URI into a direct-download link that can be
        used with wget.
        Args:
            one_drive_url (str): A OneDrive URI supported by this PathHandler
        Returns:
            result_url (str): A direct download URI for the file
        """
        # The OneDrive API expects the share URL encoded with the URL-safe
        # base64 alphabet ('+' -> '-', '/' -> '_') and without '=' padding.
        encoded = base64.urlsafe_b64encode(one_drive_url.encode("utf-8"))
        share_token = encoded.decode("utf-8").rstrip("=")
        return f"https://api.onedrive.com/v1.0/shares/u!{share_token}/root/content"
    def _get_supported_prefixes(self) -> List[str]:
        return [self.ONE_DRIVE_PREFIX]
    def _get_local_path(self, path: str, force: bool = False, **kwargs: Any) -> str:
        """
        Download the resource behind the short OneDrive URL via its direct
        link and cache it locally (see `HTTPURLHandler._get_local_path`).
        """
        logger = logging.getLogger(__name__)
        direct_url = self.create_one_drive_direct_download(path)
        logger.info(f"URL {path} mapped to direct download link {direct_url}")
        return super()._get_local_path(os.fspath(direct_url), force=force, **kwargs)
class PathManagerBase:
    """
    A class for users to open generic paths or translate generic paths to file names.
    path_manager.method(path) will do the following:
    1. Find a handler by checking the prefixes in `self._path_handlers`.
    2. Call handler.method(path) on the handler that's found
    """
    def __init__(self) -> None:
        self._path_handlers: MutableMapping[str, PathHandler] = OrderedDict()
        """
        Dict for path prefix to handler
        """
        self._native_path_handler: PathHandler = NativePathHandler()
        """
        A NativePathHandler that works on posix paths. This is used as the fallback.
        """
        self._cwd: Optional[str] = None
        """
        Keeps track of the single cwd (if set).
        NOTE: Only one PathHandler can have a cwd set at a time.
        """
        self._async_handlers: Set[PathHandler] = set()
        """
        Keeps track of the PathHandler subclasses where `opena` was used so
        all of the threads can be properly joined when calling
        `PathManager.join`.
        """
    def __get_path_handler(self, path: Union[str, os.PathLike]) -> PathHandler:
        """
        Finds a PathHandler that supports the given path. Falls back to the native
        PathHandler if no other handler is found.
        Args:
            path (str or os.PathLike): URI path to resource
        Returns:
            handler (PathHandler)
        """
        path = os.fspath(path) # pyre-ignore
        # `_path_handlers` is kept sorted with longer prefixes first (see
        # `register_handler`), so the most specific matching handler wins.
        for p in self._path_handlers.keys():
            if path.startswith(p):
                return self._path_handlers[p]
        return self._native_path_handler
    def opent(
        self, path: str, mode: str = "r", buffering: int = 32, **kwargs: Any
    ) -> Iterable[Any]:
        """
        Open a tabular data source. Only reading is supported.
        The opent() returns a Python iterable collection object, compared to
        bytes/text data with open()
        Args:
            path (str): A URI supported by this PathHandler
            mode (str): Specifies the mode in which the file is opened. It defaults
                to 'r'
            buffering (int): number of rows fetched and cached
        Returns:
            An iterable collection object.
        """
        return self.__get_path_handler(path)._opent(path, mode, buffering, **kwargs)
    def open(
        self, path: str, mode: str = "r", buffering: int = -1, **kwargs: Any
    ) -> Union[IO[str], IO[bytes]]:
        """
        Open a stream to a URI, similar to the built-in `open`.
        Args:
            path (str): A URI supported by this PathHandler
            mode (str): Specifies the mode in which the file is opened. It defaults
                to 'r'.
            buffering (int): An optional integer used to set the buffering policy.
                Pass 0 to switch buffering off and an integer >= 1 to indicate the
                size in bytes of a fixed-size chunk buffer. When no buffering
                argument is given, the default buffering policy depends on the
                underlying I/O implementation.
        Returns:
            file: a file-like object.
        """
        return self.__get_path_handler(path)._open( # type: ignore
            path, mode, buffering=buffering, **kwargs
        )
    # NOTE: This feature is only implemented for `NativePathHandler` and can
    # currently only be used in write mode.
    def opena(
        self,
        path: str,
        mode: str = "r",
        buffering: int = -1,
        callback_after_file_close: Optional[Callable[[None], None]] = None,
        **kwargs: Any,
    ) -> Union[IO[str], IO[bytes]]:
        """
        Open a file with asynchronous permissions. `f.write()` calls (and
        potentially `f.read()` calls in the future) will be dispatched
        asynchronously such that the main program can continue running.
        NOTE: Writes to the same path are serialized so they are written in
        the same order as they were called but writes to distinct paths can
        happen concurrently.
        Usage (default / without callback function):
            for n in range(50):
                results = run_a_large_task(n)
                # `f` is a file-like object with asynchronous methods
                with path_manager.opena(uri, "w") as f:
                    f.write(results)  # Runs in separate thread
                # Main process returns immediately and continues to next iteration
            path_manager.async_close()
        Usage (advanced / with callback function):
            # To asynchronously write to Manifold:
            def cb():
                path_manager.copy_from_local(
                    "checkpoint.pt", "manifold://path/to/bucket"
                )
            f = pm.opena("checkpoint.pt", "wb", callback_after_file_close=cb)
            oneflow.save({...}, f)
            f.close()
        Args:
            ...
            callback_after_file_close (Callable): An optional argument that can
                be passed to perform operations that depend on the asynchronous
                writes being completed. The file is first written to the local
                disk and then the callback is executed.
        Returns:
            file: a file-like object with asynchronous methods.
        """
        non_blocking_io = self.__get_path_handler(path)._opena(
            path,
            mode,
            buffering=buffering,
            callback_after_file_close=callback_after_file_close,
            **kwargs,
        )
        # Keep track of the path handlers where `opena` is used so that all of the
        # threads can be properly joined on `PathManager.join`.
        self._async_handlers.add(self.__get_path_handler(path))
        return non_blocking_io
    def async_join(self, *paths: str, **kwargs: Any) -> bool:
        """
        Ensures that desired async write threads are properly joined.
        Usage:
            Wait for asynchronous methods operating on specific file paths to
            complete.
                async_join("path/to/file1.txt")
                async_join("path/to/file2.txt", "path/to/file3.txt")
            Wait for all asynchronous methods to complete.
                async_join()
        Args:
            *paths (str): Pass in any number of file paths and `async_join` will wait
                until all asynchronous activity for those paths is complete. If no
                paths are passed in, then `async_join` will wait until all asynchronous
                jobs are complete.
        Returns:
            status (bool): True on success
        """
        success = True
        if not paths: # Join all.
            for handler in self._async_handlers:
                success = handler._async_join(**kwargs) and success
        else: # Join specific paths.
            for path in paths:
                success = self.__get_path_handler(path)._async_join(path, **kwargs) and success
        return success
    def async_close(self, **kwargs: Any) -> bool:
        """
        `async_close()` must be called at the very end of any script that uses the
        asynchronous `opena` feature. This calls `async_join()` first and then closes
        the thread pool used for the asynchronous operations.
        Returns:
            status (bool): True on success
        """
        success = self.async_join(**kwargs)
        for handler in self._async_handlers:
            success = handler._async_close(**kwargs) and success
        self._async_handlers.clear()
        return success
    def copy(self, src_path: str, dst_path: str, overwrite: bool = False, **kwargs: Any) -> bool:
        """
        Copies a source path to a destination path.
        Args:
            src_path (str): A URI supported by this PathHandler
            dst_path (str): A URI supported by this PathHandler
            overwrite (bool): Bool flag for forcing overwrite of existing file
        Returns:
            status (bool): True on success
        """
        # Copying across handlers is not supported.
        assert self.__get_path_handler(src_path) == self.__get_path_handler( # type: ignore
            dst_path
        )
        return self.__get_path_handler(src_path)._copy(src_path, dst_path, overwrite, **kwargs)
    def mv(self, src_path: str, dst_path: str, **kwargs: Any) -> bool:
        """
        Moves (renames) a source path supported by NativePathHandler to
        a destination path.
        Args:
            src_path (str): A URI supported by NativePathHandler
            dst_path (str): A URI supported by NativePathHandler
        Returns:
            status (bool): True on success
        Exception:
            Asserts if both the src and dest paths are not supported by
            NativePathHandler.
        """
        # Moving across handlers is not supported.
        assert self.__get_path_handler(src_path) == self.__get_path_handler( # type: ignore
            dst_path
        ), "Src and dest paths must be supported by the same path handler."
        return self.__get_path_handler(src_path)._mv(src_path, dst_path, **kwargs)
    def get_local_path(self, path: str, force: bool = False, **kwargs: Any) -> str:
        """
        Get a filepath which is compatible with native Python I/O such as `open`
        and `os.path`.
        If URI points to a remote resource, this function may download and cache
        the resource to local disk.
        Args:
            path (str): A URI supported by this PathHandler
            force(bool): Forces a download from backend if set to True.
        Returns:
            local_path (str): a file path which exists on the local file system
        """
        path = os.fspath(path)
        return self.__get_path_handler(path)._get_local_path( # type: ignore
            path, force=force, **kwargs
        )
    def copy_from_local(
        self, local_path: str, dst_path: str, overwrite: bool = False, **kwargs: Any
    ) -> None:
        """
        Copies a local file to the specified URI.
        If the URI is another local path, this should be functionally identical
        to copy.
        Args:
            local_path (str): a file path which exists on the local file system
            dst_path (str): A URI supported by this PathHandler
            overwrite (bool): Bool flag for forcing overwrite of existing URI
        Returns:
            status (bool): True on success
        """
        assert os.path.exists(local_path)
        return self.__get_path_handler(dst_path)._copy_from_local(
            local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
        )
    def exists(self, path: str, **kwargs: Any) -> bool:
        """
        Checks if there is a resource at the given URI.
        Args:
            path (str): A URI supported by this PathHandler
        Returns:
            bool: true if the path exists
        """
        return self.__get_path_handler(path)._exists(path, **kwargs) # type: ignore
    def isfile(self, path: str, **kwargs: Any) -> bool:
        """
        Checks if there the resource at the given URI is a file.
        Args:
            path (str): A URI supported by this PathHandler
        Returns:
            bool: true if the path is a file
        """
        return self.__get_path_handler(path)._isfile(path, **kwargs) # type: ignore
    def isdir(self, path: str, **kwargs: Any) -> bool:
        """
        Checks if the resource at the given URI is a directory.
        Args:
            path (str): A URI supported by this PathHandler
        Returns:
            bool: true if the path is a directory
        """
        return self.__get_path_handler(path)._isdir(path, **kwargs) # type: ignore
    def ls(self, path: str, **kwargs: Any) -> List[str]:
        """
        List the contents of the directory at the provided URI.
        Args:
            path (str): A URI supported by this PathHandler
        Returns:
            List[str]: list of contents in given path
        """
        return self.__get_path_handler(path)._ls(path, **kwargs)
    def mkdirs(self, path: str, **kwargs: Any) -> None:
        """
        Recursive directory creation function. Like mkdir(), but makes all
        intermediate-level directories needed to contain the leaf directory.
        Similar to the native `os.makedirs`.
        Args:
            path (str): A URI supported by this PathHandler
        """
        return self.__get_path_handler(path)._mkdirs(path, **kwargs) # type: ignore
    def rm(self, path: str, **kwargs: Any) -> None:
        """
        Remove the file (not directory) at the provided URI.
        Args:
            path (str): A URI supported by this PathHandler
        """
        return self.__get_path_handler(path)._rm(path, **kwargs) # type: ignore
    def symlink(self, src_path: str, dst_path: str, **kwargs: Any) -> bool:
        """Symlink the src_path to the dst_path
        Args:
            src_path (str): A URI supported by this PathHandler to symlink from
            dst_path (str): A URI supported by this PathHandler to symlink to
        """
        # Copying across handlers is not supported.
        assert self.__get_path_handler(src_path) == self.__get_path_handler( # type: ignore
            dst_path
        )
        return self.__get_path_handler(src_path)._symlink(src_path, dst_path, **kwargs)
    def set_cwd(self, path: Union[str, None], **kwargs: Any) -> bool:
        """
        Set the current working directory. PathHandler classes prepend the cwd
        to all URI paths that are handled.
        Args:
            path (str) or None: A URI supported by this PathHandler. Must be a valid
                absolute Unix path or None to set the cwd to None.
        Returns:
            bool: true if cwd was set without errors
        """
        # Clearing an already-clear cwd is a no-op.
        if path is None and self._cwd is None:
            return True
        # When clearing (path=None), dispatch on the handler that owns the
        # current cwd; otherwise dispatch on the new path.
        if self.__get_path_handler(path or self._cwd)._set_cwd(path, **kwargs): # type: ignore
            self._cwd = path
            return True
        return False
    def register_handler(self, handler: PathHandler, allow_override: bool = False) -> None:
        """
        Register a path handler associated with `handler._get_supported_prefixes`
        URI prefixes.
        Args:
            handler (PathHandler)
            allow_override (bool): allow overriding existing handler for prefix
        """
        logger = logging.getLogger(__name__)
        assert isinstance(handler, PathHandler), handler
        # Allow override of `NativePathHandler` which is automatically
        # instantiated by `PathManager`.
        if isinstance(handler, NativePathHandler):
            if allow_override:
                self._native_path_handler = handler
            else:
                raise ValueError(
                    "`NativePathHandler` is registered by default. Use the "
                    "`allow_override=True` kwarg to override it."
                )
            return
        for prefix in handler._get_supported_prefixes():
            if prefix not in self._path_handlers:
                self._path_handlers[prefix] = handler
                continue
            old_handler_type = type(self._path_handlers[prefix])
            if allow_override:
                # if using the global PathManager, show the warnings
                global g_pathmgr
                if self == g_pathmgr:
                    logger.warning(
                        f"[PathManager] Attempting to register prefix '{prefix}' from "
                        "the following call stack:\n" + "".join(traceback.format_stack(limit=5))
                        # show the most recent callstack
                    )
                    logger.warning(
                        f"[PathManager] Prefix '{prefix}' is already registered "
                        f"by {old_handler_type}. We will override the old handler. "
                        "To avoid such conflicts, create a project-specific PathManager "
                        "instead."
                    )
                self._path_handlers[prefix] = handler
            else:
                raise KeyError(
                    f"[PathManager] Prefix '{prefix}' already registered by {old_handler_type}!"
                )
        # Sort path handlers in reverse order so longer prefixes take priority,
        # eg: http://foo/bar before http://foo
        self._path_handlers = OrderedDict(
            sorted(self._path_handlers.items(), key=lambda t: t[0], reverse=True)
        )
    def set_strict_kwargs_checking(self, enable: bool) -> None:
        """
        Toggles strict kwargs checking. If enabled, a ValueError is thrown if any
        unused parameters are passed to a PathHandler function. If disabled, only
        a warning is given.
        With a centralized file API, there's a tradeoff of convenience and
        correctness delegating arguments to the proper I/O layers. An underlying
        `PathHandler` may support custom arguments which should not be statically
        exposed on the `PathManager` function. For example, a custom `HTTPURLHandler`
        may want to expose a `cache_timeout` argument for `open()` which specifies
        how old a locally cached resource can be before it's refetched from the
        remote server. This argument would not make sense for a `NativePathHandler`.
        If strict kwargs checking is disabled, `cache_timeout` can be passed to
        `PathManager.open` which will forward the arguments to the underlying
        handler. By default, checking is enabled since it is innately unsafe:
        multiple `PathHandler`s could reuse arguments with different semantic
        meanings or types.
        Args:
            enable (bool)
        """
        self._native_path_handler._strict_kwargs_check = enable
        for handler in self._path_handlers.values():
            handler._strict_kwargs_check = enable
class PathManagerFactory:
    """
    Factory responsible for creating new PathManager instances and removing
    them when they are no longer needed.
    PathManager can also be instantiated directly, but going through this
    factory is the recommended route.
    """
    GLOBAL_PATH_MANAGER = "global_path_manager"
    pm_list = {}
    @staticmethod
    def get(key=GLOBAL_PATH_MANAGER) -> PathManagerBase:
        """
        Return the path manager instance registered under `key`, creating
        and registering a fresh instance on first use.
        Args:
            key (str):
        """
        registry = PathManagerFactory.pm_list
        if key not in registry:
            registry[key] = PathManagerBase()
        return registry[key]
    @staticmethod
    def remove(key):
        """
        Drop the path manager instance registered under `key`, if any.
        Args:
            key (str):
        """
        PathManagerFactory.pm_list.pop(key, None)
"""
A global instance of PathManager.
This global instance is provided for backward compatibility, but it is
recommended that clients use PathManagerFactory
"""
g_pathmgr = PathManagerFactory.get()
PathManager = PathManagerBase()
PathManager.register_handler(HTTPURLHandler())
PathManager.register_handler(OneDrivePathHandler())
| 50,046 | 36.742836 | 99 | py |
libai | libai-main/libai/utils/history_buffer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple
import numpy as np
# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/fvcore/blob/main/fvcore/common/history_buffer.py
# --------------------------------------------------------
class HistoryBuffer:
    """
    Record a series of scalar values and expose statistics over a trailing
    window (median / mean) as well as the running average of every value
    ever added.
    """

    def __init__(self, max_length: int = 1000000):
        """
        Args:
            max_length: capacity of the buffer. Once full, the oldest
                entry is discarded for every new value appended.
        """
        self._capacity: int = max_length
        # Stored as (value, iteration) pairs, oldest first.
        self._entries: List[Tuple[float, float]] = []
        self._total_count: int = 0
        self._running_mean: float = 0

    def update(self, value: float, iteration: float = None):
        """
        Append a scalar produced at a certain iteration. When the buffer
        is at capacity, the oldest element is evicted first.
        """
        if iteration is None:
            iteration = self._total_count
        if len(self._entries) == self._capacity:
            self._entries.pop(0)
        self._entries.append((value, iteration))
        # Incremental running mean over *all* values ever seen, including
        # those already evicted from the bounded buffer.
        self._total_count += 1
        self._running_mean += (value - self._running_mean) / self._total_count

    def latest(self):
        """Return the most recently added scalar value."""
        return self._entries[-1][0]

    def median(self, window_size: int):
        """Return the median of the latest ``window_size`` values."""
        window = [value for value, _ in self._entries[-window_size:]]
        return np.median(window)

    def avg(self, window_size: int):
        """Return the mean of the latest ``window_size`` values."""
        window = [value for value, _ in self._entries[-window_size:]]
        return np.mean(window)

    def global_avg(self):
        """
        Return the mean of all elements ever added, including values that
        were dropped because of the capacity limit.
        """
        return self._running_mean

    def values(self):
        """
        Returns:
            list[(number, iteration)]: content of the current buffer.
        """
        return self._entries
| 3,019 | 32.555556 | 86 | py |
libai | libai-main/libai/utils/distributed.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import dill
import numpy as np
import oneflow as flow
from omegaconf import OmegaConf
from libai.config import try_get_key
logger = logging.getLogger(__name__)
_DIST_UTIL = None
def _merge_devices(devices):
    """Flatten (node_id, local_device_id) pairs into global device ranks."""
    num_gpus_per_node = get_world_size() // get_num_nodes()
    merged = []
    for node_id, device_id in devices:
        merged.append(node_id * num_gpus_per_node + device_id)
    return merged
class _DistributeUtil(object):
    """Parse the `train.dist` config and hold the derived parallel layout:
    world/tensor/pipeline/data parallel sizes, the per-layer placement
    groups, and the (data, tensor) parallel hierarchy."""

    def __init__(self, cfg):
        self._init_distributed_env(cfg)
        self._init_parallel_size(cfg)
        self._init_placement_group(cfg)
        self._init_parallel_hierarchy()

    def _init_distributed_env(self, cfg):
        """Initialize the distributed environment."""
        num_nodes = get_num_nodes()
        num_gpus_per_node = get_world_size() // num_nodes
        if try_get_key(cfg, "num_gpus_per_node", default=num_gpus_per_node) != num_gpus_per_node:
            # This means key(num_gpus_per_node) saved in config is not equal
            # to environment variable.
            # Give user a warning about inconsistent reproduce environment.
            logger.warning(
                "'train.dist.num_gpus_per_node' are not equal to environment variable. "
                f"{cfg.num_gpus_per_node} != {num_gpus_per_node}"
            )
        if try_get_key(cfg, "num_nodes", default=num_nodes) != num_nodes:
            logger.warning(
                "'train.dist.num_nodes' are not equal to"
                f"environment variable. {cfg.num_nodes} != {num_nodes}"
            )
        # Set the actual value to config
        cfg.num_nodes = num_nodes
        cfg.num_gpus_per_node = num_gpus_per_node
        self._num_nodes = num_nodes
        self._num_gpus_per_node = num_gpus_per_node
        self._world_size = num_gpus_per_node * num_nodes
        # Add set device type
        self._device_type = try_get_key(cfg, "device_type", default="cuda")

    def _init_parallel_size(self, cfg):
        """Derive tensor/pipeline/model/data parallel sizes from cfg and
        write the clamped values back into cfg."""
        # tensor parallel size
        self._tensor_parallel_size = min(cfg.tensor_parallel_size, self.world_size)
        assert self.world_size % self._tensor_parallel_size == 0, (
            f"world size ({self.world_size}) is not divisible by"
            f" tensor parallel size ({self._tensor_parallel_size})"
        )
        # Set the actual tensor parallel size to cfg
        cfg.tensor_parallel_size = self._tensor_parallel_size
        # pipeline parallel size
        self._pipeline_parallel_size = min(
            cfg.pipeline_parallel_size, self.world_size // cfg.tensor_parallel_size
        )
        # Set the actual pipeline parallel size to cfg
        cfg.pipeline_parallel_size = self._pipeline_parallel_size
        if cfg.pipeline_parallel_size > 1:
            assert (
                try_get_key(cfg, "pipeline_num_layers") is not None
            ), "cfg.train.dist.pipeline_num_layers must be set when run pipeline parallel"
            assert cfg.pipeline_num_layers >= self._pipeline_parallel_size, (
                f"number of layers ({cfg.pipeline_num_layers}) is less than"
                f" pipeline model parallel size ({self._pipeline_parallel_size})"
            )
            if try_get_key(cfg, "custom_pipeline_stage_id") is not None:
                assert OmegaConf.is_list(
                    cfg.custom_pipeline_stage_id
                ), "type of cfg.train.dist.custom_pipeline_stage_id must be list"
                cfg.custom_pipeline_stage_id = list(cfg.custom_pipeline_stage_id)
                # NOTE(review): this bound looks too lax — stage ids index
                # `stages_devices` (length pipeline_parallel_size), so values
                # in [pipeline_parallel_size, world_size) would still pass
                # here and fail later. Confirm intended semantics.
                assert max(cfg.custom_pipeline_stage_id) < self._world_size, (
                    f"the element {max(cfg.custom_pipeline_stage_id)} in"
                    " cfg.train.dist.custom_pipeline_stage_id is out of range"
                    f" for total rank {self._world_size}"
                )
                # BUGFIX: `cfg` here is already the `train.dist` sub-config,
                # so the message must read `cfg.pipeline_num_layers`; the old
                # `cfg.train.dist.pipeline_num_layers` raised AttributeError
                # while formatting this assertion message.
                assert len(cfg.custom_pipeline_stage_id) == cfg.pipeline_num_layers, (
                    "the length of cfg.train.dist.custom_pipeline_stage_id"
                    f" {len(cfg.custom_pipeline_stage_id)} must be equal to"
                    " cfg.train.dist.pipeline_num_layers"
                    f" {cfg.pipeline_num_layers}"
                )
        else:
            # no pipeline parallel, just set 10000
            if try_get_key(cfg, "pipeline_num_layers") is None:
                cfg.pipeline_num_layers = 10000
        self._model_parallel_size = self._pipeline_parallel_size * self._tensor_parallel_size
        assert self.world_size % self._model_parallel_size == 0, (
            f"world size ({self.world_size}) is not divisible by"
            f" tensor model parallel size ({self._tensor_parallel_size}) times"
            f" pipeline model parallel size ({self._pipeline_parallel_size})"
        )
        # data parallel size
        self._data_parallel_size = self.world_size // self._model_parallel_size
        # Set the actual data parallel size to cfg
        cfg.data_parallel_size = self._data_parallel_size

    def _init_placement_group(self, cfg):
        """Assign every pipeline layer a stage id and the rank group
        (placement) that stage runs on."""
        node_ids = [i // self.num_gpus_per_node for i in range(self.world_size)]
        device_ids = list(range(self.num_gpus_per_node)) * self.num_nodes
        # [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3)]
        devices = [(n, d) for n, d in zip(node_ids, device_ids)]
        num_devices_per_stage = self.world_size // self._pipeline_parallel_size
        stages_devices = [
            _merge_devices(devices[i : (i + num_devices_per_stage)])
            for i in range(0, self.world_size, num_devices_per_stage)
        ]
        # change pipeline_num_layers to make the middle stages contain more layers
        if (
            self._pipeline_parallel_size >= 4
            and cfg.pipeline_num_layers >= 8
            and cfg.pipeline_num_layers % self._pipeline_parallel_size == 0
        ):
            temp_num_layers_per_stage = cfg.pipeline_num_layers // self._pipeline_parallel_size
            actual_pipeline_num_layers = cfg.pipeline_num_layers + min(
                self._pipeline_parallel_size - 1, temp_num_layers_per_stage
            )
        else:
            actual_pipeline_num_layers = cfg.pipeline_num_layers
        num_layers_per_stage = actual_pipeline_num_layers // self._pipeline_parallel_size
        stage_offset = actual_pipeline_num_layers % self._pipeline_parallel_size
        # stage_offset can make the later stages contain more layers when pipeline_num_layers
        # cannot be divided by pipeline_parallel_size.
        # This can make pipeline parallel more memory efficient.
        self._layer_stage_ids = []
        for i in range(0, actual_pipeline_num_layers - stage_offset, num_layers_per_stage):
            stage_id = i // num_layers_per_stage
            # The last `stage_offset` stages each get one extra layer.
            if stage_id >= (self._pipeline_parallel_size - stage_offset):
                self._layer_stage_ids.append(stage_id)
            self._layer_stage_ids.extend([stage_id] * num_layers_per_stage)
        # Trim back to the configured layer count (the padding above only
        # served to bias the distribution of layers across stages).
        self._layer_stage_ids = self._layer_stage_ids[: cfg.pipeline_num_layers]
        # when pipeline_parallel_size > 1, we add pipeline_stage_id infomation into cfg
        if cfg.pipeline_parallel_size > 1:
            cfg.auto_pipeline_stage_id = self._layer_stage_ids
            # set pipeline_stage_id by users' setting
            if try_get_key(cfg, "custom_pipeline_stage_id") is not None:
                self._layer_stage_ids = cfg.custom_pipeline_stage_id
            cfg.actual_pipeline_stage_id = self._layer_stage_ids
        self._layer_ranks = [stages_devices[stage_id] for stage_id in self._layer_stage_ids]

    def _init_parallel_hierarchy(self):
        """2D (data, tensor) hierarchy only when both kinds of parallelism
        are active; otherwise placements use a flat rank list."""
        if self.is_data_model_parallel():
            self._parallel_hierarchy = (
                self._data_parallel_size,
                self._tensor_parallel_size,
            )
        else:
            self._parallel_hierarchy = None

    @property
    def num_nodes(self):
        return self._num_nodes

    @property
    def num_gpus_per_node(self):
        return self._num_gpus_per_node

    @property
    def world_size(self):
        return self._world_size

    @property
    def parallel_hierarchy(self):
        return self._parallel_hierarchy

    @property
    def tensor_parallel_size(self):
        return self._tensor_parallel_size

    @property
    def pipeline_parallel_size(self):
        return self._pipeline_parallel_size

    @property
    def model_parallel_size(self):
        # NOTE(review): intentionally returns the *tensor* parallel size,
        # not tensor * pipeline; `get_data_parallel_rank` relies on this
        # value matching the per-stage rank stride — confirm before changing.
        return self._tensor_parallel_size

    @property
    def data_parallel_size(self):
        return self._data_parallel_size

    @property
    def device_type(self):
        return self._device_type

    def set_device_type(self, device_type):
        assert device_type in ["cpu", "cuda"], f"not supported for {device_type}"
        self._device_type = device_type

    def get_layer_ranks(self, layer_idx):
        """Ranks hosting `layer_idx`, reshaped to the (data, tensor)
        hierarchy when 2D parallelism is active."""
        layer_ranks = self._layer_ranks[layer_idx]
        if self._parallel_hierarchy is None:
            return layer_ranks
        else:
            assert len(self._parallel_hierarchy) == 2
            return np.asarray(layer_ranks).reshape(self._parallel_hierarchy).tolist()

    def get_layer_stage_id(self, layer_idx):
        """Pipeline stage index assigned to `layer_idx`."""
        return self._layer_stage_ids[layer_idx]

    def is_tensor_model_parallel(self):
        return self._tensor_parallel_size > 1

    def is_data_parallel(self):
        return self._data_parallel_size > 1

    def is_pipeline_model_parallel(self):
        return self._pipeline_parallel_size > 1

    def is_data_model_parallel(self):
        return self.is_tensor_model_parallel() and self.is_data_parallel()
def setup_dist_util(cfg):
    """Initialize the distributed environment with configuration.

    Replaces the module-level ``_DIST_UTIL`` singleton with a fresh
    ``_DistributeUtil`` built from ``cfg`` (a ``train.dist``-style config).

    Example:
    .. code-block:: python
        from omegaconf import DictConfig
        # set the hybrid parallel distributed environment with 2D mesh GPUs
        setup_dist_util(
            DictConfig(
                dict(
                    data_parallel_size=2,
                    tensor_parallel_size=2,
                    pipeline_parallel_size=1,
                )
            )
        )
    """
    global _DIST_UTIL
    _DIST_UTIL = _DistributeUtil(cfg)
def get_dist_util():
    """Return the process-wide ``_DistributeUtil``.

    If ``setup_dist_util`` has never been called, a default single-node /
    single-GPU configuration is created on first use (with a warning).
    """
    global _DIST_UTIL
    if _DIST_UTIL is not None:
        return _DIST_UTIL
    logger.warning(
        "Distributed env is not set up, configure it by default (single node, single gpu)."
    )
    from omegaconf import DictConfig

    setup_dist_util(
        DictConfig(
            dict(
                data_parallel_size=1,
                tensor_parallel_size=1,
                pipeline_parallel_size=1,
            )
        )
    )
    return _DIST_UTIL
def get_layer_placement(layer_idx, device_type=None):
    """
    Build the ``flow.placement`` for a given layer index under the
    initialized distributed environment.

    Args:
        layer_idx (int): layer index selecting the rank group; with pipeline
            parallelism different layers live on different ranks.
        device_type (str, optional): overrides the configured device type.
    """
    dist_util = get_dist_util()
    if device_type is None:
        device_type = dist_util.device_type
    # Fall back to CPU when CUDA was requested but is unavailable.
    if device_type == "cuda" and not flow.cuda.is_available():
        device_type = "cpu"
    return flow.placement(device_type, dist_util.get_layer_ranks(layer_idx))
def get_nd_sbp(sbp_list):
    """
    Adapt a 2D-mesh sbp signature pair to the current environment:
    full pair for 2D (data + tensor) parallelism, the first entry for pure
    data parallelism, the second for pure tensor parallelism, and a single
    broadcast when no parallelism is active.

    Args:
        sbp_list (list): sbp signature pair written for a 2D mesh.

    Returns:
        The sbp list matching the initialized parallel configuration.
    """
    assert isinstance(sbp_list, list)
    assert len(sbp_list) == 2
    assert all(isinstance(sbp, flow.sbp.sbp) for sbp in sbp_list)
    dist_util = get_dist_util()
    if dist_util.is_data_model_parallel():
        return sbp_list
    if dist_util.is_data_parallel():
        return sbp_list[:1]
    if dist_util.is_tensor_model_parallel():
        return sbp_list[1:]
    return [flow.sbp.broadcast]
def get_hidden_sbp():
    """Hidden states sbp: split along dim 0 (batch) for data parallelism,
    broadcast across the tensor-parallel dimension."""
    return get_nd_sbp([flow.sbp.split(0), flow.sbp.broadcast])
def get_data_parallel_rank():
    """Return this process's data-parallel index."""
    dist_util = get_dist_util()
    # Ranks within one model-parallel group are consecutive, so dividing by
    # the group stride and wrapping by the data-parallel size yields the
    # data-parallel index of this rank.
    return (flow.env.get_rank() // dist_util.model_parallel_size) % dist_util.data_parallel_size
def get_data_parallel_size():
    """Return the configured data-parallel world size."""
    dist_util = get_dist_util()
    return dist_util.data_parallel_size
def get_tensor_parallel_size():
    """Return the configured tensor-parallel world size."""
    dist_util = get_dist_util()
    return dist_util.tensor_parallel_size
def get_pipeline_parallel_size():
    """Return the configured pipeline-parallel world size."""
    dist_util = get_dist_util()
    return dist_util.pipeline_parallel_size
def same_sbp(lhs_sbp, rhs_sbp):
    """Return True when two equally-long sbp signatures match element-wise."""
    assert len(lhs_sbp) == len(rhs_sbp)
    return all(lhs == rhs for lhs, rhs in zip(lhs_sbp, rhs_sbp))
def get_rank() -> int:
    """Return the global rank of the current process."""
    return flow.env.get_rank()
def get_local_rank() -> int:
    """Return the rank of the current process within its node."""
    return flow.env.get_local_rank()
def is_main_process() -> bool:
    """True only on global rank 0."""
    return get_rank() == 0
def is_last_process() -> bool:
    """True only on the highest global rank."""
    return get_rank() == get_world_size() - 1
def get_world_size():
    """Return the total number of processes in the job."""
    return flow.env.get_world_size()
def get_num_nodes():
    """Return the number of physical nodes in the job."""
    return flow.env.get_node_size()
def set_device_type(device_type):
    """Globally switch the device type ("cpu" or "cuda") used for
    subsequently created placements."""
    dist_util = get_dist_util()
    dist_util.set_device_type(device_type)
def broadcast_py_object(obj, src: int = 0):
    """Broadcast an arbitrary (dill-picklable) Python object from rank
    ``src`` to every rank; each rank returns the deserialized object."""
    # Only the source rank serializes; the other ranks pass None and
    # receive the payload from the CPU broadcast.
    payload = dill.dumps(obj) if flow.env.get_rank() == src else None
    return dill.loads(flow._oneflow_internal.cpu_broadcast(payload, src))
def convert_to_distributed_default_setting(t):
    """
    Convert an eager local tensor to a global (broadcast) tensor on the
    default layer-0 placement. A tensor that is already global keeps its
    ranks and is only moved to the configured device type.
    """
    if t.is_global:
        device_type = get_dist_util().device_type
        return t.to_global(placement=flow.placement(device_type, ranks=t.placement.ranks))
    return t.to_global(
        sbp=get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=get_layer_placement(0),
    )
def ttol(tensor, pure_local=False, ranks=None):
    """Convert a global tensor to a local tensor.

    Args:
        tensor: input tensor; returned unchanged when already local.
        pure_local: if True, take the local component under the current sbp;
            otherwise broadcast first so every rank holds the full tensor.
        ranks: optional rank list overriding the tensor's placement (cuda).
    """
    if not tensor.is_global:
        return tensor
    placement = flow.placement("cuda", ranks) if ranks else tensor.placement
    if pure_local:
        return tensor.to_global(placement=placement).to_local()
    return tensor.to_global(
        sbp=get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), placement=placement
    ).to_local()
def tton(tensor, local_only=False, ranks=None):
    """Convert a (possibly global) tensor to a numpy ndarray.

    ``local_only`` and ``ranks`` are forwarded to :func:`ttol`.
    """
    if tensor.is_global:
        tensor = ttol(tensor, local_only, ranks)
    return tensor.numpy()
def tensor_to_rank0(tensor, device="cuda", to_local=False):
    """Move a global tensor onto rank 0 only, optionally converting it to a
    local tensor afterwards. Local tensors are returned unchanged."""
    assert device in ["cpu", "cuda"], f"not supported for device:{device}"
    if not tensor.is_global:
        return tensor
    # For a 2D mesh the rank list must stay nested ([[0]] instead of [0]).
    ranks = [0] if tensor.placement.ranks.ndim == 1 else [[0]]
    tensor = tensor.to_global(
        sbp=get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=flow.placement(device, ranks=ranks),
    )
    return ttol(tensor) if to_local else tensor
def synchronize():
    """Barrier across all processes; a no-op for single-process runs."""
    if get_world_size() == 1:
        return
    flow.comm.barrier()
| 16,559 | 33.214876 | 100 | py |
libai | libai-main/libai/utils/non_blocking_io.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import io
import logging
from dataclasses import dataclass
from queue import Queue
from threading import Thread
from typing import IO, Callable, Optional, Union
# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/iopath/blob/main/iopath/common/non_blocking_io.py
# --------------------------------------------------------
"""
This file is used for asynchronous file operations.
When `opena` is called for the first time for a specific
`PathHandler`, a `NonBlockingIOManager` is instantiated. The
manager returns a `NonBlockingIO` (or `NonBlockingBufferedIO`)
instance to the caller, and the manager maintains all of the
thread management and data management.
"""
@dataclass
class PathData:
    """
    Manage the IO job queue and polling thread for a single
    path. This is done to ensure that write calls to the same
    path are serialized so they are written in the same order
    as they were called.
    On each `f.write` call where `f` is of type `NonBlockingIO`,
    we send the job to the manager where it is enqueued to the
    Queue. The polling Thread picks up on the job, executes it,
    waits for it to finish, and then continues to poll.
    """
    # FIFO of pending IO callables for this path.
    queue: Queue
    # Polling thread that drains `queue`, executing callables in order.
    thread: Thread
class NonBlockingIOManager:
    """
    All `opena` calls pass through this class so that it can
    keep track of the threads for proper cleanup at the end
    of the script. Each path that is opened with `opena` is
    assigned a single queue and polling thread that is kept
    open until it is cleaned up by `PathManager.async_join()`.
    """
    def __init__(
        self,
        buffered: Optional[bool] = False,
        executor: Optional[concurrent.futures.Executor] = None,
    ) -> None:
        """
        Args:
            buffered (bool): IO instances will be `NonBlockingBufferedIO`
                or `NonBlockingIO` based on this value. This bool is set
                manually for each `PathHandler` in `_opena`.
            executor: User can optionally attach a custom executor to
                perform async operations through `PathHandler.__init__`.
        """
        self._path_to_data = {}  # Map from path to `PathData` object
        self._buffered = buffered
        # IO class chosen once for all paths managed by this instance.
        self._IO = NonBlockingBufferedIO if self._buffered else NonBlockingIO
        self._pool = executor or concurrent.futures.ThreadPoolExecutor()
    def get_non_blocking_io(
        self,
        path: str,
        io_obj: Union[IO[str], IO[bytes]],
        callback_after_file_close: Optional[Callable[[None], None]] = None,
        buffering: Optional[int] = -1,
    ) -> Union[IO[str], IO[bytes]]:
        """
        Called by `PathHandler._opena` with the path and returns a
        `NonBlockingIO` instance.
        Args:
            path (str): A path str to operate on. This path should be
                simplified to ensure that each absolute path has only a single
                path str that maps onto it. For example, in `NativePathHandler`,
                we can use `os.path.normpath`.
            io_obj (IO): a reference to the IO object returned by the
                `PathHandler._open` function.
            callback_after_file_close (Callable): An optional argument that can
                be passed to perform operations that depend on the asynchronous
                writes being completed. The file is first written to the local
                disk and then the callback is executed.
            buffering (int): An optional argument to set the buffer size for
                buffered asynchronous writing.
        """
        # `buffering` is only meaningful for the buffered IO class.
        if not self._buffered and buffering != -1:
            raise ValueError(
                "NonBlockingIO is not using a buffered writer but `buffering` "
                f"arg is set to non-default value of {buffering} != -1."
            )
        if path not in self._path_to_data:
            # Initialize job queue and a polling thread
            queue = Queue()
            t = Thread(target=self._poll_jobs, args=(queue,))
            t.start()
            # Store the `PathData`
            self._path_to_data[path] = PathData(queue, t)
        kwargs = {} if not self._buffered else {"buffering": buffering}
        return self._IO(
            notify_manager=lambda io_callable: (  # Pass async jobs to manager
                self._path_to_data[path].queue.put(io_callable)
            ),
            io_obj=io_obj,
            callback_after_file_close=callback_after_file_close,
            **kwargs,
        )
    def _poll_jobs(self, queue: Queue) -> None:
        """
        A single thread runs this loop. It waits for an IO callable to be
        placed in a specific path's `Queue` where the queue contains
        callable functions. It then waits for the IO job to be completed
        before looping to ensure write order.
        """
        while True:
            # `func` is a callable function (specifically a lambda function)
            # and can be any of:
            #   - func = file.write(b)
            #   - func = file.close()
            #   - func = None
            func = queue.get()  # Blocks until item read.
            if func is None:  # Thread join signal.
                break
            self._pool.submit(func).result()  # Wait for job to finish.
    def _join(self, path: Optional[str] = None) -> bool:
        """
        Waits for write jobs for a specific path or waits for all
        write jobs for the path handler if no path is provided.
        Args:
            path (str): Pass in a file path and will wait for the
                asynchronous jobs to be completed for that file path.
                If no path is passed in, then all threads operating
                on all file paths will be joined.
        Returns:
            bool: True when every selected path joined cleanly.
        """
        if path and path not in self._path_to_data:
            raise ValueError(
                f"{path} has no async IO associated with it. "
                f"Make sure `opena({path})` is called first."
            )
        # If a `_close` call fails, we print the error and continue
        # closing the rest of the IO objects.
        paths_to_close = [path] if path else list(self._path_to_data.keys())
        success = True
        for _path in paths_to_close:
            try:
                path_data = self._path_to_data.pop(_path)
                # A `None` item is the polling thread's stop signal.
                path_data.queue.put(None)
                path_data.thread.join()
            except Exception:
                logger = logging.getLogger(__name__)
                logger.exception(f"`NonBlockingIO` thread for {_path} failed to join.")
                success = False
        return success
    def _close_thread_pool(self) -> bool:
        """
        Closes the ThreadPool.
        Returns:
            bool: False when shutdown raised (the error is logged).
        """
        try:
            self._pool.shutdown()
        except Exception:
            logger = logging.getLogger(__name__)
            logger.exception("`NonBlockingIO` thread pool failed to close.")
            return False
        return True
# NOTE: We currently only support asynchronous writes (not reads).
class NonBlockingIO(io.IOBase):
    # NOTE: all mutating methods are fire-and-forget — they enqueue the
    # operation with the manager and return immediately (returning None
    # rather than the byte counts/offsets of the synchronous io API).
    def __init__(
        self,
        notify_manager: Callable[[Callable[[], None]], None],
        io_obj: Union[IO[str], IO[bytes]],
        callback_after_file_close: Optional[Callable[[None], None]] = None,
    ) -> None:
        """
        Returned to the user on an `opena` call. Uses a Queue to manage the
        IO jobs that need to be run to ensure order preservation and a
        polling Thread that checks the Queue. Implementation for these are
        lifted to `NonBlockingIOManager` since `NonBlockingIO` closes upon
        leaving the context block.
        NOTE: Writes to the same path are serialized so they are written in
        the same order as they were called but writes to distinct paths can
        happen concurrently.
        Args:
            notify_manager (Callable): a callback function passed in from the
                `NonBlockingIOManager` so that all IO jobs can be stored in
                the manager. It takes in a single argument, namely another
                callable function.
                Example usage:
                    ```
                    notify_manager(lambda: file.write(data))
                    notify_manager(lambda: file.close())
                    ```
                Here, we tell `NonBlockingIOManager` to add a write callable
                to the path's Queue, and then to add a close callable to the
                path's Queue. The path's polling Thread then executes the write
                callable, waits for it to finish, and then executes the close
                callable. Using `lambda` allows us to pass callables to the
                manager.
            io_obj (IO): a reference to the IO object returned by the
                `PathHandler._open` function.
            callback_after_file_close (Callable): An optional argument that can
                be passed to perform operations that depend on the asynchronous
                writes being completed. The file is first written to the local
                disk and then the callback is executed.
        """
        super().__init__()
        self._notify_manager = notify_manager
        self._io = io_obj
        self._callback_after_file_close = callback_after_file_close
        # Guards against running the close-callback more than once.
        self._close_called = False
    def readable(self) -> bool:
        return False
    def writable(self) -> bool:
        return True
    def seekable(self) -> bool:
        return True
    def write(self, b: Union[bytes, bytearray]) -> None:
        """
        Called on `f.write()`. Gives the manager the write job to call.
        """
        self._notify_manager(lambda: self._io.write(b))
    def seek(self, offset: int, whence: int = 0) -> int:
        """
        Called on `f.seek()`.
        """
        self._notify_manager(lambda: self._io.seek(offset, whence))
    def tell(self) -> int:
        """
        Called on `f.tell()`.
        Unsupported: the async queue means the current offset is unknown
        at call time.
        """
        raise ValueError("ioPath async writes does not support `tell` calls.")
    def truncate(self, size: int = None) -> int:
        """
        Called on `f.truncate()`.
        """
        self._notify_manager(lambda: self._io.truncate(size))
    def close(self) -> None:
        """
        Called on `f.close()` or automatically by the context manager.
        We add the `close` call to the file's queue to make sure that
        the file is not closed before all of the write jobs are complete.
        """
        # `ThreadPool` first closes the file and then executes the callback.
        # We only execute the callback once even if there are multiple
        # `f.close` calls.
        self._notify_manager(lambda: self._io.close())
        if not self._close_called and self._callback_after_file_close:
            self._notify_manager(self._callback_after_file_close)
        self._close_called = True
# NOTE: To use this class, use `buffered=True` in `NonBlockingIOManager`.
# NOTE: This class expects the IO mode to be buffered.
class NonBlockingBufferedIO(io.IOBase):
    # Default flush threshold when no `buffering` size is given.
    MAX_BUFFER_BYTES = 10 * 1024 * 1024  # 10 MiB
    def __init__(
        self,
        notify_manager: Callable[[Callable[[], None]], None],
        io_obj: Union[IO[str], IO[bytes]],
        callback_after_file_close: Optional[Callable[[None], None]] = None,
        buffering: int = -1,
    ) -> None:
        """
        Buffered version of `NonBlockingIO`. All write data is stored in an
        IO buffer until the buffer is full, or `flush` or `close` is called.
        Args:
            Same as `NonBlockingIO` args.
            buffering (int): An optional argument to set the buffer size for
                buffered asynchronous writing.
        """
        super().__init__()
        self._notify_manager = notify_manager
        self._io = io_obj
        self._callback_after_file_close = callback_after_file_close
        # Buffers are appended by `flush`; only the last one is active.
        self._buffers = [io.BytesIO()]
        self._buffer_size = buffering if buffering > 0 else self.MAX_BUFFER_BYTES
        self._close_called = False
    def readable(self) -> bool:
        return False
    def writable(self) -> bool:
        return True
    def seekable(self) -> bool:
        return False
    def write(self, b: Union[bytes, bytearray]) -> None:
        """
        Called on `f.write()`. Gives the manager the write job to call.
        Data is accumulated in the active buffer and only handed to the
        manager once the buffer reaches `self._buffer_size`.
        """
        buffer = self._buffers[-1]
        with memoryview(b) as view:
            buffer.write(view)
        if buffer.tell() < self._buffer_size:
            return
        self.flush()
    def close(self) -> None:
        """
        Called on `f.close()` or automatically by the context manager.
        We add the `close` call to the file's queue to make sure that
        the file is not closed before all of the write jobs are complete.
        """
        self.flush()
        # Close the last buffer created by `flush`.
        self._notify_manager(lambda: self._buffers[-1].close())
        # `ThreadPool` first closes the file and then executes the callback.
        self._notify_manager(lambda: self._io.close())
        if not self._close_called and self._callback_after_file_close:
            self._notify_manager(self._callback_after_file_close)
        self._close_called = True
    def flush(self) -> None:
        """
        Called on `f.write()` if the buffer is filled (or overfilled). Can
        also be explicitly called by user.
        NOTE: Buffering is used in a strict manner. Any buffer that exceeds
        `self._buffer_size` will be broken into multiple write jobs where
        each has a write call with `self._buffer_size` size.
        """
        buffer = self._buffers[-1]
        if buffer.tell() == 0:
            return
        pos = 0
        total_size = buffer.seek(0, io.SEEK_END)
        view = buffer.getbuffer()
        # Chunk the buffer in case it is larger than the buffer size.
        while pos < total_size:
            item = view[pos : pos + self._buffer_size]
            # `item=item` is needed due to Python's late binding closures.
            self._notify_manager(lambda item=item: self._io.write(item))
            pos += self._buffer_size
        # Close buffer immediately after being written to file and create
        # a new buffer. NOTE(review): the enqueued close runs only after all
        # write jobs for this buffer, so the exported memoryview slices have
        # been consumed by then — confirm against upstream iopath ordering.
        self._notify_manager(lambda: buffer.close())
        self._buffers.append(io.BytesIO())
| 15,063 | 38.95756 | 87 | py |
libai | libai-main/libai/utils/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import fnmatch
import hashlib
import json
import logging
import os
import shutil
import sys
import tempfile
from functools import wraps
from io import open
from pathlib import Path
import boto3
import requests
import wget
from botocore.config import Config
from botocore.exceptions import ClientError
from tqdm import tqdm
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
cache_home = Path(os.getenv("OF_CACHE_ROOT", Path.home() / ".of_cache"))
default_cache_path = str(cache_home / "libai")
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def url_to_filename(url, etag=None):
    """
    Convert ``url`` into a hashed filename in a repeatable way.
    If ``etag`` is specified, its hash is appended to the url's hash,
    delimited by a period.
    If the url ends with .h5 (Keras HDF5 weights), ".h5" is appended to the
    name so that TF 2.0 can identify the cached file as an HDF5 file
    (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3
    /tensorflow/python/keras/engine/network.py#L1380)
    """
    pieces = [hashlib.sha256(url.encode("utf-8")).hexdigest()]
    if etag:
        pieces.append(hashlib.sha256(etag.encode("utf-8")).hexdigest())
    filename = ".".join(pieces)
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def filename_to_url(filename, cache_dir=None):
    """
    Return the ``(url, etag)`` pair stored for a cached ``filename``;
    ``etag`` may be ``None``.

    Raises:
        EnvironmentError: if the cached file or its ``.json`` metadata
            sidecar does not exist.
    """
    if cache_dir is None:
        cache_dir = default_cache_path
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + ".json"
    # Both the payload and its metadata sidecar must be present.
    for required_path in (cache_path, meta_path):
        if not os.path.exists(required_path):
            raise EnvironmentError("file {} not found".format(required_path))
    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata["url"], metadata["etag"]
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
Args:
cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
force_download: if True, re-dowload the file even if it's already cached in the cache dir.
"""
if cache_dir is None:
cache_dir = default_cache_path
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ("http", "https", "s3"):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(
url_or_filename,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
    """Split a full s3 path into ``(bucket_name, key_path)``."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    bucket_name = parsed.netloc
    key = parsed.path
    # Drop the single leading '/' left over from the URL path component.
    if key.startswith("/"):
        key = key[1:]
    return bucket_name, key
def s3_request(func):
    """
    Decorator for s3 request functions that converts a 404 ``ClientError``
    into a more helpful ``EnvironmentError``; other errors propagate.
    """
    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            if int(exc.response["Error"]["Code"]) != 404:
                raise
            raise EnvironmentError("file {} not found".format(url))
    return wrapper
@s3_request
def s3_etag(url, proxies=None):
    """Check ETag on S3 object."""
    bucket_name, s3_path = split_s3_path(url)
    resource = boto3.resource("s3", config=Config(proxies=proxies))
    return resource.Object(bucket_name, s3_path).e_tag
@s3_request
def s3_get(url, temp_file, proxies=None):
    """Pull a file directly from S3."""
    bucket_name, s3_path = split_s3_path(url)
    resource = boto3.resource("s3", config=Config(proxies=proxies))
    resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file, proxies=None):
    """Stream *url* into the open file object *temp_file* with a progress bar."""
    response = requests.get(url, stream=True, proxies=proxies)
    length = response.headers.get("Content-Length")
    progress = tqdm(unit="B", total=int(length) if length is not None else None)
    for chunk in response.iter_content(chunk_size=1024):
        # Keep-alive chunks arrive as empty bytes; skip them.
        if not chunk:
            continue
        progress.update(len(chunk))
        temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Args:
        url: remote location; ``s3://`` URLs go through boto3, everything else
            through plain HTTP(S).
        cache_dir: directory holding cached files (defaults to ``default_cache_path``).
        force_download: if True, re-download even when a cached copy exists.
        proxies: optional proxy mapping forwarded to requests/boto3.
        etag_timeout: seconds to wait for the HEAD request that fetches the ETag.
    """
    if cache_dir is None:
        cache_dir = default_cache_path
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url, proxies=proxies)
    else:
        try:
            response = requests.head(
                url, allow_redirects=True, proxies=proxies, timeout=etag_timeout
            )
            if response.status_code != 200:
                etag = None
            else:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # No connection (or server too slow): fall back to a local copy below.
            etag = None
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # If we don't have a connection (etag is None) and can't identify the file
    # try to get the last downloaded one
    if not os.path.exists(cache_path) and etag is None:
        matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*")
        matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files))
        if matching_files:
            # NOTE(review): os.listdir order is arbitrary, so the "last" match is
            # not necessarily the newest download — confirm this is acceptable.
            cache_path = os.path.join(cache_dir, matching_files[-1])
    if not os.path.exists(cache_path) or force_download:
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file, proxies=proxies)
            else:
                http_get(url, temp_file, proxies=proxies)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, "wb") as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            # Sidecar "<file>.json" records the source url/etag so filename_to_url()
            # can map a cached file back to its origin.
            meta = {"url": url, "etag": etag}
            meta_path = cache_path + ".json"
            with open(meta_path, "w") as meta_file:
                output_string = json.dumps(meta)
                meta_file.write(output_string)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
def get_md5(fname):
    """Return the MD5 hex digest of the file at *fname*."""
    digest = hashlib.md5()
    with open(fname, "rb") as stream:
        # Read in 4 KiB chunks so arbitrarily large files fit in memory.
        while chunk := stream.read(4096):
            digest.update(chunk)
    return digest.hexdigest()
def download_file(out_path: str, url):
    """Fetch *url* with wget and store the result at *out_path*."""
    # Lazy %-style logging args; rendered message is identical to before.
    logger.info("downloading from %s to %s", url, out_path)
    wget.download(url, out=out_path)
def get_data_from_cache(url, cache_dir=None, force_download=False, md5=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Args:
        url: remote location; the basename of the URL becomes the cache filename.
        cache_dir: directory holding cached files (defaults to ``default_cache_path``).
        force_download: if True, re-download even when a cached copy exists.
        md5: optional expected MD5 hex digest; a mismatching cached copy is
            deleted and fetched again.
    """
    if cache_dir is None:
        cache_dir = default_cache_path
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    filename = url.split("/")[-1]
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # If we have already get the file, just check the md5 if provided
    if os.path.exists(cache_path) and md5 is not None:
        local_file_md5 = get_md5(cache_path)
        if local_file_md5 != md5:
            # NOTE(review): the re-downloaded file is not md5-verified again — confirm.
            os.unlink(cache_path)
            download_file(cache_path, url)
    # Cache miss: fetch the file (via wget) before the force_download check below.
    if not os.path.exists(cache_path):
        download_file(cache_path, url)
    # After the branches above the file always exists, so this block only runs
    # when force_download is set; it then overwrites the cached copy via http_get.
    if not os.path.exists(cache_path) or force_download:
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )
            # GET file object
            http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, "wb") as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            # Sidecar "<file>.json" records the source url for bookkeeping.
            meta = {"url": url}
            meta_path = cache_path + ".json"
            with open(meta_path, "w") as meta_file:
                output_string = json.dumps(meta)
                meta_file.write(output_string)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
| 11,913 | 33.734694 | 99 | py |
libai | libai-main/libai/utils/__init__.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 620 | 40.4 | 74 | py |
libai | libai-main/libai/data/structures.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from dataclasses import dataclass, field
from typing import Any, List
import oneflow as flow
from libai.utils import distributed as dist
@dataclass
class DistTensorData:
    """Pairs a local ``flow.Tensor`` with the SBP signature and pipeline-stage
    index needed to convert it into a global (distributed) tensor."""

    # The wrapped tensor (local until to_global() is called).
    tensor: flow.Tensor
    # Textual SBP per parallel dimension: "split_<dim>" or a plain sbp name
    # such as "broadcast" (resolved via getattr(flow.sbp, name)).
    sbp_list: list = field(default_factory=lambda: ["split_0", "broadcast"])
    # Pipeline stage whose devices should finally hold the tensor (-1 = last).
    placement_idx: int = 0
    # Tensor-like methods
    def to_global(self, sbp=None, placement=None, device_type="cuda"):
        """Convert ``self.tensor`` to a global tensor in place.

        When ``sbp`` is None it is parsed from ``self.sbp_list``; when
        ``placement`` is None the tensor is first placed on stage 0 and then
        transferred to stage ``self.placement_idx`` (see comments below).
        """
        if sbp is not None:
            self.sbp = sbp
        else:
            sbp_list = []
            for sbp in self.sbp_list:
                sbp = sbp.split("_")
                if len(sbp) > 1:
                    # split dim
                    assert sbp[0] == "split"
                    split_dim = int(sbp[1])
                    sbp_list.append(flow.sbp.split(split_dim))
                else:
                    sbp_sign = sbp[0]
                    sbp_list.append(getattr(flow.sbp, sbp_sign))
            self.sbp = dist.get_nd_sbp(sbp_list)
        if placement is not None:
            self.tensor = self.tensor.to_global(sbp=self.sbp, placement=placement)
        else:
            # Convert local tensor to global tensor with default setting,
            # if the placement parameter is not provided.
            # When enable pipeline parallel training,
            # all the devices will be grouped into several device groups
            # and the model will be split into several stages.
            # Each stage will be placed on the corresponding device group.
            # For those tensors to be used in the last stage,
            # we first convert them to global tensor by only retain those on the device group 0,
            # then transfer the result to the last stage.
            # We do that to make sure that all the tensors used by the model are all generated
            # by the fist device group, in case that each device group containg
            # some random augmentations to the tensors without setting the same global seed.
            main_placement = dist.get_layer_placement(0, device_type)
            self.tensor = self.tensor.to_global(sbp=self.sbp, placement=main_placement)
            if self.placement_idx != 0:
                self.tensor = self.tensor.to_global(
                    placement=dist.get_layer_placement(self.placement_idx, device_type)
                )
    @staticmethod
    def stack(distTensor_lists: List["DistTensorData"]) -> "DistTensorData":
        """Stack several DistTensorData along a new leading (batch) dimension.

        All inputs must share tensor shape, ``sbp_list`` and ``placement_idx``;
        a single-element list is just unsqueezed.
        """
        if not isinstance(distTensor_lists[0].tensor, flow.Tensor):
            raise TypeError(
                "DistTensorData.tensor must be a flow.Tensor, but got {}. "
                "Please check the return values of `__getitem__` in dataset.".format(
                    type(distTensor_lists[0].tensor)
                )
            )
        assert len(distTensor_lists) > 0
        if len(distTensor_lists) == 1:
            # TODO(l1aoxingyu): add inplace unsqueeze
            # distTensor_lists[0].tensor.unsqueeze_(0) # add batch dim
            distTensor_lists[0].tensor = distTensor_lists[0].tensor.unsqueeze(0)  # add batch dim
            return distTensor_lists[0]
        tensor_size = distTensor_lists[0].tensor.size()
        sbp_list = distTensor_lists[0].sbp_list
        placement_idx = distTensor_lists[0].placement_idx
        tensors = []
        for data in distTensor_lists:
            assert (
                data.tensor.size() == tensor_size
            ), f"tensor shape is not equal, {data.tensor.size()} != {tensor_size}"
            assert (
                data.sbp_list == sbp_list
            ), f"sbp_list is not equal, {data.sbp_list} != {sbp_list}!"
            assert (
                data.placement_idx == placement_idx
            ), f"placement_idx is not equal, {data.placement_idx} != {placement_idx}"
            tensors.append(data.tensor)
        tensors = flow.stack(tensors, dim=0)
        ret = DistTensorData(tensors, sbp_list=sbp_list, placement_idx=placement_idx)
        return ret
class Instance:
    """
    This class represents a instance with metadata as attributes.
    It stores the attributes of an instance (e.g., image, tokens) as "fields".
    all other (non-filed) attributes of this class are considered private:
    they must start with '_' and are not modifiable by a user.
    Some basic usage:
    1. Set/get/check a field:
    .. code-block:: python
        instance.tokens = Metadata(...)
        instance.mask = Metadata(...)
        print(instance.tokens)
        print(instance.has("mask"))  # True
    2. ``len(instance)`` returns the number of instance
    """

    def __init__(self, **kwargs):
        # Preserve insertion order of fields so stacking/printing is deterministic.
        self._fields = OrderedDict()
        for k, v in kwargs.items():
            self.set(k, v)

    def __setattr__(self, name: str, val: Any) -> None:
        # Underscore-prefixed attributes are private and stored normally;
        # anything else is treated as a data field.
        if name.startswith("_"):
            super().__setattr__(name, val)
        else:
            self.set(name, val)

    def __getattr__(self, name: str):
        if name == "_fields" or name not in self._fields:
            raise AttributeError(f"Cannot find field '{name}' in the given Instance!")
        return self._fields[name]

    def set(self, name: str, value: Any):
        """
        Set the field named `name` to `value`.
        """
        self._fields[name] = value

    def has(self, name: str):
        """Return True if a field called `name` exists."""
        return name in self._fields

    def remove(self, name: str):
        """Delete the field called `name`."""
        del self._fields[name]

    def get(self, name: str):
        """Return the value of the field called `name`."""
        return self._fields[name]

    def get_fields(self):
        """Return the underlying name -> value mapping of all fields."""
        return self._fields

    def __len__(self):
        return len(self._fields.keys())

    def __iter__(self):
        raise NotImplementedError("`Instances` object is not iterable!")

    @staticmethod
    def stack(instance_lists: List["Instance"]) -> "Instance":
        """Stack the corresponding fields of several instances into one.

        flow.Tensor fields are stacked along a new leading (batch) dimension,
        list fields are collected into a list of per-instance lists, and any
        type providing a ``stack`` method (e.g. DistTensorData) is stacked
        via that method.
        """
        assert all(isinstance(i, Instance) for i in instance_lists)
        assert len(instance_lists) > 0
        ret = Instance()
        for k in instance_lists[0]._fields.keys():
            values = [i.get(k) for i in instance_lists]
            v0 = values[0]
            if isinstance(v0, flow.Tensor):
                values = flow.stack(values, dim=0)
            elif isinstance(v0, list):
                pass
            elif hasattr(type(v0), "stack"):
                values = type(v0).stack(values)
            else:
                raise ValueError("Unsupported type {} for stack.".format(type(v0)))
            ret.set(k, values)
        return ret

    def __str__(self):
        s = self.__class__.__name__ + "("
        s += "fields=[{}]".format(", ".join(f"{k}: {v}" for k, v in self._fields.items()))
        # Bugfix: close the parenthesis opened above so the repr is balanced.
        s += ")"
        return s

    __repr__ = __str__
| 7,308 | 36.101523 | 97 | py |
libai | libai-main/libai/data/__init__.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .structures import DistTensorData, Instance
from .build import (
build_image_train_loader,
build_image_test_loader,
build_nlp_train_val_test_loader,
build_nlp_test_loader,
)
| 816 | 34.521739 | 74 | py |
libai | libai-main/libai/data/build.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf import OmegaConf
from oneflow.utils.data import DataLoader
from oneflow.utils.data.dataset import ConcatDataset
from libai.config import LazyCall, instantiate
from libai.utils import distributed as dist
from .data_utils import get_train_valid_test_split_
from .samplers import CyclicSampler, SingleRoundSampler
from .structures import Instance
def build_nlp_train_val_test_loader(
    dataset,
    splits,
    weights,
    train_val_test_num_samples,
    train_batch_size,
    test_batch_size,
    train_sampler=None,
    test_sampler=None,
    num_workers=4,
    consumed_samples=0,
    seed=0,
    collate_fn=None,
    dataset_mixer=ConcatDataset,
):
    """
    Build nlp train_val_test dataloader, used for dataset lack of valid/test dataset
    Returns:
        It will return train/valid/test dataloader
        * train_loader: dataloader for training
        * valid_loader: dataloader for validation
        * test_loader: dataloader for testing
    Arguments:
        dataset: dataset from which to load the data. e.g.: dataset or [dataset1, dataset2, ...]
        splits: ratio config for splitting dataset to train/valid/test. e.g.: [[7, 2, 1], ...]
        weights: ratio config for concatenating dataset list (Not Supported yet). e.g.: [1.0, ...]
        train_batch_size: how many samples per batch to load in training (micro-batch-size per GPU).
        test_batch_size: how many samples per batch to load in testing (micro-batch-size per GPU).
        train_sampler / test_sampler: sampler configs; default to a per-call
            ``CyclicSampler`` / ``SingleRoundSampler`` config when None.
        num_workers: how many subprocesses to use for data
            loading. ``0`` means that the data will be loaded in the main process.
            (default: ``4``).
        consumed_samples: the number of samples that have been trained at the current time,
            used for resuming training (default: ``0``).
        seed: random seed, used for reproducing experiments (default: ``0``).
        collate_fn: merges a list of samples to form a
            mini-batch of Tensor(s). Used when using batched loading from a
            map-style dataset.
        dataset_mixer: function for concating list dataset.
    """
    # Build fresh sampler configs per call. The previous module-level defaults
    # (LazyCall objects created once at import time) were mutated in-place by
    # the sub-builders (``sampler.dataset = ...``), leaking state between
    # successive calls -- the classic mutable-default-argument pitfall.
    if train_sampler is None:
        train_sampler = LazyCall(CyclicSampler)(shuffle=True)
    if test_sampler is None:
        test_sampler = LazyCall(SingleRoundSampler)(shuffle=False, drop_last=False)

    def build_dataset(index, dataset):
        # Reads ``indexed_dataset``, ``ds_splits`` and ``total_num_of_documents``
        # from the enclosing loop below (late-bound closure variables).
        doc_idx_ptr = indexed_dataset.get_doc_idx()
        start_index = ds_splits[index]
        end_index = ds_splits[index + 1] + 1
        indexed_dataset.set_doc_idx(doc_idx_ptr[start_index:end_index])
        dataset.indexed_dataset = indexed_dataset
        dataset.max_num_samples = train_val_test_num_samples[index]
        dataset = instantiate(dataset)
        # Set the original pointer so dataset remains the main dataset.
        indexed_dataset.set_doc_idx(doc_idx_ptr)
        # check
        assert indexed_dataset.doc_idx[0] == 0
        assert indexed_dataset.doc_idx.shape[0] == (total_num_of_documents + 1)
        return dataset

    if OmegaConf.is_list(dataset):
        dataset = list(dataset)
    elif not isinstance(dataset, list):
        dataset = [dataset]
    assert len(dataset) == len(splits), "datasets length must equal splits length"
    assert len(dataset) == len(weights), "datasets length must equal weights length"
    train_datasets, val_datasets, test_datasets = [], [], []
    for dst, split in zip(dataset, splits):
        indexed_dataset = instantiate(dst.indexed_dataset)
        total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1
        ds_splits = get_train_valid_test_split_(total_num_of_documents, split)
        train_dataset = build_dataset(0, dst)
        val_dataset = build_dataset(1, dst)
        test_dataset = build_dataset(2, dst)
        train_datasets.append(train_dataset)
        val_datasets.append(val_dataset)
        test_datasets.append(test_dataset)
    # [dataset, dataset] -> dataset -> dataloader
    train_dataset = dataset_mixer(train_datasets)
    val_dataset = dataset_mixer(val_datasets)
    test_dataset = dataset_mixer(test_datasets)
    collate_fn = trivial_batch_collator if collate_fn is None else collate_fn
    train_loader, _, _ = build_nlp_train_loader(
        dataset=train_dataset,
        train_batch_size=train_batch_size,
        test_batch_size=None,
        sampler=train_sampler,
        num_workers=num_workers,
        consumed_samples=consumed_samples,
        seed=seed,
        collate_fn=collate_fn,
    )
    valid_loader = build_nlp_test_loader(
        dataset=val_dataset,
        test_batch_size=test_batch_size,
        sampler=test_sampler,
        num_workers=num_workers,
        seed=seed,
        collate_fn=collate_fn,
    )
    test_loader = build_nlp_test_loader(
        dataset=test_dataset,
        test_batch_size=test_batch_size,
        sampler=test_sampler,
        num_workers=num_workers,
        seed=seed,
        collate_fn=collate_fn,
    )
    return train_loader, valid_loader, test_loader
def build_nlp_train_loader(
    dataset,
    train_batch_size,
    test_batch_size=None,
    sampler=None,
    num_workers=4,
    consumed_samples=0,
    seed=0,
    collate_fn=None,
    dataset_mixer=ConcatDataset,
    **kwargs
):
    """
    Build nlp train dataloader, it's used for train dataset
    Returns:
        It will return train dataloader, and Nonetype for valid/test dataloader
        * train_loader: dataloader for training
        * None: Nonetype
        * None: Nonetype
    Arguments:
        dataset: dataset from which to load the data. e.g.: dataset or [dataset1, dataset2, ...]
        train_batch_size: how many samples per batch to load in training (micro-batch-size per GPU).
        test_batch_size: no use, set it to None.
        sampler: sampler config; defaults to a per-call ``CyclicSampler`` config when None.
        num_workers: how many subprocesses to use for data
            loading. ``0`` means that the data will be loaded in the main process.
            (default: ``4``).
        consumed_samples: the number of samples that have been trained at the current time,
            used for resuming training (default: ``0``).
        seed: random seed, used for reproducing experiments (default: ``0``).
        collate_fn: merges a list of samples to form a
            mini-batch of Tensor(s). Used when using batched loading from a
            map-style dataset.
        dataset_mixer: function for concating list dataset.
    """
    if sampler is None:
        # Fresh config per call: the old shared LazyCall default was mutated
        # below (``sampler.dataset = ...``), leaking state between calls.
        sampler = LazyCall(CyclicSampler)(shuffle=True)
    dataset = instantiate(dataset)
    if OmegaConf.is_list(dataset):
        dataset = list(dataset)
    elif not isinstance(dataset, list):
        dataset = [dataset]
    if len(dataset) > 1:
        dataset = dataset_mixer(dataset)
    else:
        dataset = dataset[0]
    # Fill in the runtime fields of the sampler config before instantiating it.
    sampler.dataset = dataset
    sampler.micro_batch_size = train_batch_size
    sampler.consumed_samples = consumed_samples
    sampler.data_parallel_rank = dist.get_data_parallel_rank()
    sampler.data_parallel_size = dist.get_data_parallel_size()
    sampler.seed = seed
    sampler = instantiate(sampler)
    dataloader = DataLoader(
        dataset,
        batch_sampler=sampler,
        num_workers=num_workers,
        persistent_workers=True if num_workers > 0 else False,
        collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
        **kwargs,
    )
    return dataloader, None, None
def build_nlp_test_loader(
    dataset,
    test_batch_size,
    sampler=None,
    num_workers=4,
    seed=0,
    collate_fn=None,
):
    """
    Build nlp test dataloader, it's used for test dataset
    Returns:
        It will return test dataloader
        * test_loader: dataloader for testing
    Arguments:
        dataset: dataset from which to load the data. e.g.: dataset or [dataset1, dataset2, ...]
        test_batch_size: how many samples per batch to load in testing (micro-batch-size per GPU).
        sampler: sampler config; defaults to a per-call ``SingleRoundSampler`` config when None.
        num_workers: how many subprocesses to use for data
            loading. ``0`` means that the data will be loaded in the main process.
            (default: ``4``).
        seed: random seed, used for reproducing experiments (default: ``0``).
        collate_fn: merges a list of samples to form a
            mini-batch of Tensor(s). Used when using batched loading from a
            map-style dataset.
    """
    if sampler is None:
        # Fresh config per call: the old shared LazyCall default was mutated
        # below (``sampler.dataset = ...``), leaking state between calls.
        sampler = LazyCall(SingleRoundSampler)(shuffle=False, drop_last=False)
    dataset = instantiate(dataset)
    collate_fn = trivial_batch_collator if collate_fn is None else collate_fn
    # Fill in the runtime fields of the sampler config before instantiating it.
    sampler.dataset = dataset
    sampler.micro_batch_size = test_batch_size
    sampler.data_parallel_rank = dist.get_data_parallel_rank()
    sampler.data_parallel_size = dist.get_data_parallel_size()
    sampler.seed = seed
    sampler = instantiate(sampler)
    test_loader = DataLoader(
        dataset,
        batch_sampler=sampler,
        num_workers=num_workers,
        persistent_workers=True if num_workers > 0 else False,
        collate_fn=collate_fn,
    )
    return test_loader
def build_image_train_loader(
    dataset,
    train_batch_size,
    test_batch_size=None,
    sampler=None,
    num_workers=4,
    consumed_samples=0,
    seed=0,
    collate_fn=None,
    dataset_mixer=ConcatDataset,
    mixup_func=None,
    **kwargs
):
    """
    Build image train dataloader, it's used for train dataset
    Returns:
        It will return train dataloader, and Nonetype for valid/test dataloader
        * train_loader: dataloader for training
        * None: Nonetype
        * None: Nonetype
    Arguments:
        dataset: dataset from which to load the data. e.g.: dataset or [dataset1, dataset2, ...]
        train_batch_size: how many samples per batch to load in training (micro-batch-size per GPU).
        test_batch_size: no use, set it to None.
        sampler: sampler config; defaults to a per-call ``CyclicSampler`` config when None.
        num_workers: how many subprocesses to use for data
            loading. ``0`` means that the data will be loaded in the main process.
            (default: ``4``).
        consumed_samples: the number of samples that have been trained at the current time,
            used for resuming training (default: ``0``).
        seed: random seed, used for reproducing experiments (default: ``0``).
        collate_fn: merges a list of samples to form a
            mini-batch of Tensor(s). Used when using batched loading from a
            map-style dataset.
        dataset_mixer: function for concating list dataset.
        mixup_func: function for data argumentation.
    """
    if sampler is None:
        # Fresh config per call: the old shared LazyCall default was mutated
        # below (``sampler.dataset = ...``), leaking state between calls.
        sampler = LazyCall(CyclicSampler)(shuffle=True)
    dataset = instantiate(dataset)
    if OmegaConf.is_list(dataset):
        dataset = list(dataset)
    elif not isinstance(dataset, list):
        dataset = [dataset]
    if len(dataset) > 1:
        dataset = dataset_mixer(dataset)
    else:
        dataset = dataset[0]
    # Fill in the runtime fields of the sampler config before instantiating it.
    sampler.dataset = dataset
    sampler.micro_batch_size = train_batch_size
    sampler.consumed_samples = consumed_samples
    sampler.data_parallel_rank = dist.get_data_parallel_rank()
    sampler.data_parallel_size = dist.get_data_parallel_size()
    sampler.seed = seed
    sampler = instantiate(sampler)
    dataloader = DataLoader(
        dataset,
        batch_sampler=sampler,
        num_workers=num_workers,
        persistent_workers=True if num_workers > 0 else False,
        collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
        **kwargs,
    )
    # Bind up mixup_func to dataloader, and this will be used in Trainer.get_batch
    dataloader.mixup_func = instantiate(mixup_func)
    return dataloader, None, None
def build_image_test_loader(
    dataset,
    test_batch_size,
    sampler=None,
    num_workers=4,
    seed=0,
    collate_fn=None,
    **kwargs
):
    """
    Build image test dataloader, used for test dataset
    Returns:
        It will return test dataloader
        * test_loader: dataloader for testing
    Arguments:
        dataset: dataset from which to load the data. e.g.: dataset or [dataset1, dataset2, ...]
        test_batch_size: how many samples per batch to load in testing (micro-batch-size per GPU).
        sampler: sampler config; defaults to a per-call ``SingleRoundSampler`` config when None.
        num_workers: how many subprocesses to use for data
            loading. ``0`` means that the data will be loaded in the main process.
            (default: ``4``).
        seed: random seed, used for reproducing experiments (default: ``0``).
        collate_fn: merges a list of samples to form a
            mini-batch of Tensor(s). Used when using batched loading from a
            map-style dataset.
    """
    if sampler is None:
        # Fresh config per call: the old shared LazyCall default was mutated
        # below (``sampler.dataset = ...``), leaking state between calls.
        # NOTE(review): shuffle=True on a *test* loader differs from
        # build_nlp_test_loader (shuffle=False) — kept as-is; confirm intent.
        sampler = LazyCall(SingleRoundSampler)(shuffle=True, drop_last=False)
    dataset = instantiate(dataset)
    # Fill in the runtime fields of the sampler config before instantiating it.
    sampler.dataset = dataset
    sampler.micro_batch_size = test_batch_size
    sampler.data_parallel_rank = dist.get_data_parallel_rank()
    sampler.data_parallel_size = dist.get_data_parallel_size()
    sampler.seed = seed
    sampler = instantiate(sampler)
    return DataLoader(
        dataset,
        batch_sampler=sampler,
        num_workers=num_workers,
        persistent_workers=True if num_workers > 0 else False,
        collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
        **kwargs,
    )
def trivial_batch_collator(batch):
    """Collate a list of ``Instance`` samples by stacking their fields."""
    first = batch[0]
    assert isinstance(first, Instance), "batch[0] must be `instance` for trivial batch collator"
    return Instance.stack(batch)
| 14,522 | 35.126866 | 100 | py |
libai | libai-main/libai/data/datasets/gpt_dataset.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT style dataset."""
import logging
import os
import time
import numpy as np
import oneflow as flow
from libai.data.structures import DistTensorData, Instance
from libai.utils import distributed as dist
from ..data_utils import is_shared_folder
logger = logging.getLogger(__name__)
class GPT2Dataset(flow.utils.data.Dataset):
    """Map-style dataset producing fixed-length GPT training samples.

    Samples are drawn from an ``indexed_dataset`` of tokenized documents via
    precomputed doc/sample/shuffle index mappings (built and cached on disk by
    ``_build_index_mappings``); a single sample may span several documents.
    """

    def __init__(
        self,
        name,
        tokenizer,
        data_prefix,
        indexed_dataset,
        max_num_samples,
        max_seq_length,
        seed=1234,
    ):
        self.name = name
        self.tokenizer = tokenizer
        self.indexed_dataset = indexed_dataset
        # All document ids of the indexed dataset participate in training.
        documents = np.arange(start=0, stop=indexed_dataset.sizes.shape[0], step=1, dtype=np.int32)
        # Build index mappings.
        self.doc_idx, self.sample_idx, self.shuffle_idx = _build_index_mappings(
            self.name,
            data_prefix,
            documents,
            self.indexed_dataset.sizes,
            max_num_samples,
            max_seq_length,
            seed,
        )
    def __len__(self):
        # -1 is due to data structure used to retrieve the index:
        # sample i --> [sample_idx[i], sample_idx[i+1])
        return self.sample_idx.shape[0] - 1
    def __getitem__(self, idx):
        """Return an Instance with ``input_ids`` (tokens[:-1]) and the
        one-token-shifted ``labels`` (tokens[1:])."""
        # Get the shuffled index.
        idx = self.shuffle_idx[idx]
        # Start and end documents and offsets.
        doc_index_f = self.sample_idx[idx][0]
        doc_index_l = self.sample_idx[idx + 1][0]
        offset_f = self.sample_idx[idx][1]
        offset_l = self.sample_idx[idx + 1][1]
        # If we are within the same document, just extract the chunk.
        if doc_index_f == doc_index_l:
            sample = self.indexed_dataset.get(
                self.doc_idx[doc_index_f], offset=offset_f, length=offset_l - offset_f + 1
            )
        else:
            # Otherwise, get the rest of the initial document.
            sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f], offset=offset_f)]
            # Loop over all in between documents and add the entire document.
            for i in range(doc_index_f + 1, doc_index_l):
                sample_list.append(self.indexed_dataset.get(self.doc_idx[i]))
            # And finally add the relevant portion of last document.
            sample_list.append(
                self.indexed_dataset.get(self.doc_idx[doc_index_l], length=offset_l + 1)
            )
            sample = np.concatenate(sample_list)
        input_ids = flow.tensor(np.array(sample[:-1], dtype=np.int64))
        lm_labels = flow.tensor(np.array(sample[1:], dtype=np.int64))
        # Labels are placed on the last pipeline stage (placement_idx=-1),
        # where the loss is computed.
        sample = Instance(
            input_ids=DistTensorData(input_ids),
            labels=DistTensorData(lm_labels, placement_idx=-1),
        )
        return sample
def _build_index_mappings(name, data_prefix, documents, sizes, num_samples, seq_length, seed):
    """Build doc-idx, sample-idx, and shuffle-idx.
    doc-idx: is an array (ordered) of documents to be used in training.
    sample-idx: is the start document index and document offset for each
        training sample.
    shuffle-idx: maps the sample index into a random index into sample-idx.

    The three arrays are cached as ``.npy`` files next to ``data_prefix`` and
    rebuilt (on rank 0 / local rank 0 only) when any cache file is missing.

    Args:
        name: tag embedded in the cache filenames.
        data_prefix: path prefix under which the ``*_indexmap_*.npy`` files live.
        documents: int32 array of document ids to draw from.
        sizes: int32 array of per-document token counts.
        num_samples: total number of training samples required.
        seq_length: sequence length of each sample.
        seed: RNG seed for the shuffles.
    """
    # Number of tokens in each epoch and number of required epochs.
    tokens_per_epoch = _num_tokens(documents, sizes)
    num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples)
    # rng state
    np_rng = np.random.RandomState(seed=seed)
    # Filename of the index mappings.
    _filename = data_prefix
    _filename += "_{}_indexmap".format(name)
    _filename += "_{}ns".format(num_samples)
    _filename += "_{}sl".format(seq_length)
    _filename += "_{}s".format(seed)
    doc_idx_filename = _filename + "_doc_idx.npy"
    sample_idx_filename = _filename + "_sample_idx.npy"
    shuffle_idx_filename = _filename + "_shuffle_idx.npy"
    file_folder = os.path.dirname(_filename)
    # Build the indexed mapping if not exist.
    # NOTE: use `get_local_rank() == 0` to promise samples will be build in each node.
    # use `get_rank() == 0` to promise samples will be build only once for a shared folder.
    cur_rank = flow.env.get_rank() if is_shared_folder(file_folder) else flow.env.get_local_rank()
    if cur_rank == 0:
        if (
            (not os.path.isfile(doc_idx_filename))
            or (not os.path.isfile(sample_idx_filename))
            or (not os.path.isfile(shuffle_idx_filename))
        ):
            logger.info(
                " > WARNING: could not find index map files, building " "the indices on rank 0 ..."
            )
            # For the last epoch, decide whether include the entire epoch
            # in the global shuffle or not.
            # If we need only one epoch, then separating last epoch does
            # not mean anything.
            if num_epochs == 1:
                separate_last_epoch = False
                logger.info(" > only one epoch required, setting " "separate_last_epoch to False")
            else:
                # Get the number of samples for the last epoch
                num_samples_from_epochs_minus_one = (
                    (num_epochs - 1) * tokens_per_epoch - 1
                ) // seq_length
                last_epoch_num_samples = num_samples - num_samples_from_epochs_minus_one
                assert (
                    last_epoch_num_samples >= 0
                ), "last epoch number of samples should be non-negative."
                num_samples_per_epoch = (tokens_per_epoch - 1) // seq_length
                assert last_epoch_num_samples < (
                    num_samples_per_epoch + 1
                ), "last epoch number of samples exceeded max value."
                # If we have less than 80% of the samples for the last epoch,
                # separate out the epoch and treat it differently.
                # Note: the 80% number is just based on common sense and can
                # be adjusted if needed.
                separate_last_epoch = last_epoch_num_samples < int(0.80 * num_samples_per_epoch)
                if separate_last_epoch:
                    string = (
                        " > last epoch number of samples ({}) is smaller "
                        "than 80% of number of samples per epoch ({}), "
                        "setting separate_last_epoch to True"
                    )
                else:
                    string = (
                        " > last epoch number of samples ({}) is larger "
                        "than 80% of number of samples per epoch ({}), "
                        "setting separate_last_epoch to False"
                    )
                logger.info(string.format(last_epoch_num_samples, num_samples_per_epoch))
            # doc-idx.
            logger.info("start to build and save doc-idx mapping ...")
            start_time = time.time()
            doc_idx = _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch)
            np.save(doc_idx_filename, doc_idx, allow_pickle=True)
            logger.info(
                " > elapsed time to build and save doc-idx mapping "
                "(seconds): {:4f}".format(time.time() - start_time)
            )
            # sample-idx.
            logger.info("start to build and save sample-idx mapping ...")
            start_time = time.time()
            # Use C++ implementation for speed.
            # First compile and then import.
            from libai.data.data_utils import helpers
            assert doc_idx.dtype == np.int32
            assert sizes.dtype == np.int32
            sample_idx = helpers.build_sample_idx(
                sizes, doc_idx, seq_length, num_epochs, tokens_per_epoch
            )
            # sample_idx = _build_sample_idx(sizes, doc_idx, seq_length,
            #                               num_epochs, tokens_per_epoch)
            np.save(sample_idx_filename, sample_idx, allow_pickle=True)
            logger.info(
                " > elapsed time to build and save sample-idx mapping "
                "(seconds): {:4f}".format(time.time() - start_time)
            )
            # shuffle-idx.
            start_time = time.time()
            # -1 is due to data structure used to retrieve the index:
            # sample i --> [sample_idx[i], sample_idx[i+1])
            if separate_last_epoch:
                num_samples_ = num_samples_from_epochs_minus_one
            else:
                num_samples_ = sample_idx.shape[0] - 1
            shuffle_idx = _build_shuffle_idx(num_samples_, sample_idx.shape[0] - 1, np_rng)
            np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True)
            logger.info(
                " > elapsed time to build and save shuffle-idx mapping"
                " (seconds): {:4f}".format(time.time() - start_time)
            )
    # This should be a barrier but nccl barrier assumes
    # device_index=rank which is not the case for model
    # parallel case
    dist.synchronize()
    # Load mappings (memory-mapped, so the arrays are read lazily from disk).
    start_time = time.time()
    logger.info(" > loading doc-idx mapping from {}".format(doc_idx_filename))
    doc_idx = np.load(doc_idx_filename, allow_pickle=True, mmap_mode="r")
    logger.info(" > loading sample-idx mapping from {}".format(sample_idx_filename))
    sample_idx = np.load(sample_idx_filename, allow_pickle=True, mmap_mode="r")
    logger.info(" > loading shuffle-idx mapping from {}".format(shuffle_idx_filename))
    shuffle_idx = np.load(shuffle_idx_filename, allow_pickle=True, mmap_mode="r")
    logger.info(" loaded indexed file in {:3.3f} seconds".format(time.time() - start_time))
    logger.info(" total number of samples: {}".format(sample_idx.shape[0]))
    logger.info(" total number of epochs: {}".format(num_epochs))
    return doc_idx, sample_idx, shuffle_idx
def _num_tokens(documents, sizes):
"""Total number of tokens in the dataset."""
return np.sum(sizes[documents])
def _num_epochs(tokens_per_epoch, seq_length, num_samples):
"""Based on number of samples and sequence length, calculate how many
epochs will be needed."""
num_epochs = 0
total_tokens = 0
while True:
num_epochs += 1
total_tokens += tokens_per_epoch
# -1 is because we need to retrieve seq_length + 1 token each time
# but the last token will overlap with the first token of the next
# sample except for the last sample.
if ((total_tokens - 1) // seq_length) >= num_samples:
return num_epochs
def _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch):
"""Build an array with length = number-of-epochs * number-of-documents.
Each index is mapped to a corresponding document."""
if not separate_last_epoch or num_epochs == 1:
doc_idx = np.mgrid[0:num_epochs, 0 : len(documents)][1]
doc_idx[:] = documents
doc_idx = doc_idx.reshape(-1)
doc_idx = doc_idx.astype(np.int32)
np_rng.shuffle(doc_idx)
return doc_idx
doc_idx_first = _build_doc_idx(documents, num_epochs - 1, np_rng, False)
doc_idx_last = _build_doc_idx(documents, 1, np_rng, False)
return np.concatenate((doc_idx_first, doc_idx_last))
def _build_shuffle_idx(num_samples, total_size, np_rng):
    """Create a shuffled index over [0, total_size), shuffling the
    [0, num_samples) and [num_samples, total_size) halves independently."""
    logger.info(
        " > building shuffle index with split [0, {}) and [{}, {}) "
        "...".format(num_samples, num_samples, total_size)
    )
    # Fall back to int64 only when uint32 could overflow.
    dtype_ = np.int64 if total_size >= (np.iinfo(np.uint32).max - 1) else np.uint32
    first_half = np.arange(num_samples, dtype=dtype_)
    np_rng.shuffle(first_half)
    if num_samples == total_size:
        return first_half
    second_half = np.arange(num_samples, total_size, dtype=dtype_)
    np_rng.shuffle(second_half)
    return np.concatenate((first_half, second_half))
| 12,541 | 40.529801 | 99 | py |
libai | libai-main/libai/data/datasets/bert_dataset.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT Style dataset."""
import numpy as np
import oneflow as flow
from libai.data.structures import DistTensorData, Instance
from ..data_utils import create_masked_lm_predictions, get_samples_mapping
class BertDataset(flow.utils.data.Dataset):
    """Dataset containing sentence pairs for BERT training.
    Each index corresponds to a randomly generated sentence pair.
    Args:
        name: Name of dataset for clarification.
        tokenizer: Tokenizer to use.
        indexed_dataset: Indexed dataset to use.
        data_prefix: Path to the training dataset.
        max_num_samples: Maximum number of samples drawn from the dataset.
        mask_lm_prob: Probability to mask tokens. Defaults to 0.15.
        max_seq_length: Maximum length of the sequence. All values are padded to
            this length. Defaults to 512.
        short_seq_prob: Probability of producing a short sequence. Defaults to 0.0.
        seed: Seed for random number generator for reproducibility. Defaults to 1234.
        binary_head: Specifies whether the underlying dataset
            generates a pair of blocks along with a sentence_target or not.
            Setting it to True assumes that the underlying dataset generates a
            label for the pair of sentences which is surfaced as
            sentence_target. Defaults to True.
        masking_style: Masking scheme forwarded to
            ``create_masked_lm_predictions``. Defaults to "bert".
    """
    def __init__(
        self,
        name,
        tokenizer,
        indexed_dataset,
        data_prefix,
        max_num_samples,
        mask_lm_prob,
        max_seq_length,
        short_seq_prob=0.0,
        seed=1234,
        binary_head=True,
        masking_style="bert",
    ):
        # Params to store.
        self.name = name
        self.seed = seed
        self.masked_lm_prob = mask_lm_prob
        self.max_seq_length = max_seq_length
        self.binary_head = binary_head
        self.masking_style = masking_style
        # Dataset.
        self.indexed_dataset = indexed_dataset
        # Build the samples mapping: (start_idx, end_idx, seq_length) triples.
        self.samples_mapping = get_samples_mapping(
            self.indexed_dataset,
            data_prefix,
            None,
            max_num_samples,
            self.max_seq_length - 3,  # account for added tokens ([CLS] + 2 x [SEP])
            short_seq_prob,
            self.seed,
            self.name,
            self.binary_head,
        )
        # Vocab stuff: id lists / maps plus the special-token ids used for
        # building and masking samples.
        self.tokenizer = tokenizer
        self.vocab_id_list = list(tokenizer.get_vocab().values())
        self.vocab_id_to_token_dict = {v: k for k, v in tokenizer.get_vocab().items()}
        self.cls_id = tokenizer.cls_token_id
        self.sep_id = tokenizer.sep_token_id
        self.mask_id = tokenizer.mask_token_id
        self.pad_id = tokenizer.pad_token_id
    def __len__(self) -> int:
        # One dataset entry per row of the samples mapping.
        return self.samples_mapping.shape[0]
    def __getitem__(self, idx):
        # Gather the raw sentences covered by this mapping entry.
        start_idx, end_idx, seq_length = self.samples_mapping[idx]
        sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)]
        # Note that this rng state should be numpy and not python since
        # python randint is inclusive whereas the numpy one is exclusive.
        # We % 2**32 since numpy requires the seed to be between 0 and 2**32 - 1
        np_rng = np.random.RandomState(seed=((self.seed + idx) % 2 ** 32))
        return build_training_sample(
            self.tokenizer,
            sample,
            seq_length,
            self.max_seq_length,  # needed for padding
            self.vocab_id_list,
            self.vocab_id_to_token_dict,
            self.cls_id,
            self.sep_id,
            self.mask_id,
            self.pad_id,
            self.masked_lm_prob,
            np_rng,
            self.binary_head,
            masking_style=self.masking_style,
        )
def build_training_sample(
    tokenizer,
    sample,
    target_seq_length,
    max_seq_length,
    vocab_id_list,
    vocab_id_to_token_dict,
    cls_id,
    sep_id,
    mask_id,
    pad_id,
    masked_lm_prob,
    np_rng,
    binary_head,
    masking_style="bert",
):
    """Build training sample.
    Arguments:
        tokenizer: Tokenizer forwarded to ``create_masked_lm_predictions``.
        sample: A list of sentences in which each sentence is a list token ids.
        target_seq_length: Desired sequence length.
        max_seq_length: Maximum length of the sequence. All values are padded to
            this length.
        vocab_id_list: List of vocabulary ids. Used to pick a random id.
        vocab_id_to_token_dict: A dictionary from vocab ids to text tokens.
        cls_id: Start of example id.
        sep_id: Separator id.
        mask_id: Mask token id.
        pad_id: Padding token id.
        masked_lm_prob: Probability to mask tokens.
        np_rng: Random number generator. Note that this rng state should be
            numpy and not python since python randint is inclusive for
            the upper bound whereas the numpy one is exclusive.
        binary_head: When True, split the sample into two segments and emit a
            next-sentence-prediction label.
        masking_style: Masking scheme forwarded to ``create_masked_lm_predictions``.
    Returns:
        Instance with input ids, attention mask, token type ids, NSP label,
        LM labels and loss mask.
    """
    if binary_head:
        # We assume that we have at least two sentences in the sample
        assert len(sample) > 1
    assert target_seq_length <= max_seq_length
    # Divide sample into two segments (A and B).
    if binary_head:
        tokens_a, tokens_b, is_next_random = get_a_and_b_segments(sample, np_rng)
    else:
        # Single-segment mode: concatenate every sentence into segment A.
        tokens_a = []
        for j in range(len(sample)):
            tokens_a.extend(sample[j])
        tokens_b = []
        is_next_random = False
    # Truncate to `target_sequence_length`.
    max_num_tokens = target_seq_length
    truncate_segments(tokens_a, tokens_b, len(tokens_a), len(tokens_b), max_num_tokens, np_rng)
    # Build tokens and tokentypes: [CLS] A [SEP] B [SEP].
    tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id)
    # Masking.
    # Upper bound on masked tokens; may be fractional — consumed by
    # create_masked_lm_predictions.
    max_predictions_per_seq = masked_lm_prob * max_num_tokens
    (tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions(
        tokenizer,
        tokens,
        vocab_id_list,
        vocab_id_to_token_dict,
        masked_lm_prob,
        cls_id,
        sep_id,
        mask_id,
        max_predictions_per_seq,
        np_rng,
        masking_style=masking_style,
    )
    # Padding to max_seq_length and conversion to numpy arrays.
    tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np = pad_and_convert_to_numpy(
        tokens, tokentypes, masked_positions, masked_labels, pad_id, max_seq_length
    )
    # Wrap everything as distributed tensors; labels/masks live on the last
    # pipeline stage (placement_idx=-1).
    train_sample = Instance(
        input_ids=DistTensorData(flow.tensor(tokens_np)),
        attention_mask=DistTensorData(flow.tensor(padding_mask_np)),
        tokentype_ids=DistTensorData(flow.tensor(tokentypes_np)),
        ns_labels=DistTensorData(
            flow.tensor(int(is_next_random), dtype=flow.long), placement_idx=-1
        ),
        lm_labels=DistTensorData(flow.tensor(labels_np), placement_idx=-1),
        loss_mask=DistTensorData(flow.tensor(loss_mask_np), placement_idx=-1),
    )
    return train_sample
def pad_and_convert_to_numpy(
    tokens, tokentypes, masked_positions, masked_labels, pad_id, max_seq_length
):
    """Right-pad token/tokentype lists to ``max_seq_length`` and build the
    label and mask arrays used by the masked-LM loss.

    Returns:
        tuple: (tokens, tokentypes, labels, padding_mask, loss_mask) numpy
        arrays; ``labels`` holds -1 at every unmasked position.
    """
    seq_len = len(tokens)
    pad_len = max_seq_length - seq_len
    # Sanity: the sample fits and the inputs are consistent.
    assert pad_len >= 0
    assert len(tokentypes) == seq_len
    assert len(masked_positions) == len(masked_labels)
    # Tokens and token types share the same pad block.
    pad_block = [pad_id] * pad_len
    tokens_np = np.array(tokens + pad_block, dtype=np.int64)
    tokentypes_np = np.array(tokentypes + pad_block, dtype=np.int64)
    # 1 for real tokens, 0 for padding.
    padding_mask_np = np.array([1] * seq_len + [0] * pad_len, dtype=bool)
    # Labels default to -1 (ignored); loss mask selects masked positions only.
    labels = [-1] * max_seq_length
    loss_mask = [0] * max_seq_length
    for position, label in zip(masked_positions, masked_labels):
        assert position < seq_len
        labels[position] = label
        loss_mask[position] = 1
    labels_np = np.array(labels, dtype=np.int64)
    loss_mask_np = np.array(loss_mask, dtype=bool)
    return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np
def get_a_and_b_segments(sample, np_rng):
    """Split a multi-sentence sample into two segments (A, B) and randomly
    decide whether to swap them for the next-sentence-prediction task."""
    n_sentences = len(sample)
    # NSP needs two non-empty segments.
    assert n_sentences > 1, "make sure each sample has at least two sentences."
    # Pick how many leading sentences form segment A. The randint upper
    # bound is exclusive, so segment B always keeps at least one sentence;
    # with only two sentences the split is fixed at 1.
    a_end = np_rng.randint(1, n_sentences) if n_sentences >= 3 else 1
    tokens_a = []
    for sentence in sample[:a_end]:
        tokens_a.extend(sentence)
    tokens_b = []
    for sentence in sample[a_end:]:
        tokens_b.extend(sentence)
    # With probability 0.5 swap the segments and flag the pair as "random next".
    is_next_random = bool(np_rng.random() < 0.5)
    if is_next_random:
        tokens_a, tokens_b = tokens_b, tokens_a
    return tokens_a, tokens_b, is_next_random
def truncate_segments(tokens_a, tokens_b, len_a, len_b, max_num_tokens, np_rng):
    """Trim the (A, B) token lists in place until their combined length fits
    ``max_num_tokens``; returns True when any truncation happened."""
    assert len_a > 0
    if len_a + len_b <= max_num_tokens:
        # Already short enough; nothing to trim.
        return False
    while len_a + len_b > max_num_tokens:
        # Always shrink the currently longer segment (ties shrink B).
        if len_a > len_b:
            len_a -= 1
            victim = tokens_a
        else:
            len_b -= 1
            victim = tokens_b
        # Drop from the front or the back with equal probability.
        drop_front = np_rng.random() < 0.5
        if drop_front:
            del victim[0]
        else:
            victim.pop()
    return True
def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id):
    """Assemble ``[CLS] A [SEP] B [SEP]`` with matching segment ids: 0 for
    [CLS], segment A and its [SEP]; 1 for segment B and its trailing [SEP]."""
    # Segment A plus its surrounding special tokens all carry type 0.
    tokens = [cls_id] + list(tokens_a) + [sep_id]
    tokentypes = [0] * len(tokens)
    if tokens_b:
        # Segment B and its closing [SEP] carry type 1.
        tokens.extend(tokens_b)
        tokens.append(sep_id)
        tokentypes.extend([1] * (len(tokens_b) + 1))
    return tokens, tokentypes
| 10,691 | 31.697248 | 98 | py |
libai | libai-main/libai/data/datasets/roberta_dataset.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Roberta Style dataset."""
import numpy as np
import oneflow as flow
from libai.data.structures import DistTensorData, Instance
from ..data_utils import create_masked_lm_predictions, get_samples_mapping
from .bert_dataset import pad_and_convert_to_numpy
class RobertaDataset(flow.utils.data.Dataset):
    """Dataset containing sentences for RoBERTa training.
    Each index corresponds to a randomly selected sentence.
    Args:
        name: Name of dataset for clarification.
        tokenizer: Tokenizer to use.
        indexed_dataset: Indexed dataset to use.
        data_prefix: Path to the training dataset.
        max_num_samples: Maximum number of samples drawn from the dataset.
        mask_lm_prob: Probability to mask tokens. Defaults to 0.15.
        max_seq_length: Maximum length of the sequence. All values are padded to
            this length. Defaults to 512.
        short_seq_prob: Probability of producing a short sequence. Defaults to 0.0.
        seed: Seed for random number generator for reproducibility. Defaults to 1234.
        masking_style: Masking scheme forwarded to
            ``create_masked_lm_predictions``. Defaults to "bert".
    """
    def __init__(
        self,
        name,
        tokenizer,
        indexed_dataset,
        data_prefix,
        max_num_samples,
        mask_lm_prob,
        max_seq_length,
        short_seq_prob=0.0,
        seed=1234,
        masking_style="bert",
    ):
        super().__init__()
        # Params to store.
        self.name = name
        self.seed = seed
        self.masked_lm_prob = mask_lm_prob
        self.max_seq_length = max_seq_length
        self.masking_style = masking_style
        # Dataset.
        self.indexed_dataset = indexed_dataset
        # Build the samples mapping: (start_idx, end_idx, seq_length) triples.
        # RoBERTa has no NSP task, hence binary_head=False.
        self.samples_mapping = get_samples_mapping(
            self.indexed_dataset,
            data_prefix,
            None,
            max_num_samples,
            self.max_seq_length - 2,  # account for added tokens ([CLS] + [SEP])
            short_seq_prob,
            self.seed,
            self.name,
            binary_head=False,
        )
        # Vocab stuff: id lists / maps plus the special-token ids used for
        # building and masking samples.
        self.tokenizer = tokenizer
        self.vocab_id_list = list(tokenizer.get_vocab().values())
        self.vocab_id_to_token_dict = {v: k for k, v in tokenizer.get_vocab().items()}
        self.cls_id = tokenizer.cls_token_id
        self.sep_id = tokenizer.sep_token_id
        self.mask_id = tokenizer.mask_token_id
        self.pad_id = tokenizer.pad_token_id
    def __len__(self) -> int:
        # One dataset entry per row of the samples mapping.
        return self.samples_mapping.shape[0]
    def __getitem__(self, idx):
        # Gather the raw sentences covered by this mapping entry.
        start_idx, end_idx, seq_length = self.samples_mapping[idx]
        sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)]
        # Note that this rng state should be numpy and not python since
        # python randint is inclusive whereas the numpy one is exclusive.
        # We % 2**32 since numpy requires the seed to be between 0 and 2**32 - 1
        np_rng = np.random.RandomState(seed=((self.seed + idx) % 2 ** 32))
        return build_training_sample(
            self.tokenizer,
            sample,
            seq_length,
            self.max_seq_length,  # needed for padding
            self.vocab_id_list,
            self.vocab_id_to_token_dict,
            self.cls_id,
            self.sep_id,
            self.mask_id,
            self.pad_id,
            self.masked_lm_prob,
            np_rng,
            masking_style=self.masking_style,
        )
def build_training_sample(
    tokenizer,
    sample,
    target_seq_length,
    max_seq_length,
    vocab_id_list,
    vocab_id_to_token_dict,
    cls_id,
    sep_id,
    mask_id,
    pad_id,
    masked_lm_prob,
    np_rng,
    masking_style="bert",
):
    """Build training sample.
    Arguments:
        tokenizer: Tokenizer forwarded to ``create_masked_lm_predictions``.
        sample: A list of sentences in which each sentence is a list token ids.
        target_seq_length: Desired sequence length.
        max_seq_length: Maximum length of the sequence. All values are padded to
            this length.
        vocab_id_list: List of vocabulary ids. Used to pick a random id.
        vocab_id_to_token_dict: A dictionary from vocab ids to text tokens.
        cls_id: Start of example id.
        sep_id: Separator id.
        mask_id: Mask token id.
        pad_id: Padding token id.
        masked_lm_prob: Probability to mask tokens.
        np_rng: Random number generator. Note that this rng state should be
            numpy and not python since python randint is inclusive for
            the upper bound whereas the numpy one is exclusive.
        masking_style: Masking scheme forwarded to ``create_masked_lm_predictions``.
    Returns:
        Instance with input ids, attention mask, token type ids, LM labels
        and loss mask (single segment, no NSP label).
    """
    assert target_seq_length <= max_seq_length
    # Flatten the sentences into one token list (single segment — no NSP).
    tokens = []
    for j in range(len(sample)):
        tokens.extend(sample[j])
    # Truncate to `target_seq_length`, removing tokens from a random end.
    max_num_tokens = target_seq_length
    truncate_segments(tokens, len(tokens), max_num_tokens, np_rng)
    # create tokens and tokentypes: [CLS] ... [SEP], all segment ids 0
    tokens, tokentypes = create_tokens_and_tokentypes(tokens, cls_id, sep_id)
    # Masking
    # Upper bound on masked tokens; may be fractional — consumed by
    # create_masked_lm_predictions.
    max_predictions_per_seq = masked_lm_prob * max_num_tokens
    (tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions(
        tokenizer,
        tokens,
        vocab_id_list,
        vocab_id_to_token_dict,
        masked_lm_prob,
        cls_id,
        sep_id,
        mask_id,
        max_predictions_per_seq,
        np_rng,
        masking_style=masking_style,
    )
    # Padding to max_seq_length and conversion to numpy arrays.
    tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np = pad_and_convert_to_numpy(
        tokens, tokentypes, masked_positions, masked_labels, pad_id, max_seq_length
    )
    # Wrap everything as distributed tensors; labels/masks live on the last
    # pipeline stage (placement_idx=-1).
    train_sample = Instance(
        input_ids=DistTensorData(flow.tensor(tokens_np)),
        attention_mask=DistTensorData(flow.tensor(padding_mask_np)),
        tokentype_ids=DistTensorData(flow.tensor(tokentypes_np)),
        lm_labels=DistTensorData(flow.tensor(labels_np), placement_idx=-1),
        loss_mask=DistTensorData(flow.tensor(loss_mask_np), placement_idx=-1),
    )
    return train_sample
def truncate_segments(tokens, len_tokens, max_num_tokens, np_rng):
    """Shrink ``tokens`` in place to at most ``max_num_tokens`` entries,
    removing one token from a random end each step; returns True when
    anything was removed."""
    assert len_tokens > 0
    if len_tokens <= max_num_tokens:
        # Already short enough; nothing to trim.
        return False
    # Remove one token at a time, from the head or the tail with equal odds.
    for _ in range(len_tokens - max_num_tokens):
        if np_rng.random() < 0.5:
            del tokens[0]
        else:
            del tokens[-1]
    return True
def create_tokens_and_tokentypes(tokens, cls_id, sep_id):
    """Wrap ``tokens`` in place as ``[CLS] ... [SEP]``; every position gets
    segment id 0 (RoBERTa uses a single segment)."""
    # Prepend [CLS] and append [SEP], mutating the caller's list.
    tokens[:0] = [cls_id]
    tokens.append(sep_id)
    return tokens, [0] * len(tokens)
| 7,186 | 31.668182 | 98 | py |
libai | libai-main/libai/data/datasets/t5_dataset.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T5 Style dataset."""
import collections
import numpy as np
import oneflow as flow
from libai.data.structures import DistTensorData, Instance
from ..data_utils import create_masked_lm_predictions, get_samples_mapping
class T5Dataset(flow.utils.data.Dataset):
    """
    Dataset containing sentences for T5 training.
    Args:
        name: Name of dataset.
        tokenizer: Tokenizer to use.
        indexed_dataset: Indexed dataset to use.
        data_prefix (str): Path to the training dataset.
        max_num_samples (int): Maximum number of samples drawn from the dataset.
        masked_lm_prob (float, optional): Probability to mask tokens. Defaults to 0.15.
        max_seq_length (int, optional): Maximum length of the sequence passing into encoder.
            All values are padded to this length. Defaults to 512.
        max_seq_length_dec (int, optional): Maximum length of the sequence passing into decoder.
            All values are padded to this length. Defaults to 128.
        short_seq_prob (float, optional):
            Probability of producing a short sequence. Defaults to 0.0.
        seed (int, optional):
            Seed for random number generator for reproducibility. Defaults to 1234.
    """
    def __init__(
        self,
        name,
        tokenizer,
        indexed_dataset,
        data_prefix,
        max_num_samples,
        masked_lm_prob,
        max_seq_length,
        max_seq_length_dec,
        short_seq_prob,
        seed,
    ):
        # Params to store.
        self.name = name
        self.seed = seed
        self.masked_lm_prob = masked_lm_prob
        self.max_seq_length = max_seq_length
        self.max_seq_length_dec = max_seq_length_dec
        # Dataset.
        self.indexed_dataset = indexed_dataset
        # Build the samples mapping: (start_idx, end_idx, seq_length) triples.
        self.samples_mapping = get_samples_mapping(
            self.indexed_dataset,
            data_prefix,
            None,
            max_num_samples,
            self.max_seq_length - 2,  # account for added tokens
            short_seq_prob,
            self.seed,
            self.name,
            False,
        )
        # Vocab stuff.
        self.tokenizer = tokenizer
        # Register BOS/EOS and the sentinel ("additional special") tokens so
        # they have vocab ids; note this mutates the shared tokenizer object.
        tokenizer.add_tokens(
            [tokenizer._bos_token, tokenizer._eos_token, *tokenizer._additional_special_tokens]
        )
        vocab = tokenizer.get_vocab()
        inv_vocab = {v: k for k, v in vocab.items()}
        self.vocab_id_list = list(inv_vocab.keys())
        self.vocab_id_to_token_dict = inv_vocab
        self.cls_id = vocab[tokenizer._cls_token]
        self.sep_id = vocab[tokenizer._sep_token]
        self.mask_id = vocab[tokenizer._mask_token]
        self.pad_id = vocab[tokenizer._pad_token]
        self.bos_id = vocab[tokenizer._bos_token]
        self.eos_id = vocab[tokenizer._eos_token]
        # One sentinel id per corruptible span; span corruption needs at least one.
        self.sentinel_tokens = [vocab[x] for x in tokenizer._additional_special_tokens]
        assert len(self.sentinel_tokens) > 0
    def __len__(self) -> int:
        # One dataset entry per row of the samples mapping.
        return self.samples_mapping.shape[0]
    def __getitem__(self, idx):
        # Gather the raw sentences covered by this mapping entry.
        start_index, end_index, seq_length = self.samples_mapping[idx]
        sample = []
        for index in range(start_index, end_index):
            sample.append(self.indexed_dataset[index])
        # Note that this rng state should be numpy and not python since
        # python randint is inclusive whereas the numpy one is exclusive.
        # NOTE(review): unlike BertDataset/RobertaDataset the seed is not
        # reduced mod 2**32 here — fine only while seed + idx < 2**32; confirm.
        np_rng = np.random.RandomState(seed=(self.seed + idx))
        return build_training_sample(
            self.tokenizer,
            sample,
            seq_length,
            self.max_seq_length,  # needed for padding
            self.max_seq_length_dec,
            self.vocab_id_list,
            self.vocab_id_to_token_dict,
            self.cls_id,
            self.sep_id,
            self.mask_id,
            self.pad_id,
            self.masked_lm_prob,
            np_rng,
            self.bos_id,
            self.eos_id,
            self.sentinel_tokens,
        )
def build_training_sample(
    tokenizer,
    sample,
    target_seq_length,
    max_seq_length,
    max_seq_length_dec,
    vocab_id_list,
    vocab_id_to_token_dict,
    cls_id,
    sep_id,
    mask_id,
    pad_id,
    masked_lm_prob,
    np_rng,
    bos_id=None,
    eos_id=None,
    sentinel_tokens=None,
):
    """Build one T5 (span-corruption style) training sample.
    Arguments:
        tokenizer: Tokenizer forwarded to ``create_masked_lm_predictions``.
        sample: A list of sentences in which each sentence is a list token ids.
        target_seq_length: Desired sequence length.
        max_seq_length: Maximum length of the encoder sequence. All values are
            padded to this length.
        max_seq_length_dec: Maximum length of the decoder sequence.
        vocab_id_list: List of vocabulary ids. Used to pick a random id.
        vocab_id_to_token_dict: A dictionary from vocab ids to text tokens.
        cls_id: Start of example id.
        sep_id: Separator id.
        mask_id: Mask token id.
        pad_id: Padding token id.
        masked_lm_prob: Probability to mask tokens.
        np_rng: Random number generator. Note that this rng state should be
            numpy and not python since python randint is inclusive for
            the upper bound whereas the numpy one is exclusive.
        bos_id: start of decoder example id
        eos_id: end of generation id
        sentinel_tokens: unique value to be substituted for every replaced span
    Returns:
        Instance with encoder/decoder input ids, the three attention masks,
        LM labels and loss mask as distributed tensors.
    """
    assert target_seq_length <= max_seq_length
    # flatten sentences into one list
    tokens = [token for sentence in sample for token in sentence]
    # Truncate to `target_seq_length`. (A leftover no-op comparison
    # `len(tokens) > max_num_tokens` was removed here; the slice below does
    # the actual truncation.)
    max_num_tokens = target_seq_length
    tokens = tokens[:max_num_tokens]
    # Masking: T5-style span corruption with geometric span lengths.
    # max_predictions_per_seq may be fractional; consumed by
    # create_masked_lm_predictions.
    max_predictions_per_seq = masked_lm_prob * max_num_tokens
    (tokens, masked_positions, masked_labels, _, masked_spans) = create_masked_lm_predictions(
        tokenizer,
        tokens,
        vocab_id_list,
        vocab_id_to_token_dict,
        masked_lm_prob,
        cls_id,
        sep_id,
        mask_id,
        max_predictions_per_seq,
        np_rng,
        max_ngrams=10,
        geometric_dist=True,
        masking_style="t5",
    )
    # Padding: build encoder/decoder streams, masks and labels.
    (
        tokens_enc,
        tokens_dec_in,
        labels,
        enc_mask,
        dec_mask,
        enc_dec_mask,
        loss_mask,
    ) = pad_and_convert_to_numpy(
        tokens,
        masked_positions,
        masked_labels,
        pad_id,
        max_seq_length,
        max_seq_length_dec,
        masked_spans,
        bos_id,
        eos_id,
        sentinel_tokens,
    )
    # Labels/loss mask live on the last pipeline stage (placement_idx=-1).
    sample = Instance(
        encoder_input_ids=DistTensorData(tokens_enc),
        decoder_input_ids=DistTensorData(tokens_dec_in),
        encoder_attn_mask=DistTensorData(enc_mask),
        decoder_attn_mask=DistTensorData(dec_mask),
        encoder_decoder_attn_mask=DistTensorData(enc_dec_mask),
        lm_labels=DistTensorData(labels, placement_idx=-1),
        loss_mask=DistTensorData(loss_mask, placement_idx=-1),
    )
    return sample
def pad_and_convert_to_numpy(
    tokens,
    masked_positions,
    masked_labels,
    pad_id,
    max_seq_length,
    max_seq_length_dec,
    masked_spans=None,
    bos_id=None,
    eos_id=None,
    sentinel_tokens=None,
):
    """Build the padded T5 encoder/decoder streams, attention masks, labels
    and loss mask from a span-masked token sequence.

    Each masked span is replaced by one sentinel token in the encoder input;
    the decoder input/output repeat the sentinel followed by the span's
    original tokens (teacher forcing), with <bos> prepended to the decoder
    input and <eos> appended to the decoder output.

    NOTE(review): masked_spans / sentinel_tokens default to None but are
    required here (``collections.deque(None)`` and iterating None would
    raise) — callers always pass them; confirm before relying on defaults.

    Returns:
        tuple of oneflow tensors: (tokens_enc, tokens_dec_in, labels,
        enc_mask, dec_mask, enc_dec_mask, loss_mask).
    """
    # Consume sentinel ids from the left, one per masked span.
    sentinel_tokens = collections.deque(sentinel_tokens)
    t5_input = []
    (t5_decoder_in, t5_decoder_out) = ([bos_id], [])
    (start_index, end_index) = (0, None)
    for span in masked_spans:
        flag = sentinel_tokens.popleft()
        # Append the same tokens in decoder input and output
        t5_decoder_in.append(flag)
        t5_decoder_in.extend(span.label)
        t5_decoder_out.append(flag)
        t5_decoder_out.extend(span.label)
        # Encoder input keeps the tokens before the span, then the sentinel
        # in place of the whole span.
        end_index = span.index[0]
        t5_input.extend(tokens[start_index:end_index])
        t5_input.append(flag)
        # the next start index is the token after the last span token
        start_index = span.index[-1] + 1
    # Add <eos> token to the t5_decoder_out
    t5_decoder_out.append(eos_id)
    # Add the remaining tokens to the t5 input
    t5_input.extend(tokens[start_index:])
    # assert (len(t5_input) - len(masked_spans)) + \
    #        (len(t5_decoder_in) - (len(masked_spans) + 1)) == len(tokens)
    # Some checks.
    # Encoder-side padding mask.
    num_tokens = len(t5_input)
    padding_length = max_seq_length - num_tokens
    assert padding_length >= 0
    assert len(masked_positions) == len(masked_labels)
    # Tokens..
    filler = [pad_id] * padding_length
    tokens_enc = np.array(t5_input + filler, dtype=np.int64)
    # Decoder-side padding mask.
    num_tokens_dec = len(t5_decoder_in)
    padding_length_dec = max_seq_length_dec - num_tokens_dec
    assert padding_length_dec >= 0
    filler_dec = [pad_id] * padding_length_dec
    tokens_dec_in = np.array(t5_decoder_in + filler_dec, dtype=np.int64)
    # Create attention masks: encoder self-attention, cross-attention, and
    # causal (history-masked) decoder self-attention.
    enc_mask = make_attention_mask(tokens_enc, tokens_enc)
    enc_dec_mask = make_attention_mask(tokens_dec_in, tokens_enc)
    dec_mask = make_attention_mask(tokens_dec_in, tokens_dec_in)
    dec_mask = dec_mask * make_history_mask(tokens_dec_in)
    # Labels mask: -1 marks padded (ignored) positions.
    labels = t5_decoder_out + ([-1] * padding_length_dec)
    labels = np.array(labels, dtype=np.int64)
    # Loss mask
    loss_mask = ([1] * num_tokens_dec) + ([0] * padding_length_dec)
    loss_mask = np.array(loss_mask, dtype=bool)
    # Convert everything to oneflow tensors for the Instance wrapper.
    tokens_enc = flow.tensor(tokens_enc, dtype=flow.long)
    tokens_dec_in = flow.tensor(tokens_dec_in, dtype=flow.long)
    labels = flow.tensor(labels, dtype=flow.long)
    enc_mask = flow.tensor(enc_mask, dtype=flow.bool)
    dec_mask = flow.tensor(dec_mask, dtype=flow.bool)
    enc_dec_mask = flow.tensor(enc_dec_mask, dtype=flow.bool)
    loss_mask = flow.tensor(loss_mask, dtype=flow.bool)
    return tokens_enc, tokens_dec_in, labels, enc_mask, dec_mask, enc_dec_mask, loss_mask
def make_attention_mask(source_block, target_block):
    """Build a 2-D attention mask of shape (len(source), len(target)).

    Entry (i, j) is 1 exactly when both source_block[i] and target_block[j]
    are >= 1, i.e. neither position is padding (pad ids are < 1 here).
    """
    source_valid = source_block >= 1
    target_valid = target_block >= 1
    # Outer product of the two validity vectors gives the full mask.
    mask = np.outer(source_valid, target_valid)
    return mask.astype(np.int64)
def make_history_mask(block):
    """Causal (lower-triangular) mask of shape (len(block), len(block)):
    entry (i, j) is 1 iff j <= i, so each position attends only to the past
    and itself."""
    length = block.shape[0]
    return np.tril(np.ones((length, length), dtype=np.int64))
| 11,149 | 31.04023 | 96 | py |
libai | libai-main/libai/data/datasets/__init__.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cifar import CIFAR10Dataset, CIFAR100Dataset
from .imagenet import ImageNetDataset
from .mnist import MNISTDataset
from .bert_dataset import BertDataset
from .roberta_dataset import RobertaDataset
from .gpt_dataset import GPT2Dataset
from .t5_dataset import T5Dataset
| 895 | 37.956522 | 74 | py |
libai | libai-main/libai/data/datasets/cifar.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
import oneflow as flow
from flowvision import datasets
from libai.data.structures import DistTensorData, Instance
class CIFAR10Dataset(datasets.CIFAR10):
    r"""`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset in LiBai.

    Thin wrapper over ``flowvision.datasets.CIFAR10`` that packages each item
    as a LiBai ``Instance`` carrying distributed image/label tensors.

    Args:
        root (string): Root directory of dataset where directory
            ``cifar-10-batches-py`` exists or will be saved to if download is set to True.
        train (bool, optional): If True, creates dataset from training set, otherwise
            creates from test set.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If the dataset is already downloaded, it will not be
            downloaded again.
    """

    def __init__(
        self,
        root: str,
        train: bool = True,
        transform: Optional[Callable] = None,
        download: bool = False,
        **kwargs
    ):
        super().__init__(
            root=root, train=train, transform=transform, download=download, **kwargs
        )

    def __getitem__(self, index: int):
        # Delegate decoding/transforms to flowvision, then wrap for LiBai.
        image, label = super().__getitem__(index)
        label_tensor = flow.tensor(label, dtype=flow.long)
        return Instance(
            images=DistTensorData(image, placement_idx=0),
            labels=DistTensorData(label_tensor, placement_idx=-1),
        )
class CIFAR100Dataset(datasets.CIFAR100):
    r"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset in LiBai.

    Thin wrapper over ``flowvision.datasets.CIFAR100`` that packages each item
    as a LiBai ``Instance`` carrying distributed image/label tensors.

    Args:
        root (string): Root directory of dataset where directory
            ``cifar-10-batches-py`` exists or will be saved to if download is set to True.
        train (bool, optional): If True, creates dataset from training set, otherwise
            creates from test set.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If the dataset is already downloaded, it will not be
            downloaded again.
    """

    def __init__(
        self,
        root: str,
        train: bool = True,
        transform: Optional[Callable] = None,
        download: bool = False,
        **kwargs
    ):
        super().__init__(
            root=root, train=train, transform=transform, download=download, **kwargs
        )

    def __getitem__(self, index: int):
        # Delegate decoding/transforms to flowvision, then wrap for LiBai.
        image, label = super().__getitem__(index)
        label_tensor = flow.tensor(label, dtype=flow.long)
        return Instance(
            images=DistTensorData(image, placement_idx=0),
            labels=DistTensorData(label_tensor, placement_idx=-1),
        )
| 3,726 | 37.42268 | 94 | py |
libai | libai-main/libai/data/datasets/imagenet.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Callable, Optional
import oneflow as flow
from flowvision import datasets
from libai.data.structures import DistTensorData, Instance
class ImageNetDataset(datasets.ImageFolder):
    r"""`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset in LiBai.

    Args:
        root (string): Root directory of the ImageNet Dataset.
        train (bool, optional): If True, creates dataset from training set, otherwise
            creates from test set.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
    """

    def __init__(
        self, root: str, train: bool = True, transform: Optional[Callable] = None, **kwargs
    ):
        # ImageNet keeps its splits in "train"/"val" sub-directories.
        subdir = "train" if train else "val"
        super(ImageNetDataset, self).__init__(
            root=os.path.join(root, subdir), transform=transform, **kwargs
        )

    def __getitem__(self, index: int):
        """Return sample ``index`` wrapped in LiBai's ``Instance`` structure."""
        sample, target = super().__getitem__(index)
        label_tensor = flow.tensor(target, dtype=flow.long)
        return Instance(
            images=DistTensorData(sample, placement_idx=0),
            labels=DistTensorData(label_tensor, placement_idx=-1),
        )
| 1,889 | 36.058824 | 91 | py |
libai | libai-main/libai/data/datasets/mnist.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
import oneflow as flow
from flowvision import datasets
from libai.data.structures import DistTensorData, Instance
class MNISTDataset(datasets.MNIST):
    r"""`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset in LiBai.

    Args:
        root (string): Root directory of dataset where ``MNIST/processed/training.pt``
            and ``MNIST/processed/test.pt`` exist.
        train (bool, optional): If True, creates dataset from ``training.pt``,
            otherwise from ``test.pt``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If the dataset is already downloaded, it will not be
            downloaded again.
    """

    def __init__(
        self,
        root: str,
        train: bool = True,
        transform: Optional[Callable] = None,
        download: bool = False,
        **kwargs
    ):
        # Forward everything unchanged to flowvision's MNIST constructor.
        super().__init__(
            root=root, train=train, transform=transform, download=download, **kwargs
        )

    def __getitem__(self, index: int):
        """Return sample ``index`` wrapped in LiBai's ``Instance`` structure.

        The label is converted to a ``flow.long`` tensor; ``placement_idx``
        places images on the first stage and labels on the last.
        """
        img, target = super().__getitem__(index)
        data_sample = Instance(
            images=DistTensorData(img, placement_idx=0),
            labels=DistTensorData(flow.tensor(target, dtype=flow.long), placement_idx=-1),
        )
        return data_sample
| 2,256 | 37.254237 | 91 | py |
libai | libai-main/libai/data/samplers/__init__.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .samplers import CyclicSampler, SingleRoundSampler
| 677 | 38.882353 | 74 | py |
libai | libai-main/libai/data/samplers/samplers.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow.utils.data import Sampler
class CyclicSampler(Sampler):
    """Infinite sampler that cycles over the dataset epoch after epoch.

    Works both with and without data parallelism: each epoch's index space is
    partitioned into ``data_parallel_size`` contiguous buckets and every rank
    yields micro-batches drawn only from its own bucket.

    Arguments:
        dataset: dataset to be sampled.
        micro_batch_size: batch size per model instance; the global batch size
            is ``micro_batch_size * data_parallel_size``.
        shuffle: whether to shuffle the indices inside each epoch.
        consumed_samples: number of samples already trained on, used to resume
            training (default: ``0``).
        data_parallel_rank: local rank for data parallelism.
        data_parallel_size: the size of data parallelism.
        seed: random seed, used for reproducing experiments (default: ``0``).
    """

    def __init__(
        self,
        dataset,
        micro_batch_size,
        shuffle=False,
        consumed_samples=0,
        data_parallel_rank=0,
        data_parallel_size=1,
        seed=0,
    ):
        self.dataset = dataset
        self.data_size = len(dataset)
        self.shuffle = shuffle
        self.seed = seed
        self.data_parallel_rank = data_parallel_rank
        self.data_parallel_size = data_parallel_size
        self.micro_batch_size = micro_batch_size
        self.actual_batch_size = micro_batch_size * data_parallel_size
        # Samples drawn per rank per pass over the dataset; any tail that
        # does not fill a full global batch is dropped.
        self.data_size_per_epoch = self.data_size // self.actual_batch_size * micro_batch_size
        self.consumed_samples = consumed_samples

    def __iter__(self):
        """Yield micro-batches (lists of dataset indices) forever.

        The epoch and the offset inside it are recovered from
        ``consumed_samples``, so a resumed run continues exactly where the
        previous one stopped.
        """
        epoch = self.consumed_samples // self.data_size_per_epoch
        samples_into_epoch = self.consumed_samples % self.data_size_per_epoch
        pending = []
        while True:
            offset = samples_into_epoch // self.data_parallel_size
            base = self.data_parallel_rank * self.data_size_per_epoch
            if self.shuffle:
                generator = flow.Generator()
                # Re-seed per epoch so all ranks shuffle identically and
                # restarted runs reproduce the same order.
                generator.manual_seed(self.seed + epoch)
                order = flow.randperm(self.data_size_per_epoch, generator=generator).tolist()
            else:
                order = flow.arange(self.data_size_per_epoch).tolist()
            indices = [base + pos for pos in order[offset:]]
            epoch += 1
            if getattr(self.dataset, "supports_prefetch", False):
                self.dataset.prefetch(indices)
            for index in indices:
                pending.append(index)
                if len(pending) == self.micro_batch_size:
                    self.consumed_samples += self.actual_batch_size
                    yield pending
                    pending = []
            samples_into_epoch = 0

    def __len__(self):
        return self.data_size

    def set_consumed_samples(self, consumed_samples):
        """Recover the training iteration by setting ``consumed_samples``."""
        self.consumed_samples = consumed_samples

    def set_epoch(self, epoch):
        """Used for restoring training status."""
        self.epoch = epoch
class SingleRoundSampler(Sampler):
    """Sampler that walks the dataset exactly once.

    Compatible with both plain and data-parallel training: the dataset is
    split into ``data_parallel_size`` buckets, with the first
    ``data_size % data_parallel_size`` ranks taking one extra sample, and each
    rank yields micro-batches from its own bucket.

    Arguments:
        dataset: dataset to be sampled.
        micro_batch_size: batch size per model instance; the global batch size
            is ``micro_batch_size * data_parallel_size``.
        shuffle: whether to shuffle the dataset.
        data_parallel_rank: local rank for data parallelism.
        data_parallel_size: the size of data parallelism.
        seed: random seed, used for reproducing experiments (default: ``0``).
        drop_last: whether to drop the remaining data (default: ``False``).
    """

    def __init__(
        self,
        dataset,
        micro_batch_size,
        shuffle=False,
        data_parallel_rank=0,
        data_parallel_size=1,
        seed=0,
        drop_last=False,
    ):
        self.dataset = dataset
        self.data_size = len(dataset)
        self.shuffle = shuffle
        self.seed = seed
        self.drop_last = drop_last
        self.data_parallel_rank = data_parallel_rank
        self.data_parallel_size = data_parallel_size
        self.micro_batch_size = micro_batch_size

    def __iter__(self):
        """Yield micro-batches (lists of dataset indices) for one pass."""
        quotient, remainder = divmod(self.data_size, self.data_parallel_size)
        bucket_size = quotient
        start = self.data_parallel_rank * quotient
        if self.data_parallel_rank < remainder:
            # The first `remainder` ranks each take one extra sample.
            bucket_size += 1
        start += min(self.data_parallel_rank, remainder)
        if self.shuffle:
            generator = flow.Generator()
            generator.manual_seed(self.seed)
            order = flow.randperm(bucket_size, generator=generator).tolist()
        else:
            order = flow.arange(bucket_size).tolist()
        indices = [start + pos for pos in order]
        if getattr(self.dataset, "supports_prefetch", False):
            self.dataset.prefetch(indices)
        chunk = []
        for index in indices:
            chunk.append(index)
            if len(chunk) == self.micro_batch_size:
                yield chunk
                chunk = []
        if not self.drop_last:
            if self.data_parallel_rank >= remainder and remainder > 0:
                # Pad ranks that received one fewer sample with index 0 so
                # every rank emits the same number of batches.
                chunk.append(0)
            if len(chunk) > 0:
                yield chunk

    def __len__(self):
        global_batch_size = self.micro_batch_size * self.data_parallel_size
        if self.drop_last:
            return self.data_size // global_batch_size
        full, rest = divmod(self.data_size, global_batch_size)
        return full + (1 if rest else 0)
| 6,786 | 35.489247 | 99 | py |
libai | libai-main/libai/data/data_utils/dataset_utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, and NVIDIA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import os
import re
import time
import numpy as np
import oneflow as flow
from libai.utils import distributed as dist
logger = logging.getLogger(__name__)
# Most of the code here has been copied from:
# https://github.com/google-research/albert/blob/master/create_pretraining_data.py
# with some modifications.
def is_shared_folder(filename):
    """Heuristically decide whether *filename* lives on a shared filesystem.

    Setting the environment variable ``IS_SHARED_FILE=1`` forces a ``True``
    answer. Otherwise a path on the same device as ``/`` is treated as local,
    and anything else counts as shared only when its parent directory is a
    mount point whose real path starts with ``/nfs``, ``/smb`` or ``/cifs``.
    """
    abs_path = os.path.abspath(filename)
    device = os.stat(abs_path).st_dev
    # Explicit override: `export IS_SHARED_FILE=1` marks the file as shared.
    if os.environ.get("IS_SHARED_FILE") == "1":
        return True
    if device == os.stat("/").st_dev:
        return False
    parent = os.path.dirname(abs_path)
    return os.path.ismount(parent) and os.path.realpath(parent).startswith(
        ("/nfs", "/smb", "/cifs")
    )
def compile_helper():
    """Compile the C++ dataset helpers module at runtime.

    Runs ``make`` in this package's directory. Make sure this is invoked by a
    single process only — concurrent builds would race on the artifacts.
    Exits the interpreter with status 1 when the build fails.
    """
    # `os` is already imported at module level; only the names used solely
    # here are imported locally (the original re-imported `os` redundantly).
    import subprocess
    import sys

    path = os.path.abspath(os.path.dirname(__file__))
    ret = subprocess.run(["make", "-C", path])
    if ret.returncode != 0:
        logger.info("Making C++ dataset helpers module failed, exiting.")
        sys.exit(1)
# One masked-LM prediction target: `index` is the masked position (or list of
# positions for a span) and `label` the original token id(s) expected there.
MaskedLmInstance = collections.namedtuple("MaskedLmInstance", ["index", "label"])
def is_start_piece(piece):
    """Return True when *piece* starts a new word (BERT WordPiece scheme).

    WordPiece prefixes every non-initial sub-token with ``##``, so any piece
    lacking that marker begins a fresh word.
    """
    return not piece.startswith("##")
def create_masked_lm_predictions(
    tokenizer,
    tokens,
    vocab_id_list,
    vocab_id_to_token_dict,
    masked_lm_prob,
    cls_id,
    sep_id,
    mask_id,
    max_predictions_per_seq,
    np_rng,
    max_ngrams=3,
    do_whole_word_mask=True,
    favor_longer_ngram=False,
    do_permutation=False,
    geometric_dist=False,
    masking_style="bert",
):
    """Creates the predictions for the masked LM objective.
    Note: Tokens here are vocab ids and not text tokens.

    Selects (possibly whole-word, possibly n-gram) spans to mask until
    ``masked_lm_prob * len(tokens)`` positions (capped at
    ``max_predictions_per_seq``) are covered, applying BERT-style 80/10/10
    replacement, chinese-WWM variant, or T5-style sentinel masking depending
    on ``masking_style``. Returns
    ``(output_tokens, masked_lm_positions, masked_lm_labels, token_boundary,
    masked_spans)`` — except in the early-exit case below, which returns only
    the first four elements.
    """
    cand_indexes = []
    # Note(mingdachen): We create a list for recording if the piece is
    # the starting piece of current token, where 1 means true, so that
    # on-the-fly whole word masking is possible.
    token_boundary = [0] * len(tokens)
    for (i, token) in enumerate(tokens):
        if token == cls_id or token == sep_id:
            token_boundary[i] = 1
            continue
        # Whole Word Masking means that if we mask all of the wordpieces
        # corresponding to an original word.
        #
        # Note that Whole Word Masking does *not* change the training code
        # at all -- we still predict each WordPiece independently, softmaxed
        # over the entire vocabulary.
        if (
            do_whole_word_mask
            and len(cand_indexes) >= 1
            and not is_start_piece(vocab_id_to_token_dict[token])
        ):
            # Continuation piece: attach it to the current word's index group.
            cand_indexes[-1].append(i)
        else:
            cand_indexes.append([i])
            if is_start_piece(vocab_id_to_token_dict[token]):
                token_boundary[i] = 1
    output_tokens = list(tokens)
    # add by ganruyi
    if masking_style == "bert-cn-wwm":
        # Chinese whole-word masking: strip the "##" continuation marker from
        # chinese pieces and re-map them to their unprefixed token ids.
        new_token_ids = []
        for token_id in output_tokens:
            token = tokenizer.convert_ids_to_tokens([token_id])[0]
            if len(re.findall("##[\u4E00-\u9FA5]", token)) > 0:
                token = token[2:]
            new_token_id = tokenizer.convert_tokens_to_ids([token])[0]
            new_token_ids.append(new_token_id)
        output_tokens = new_token_ids
    masked_lm_positions = []
    masked_lm_labels = []
    if masked_lm_prob == 0:
        # NOTE(review): this early return has only 4 elements while the normal
        # return has 5 (no `masked_spans`) — callers must handle both shapes.
        return (output_tokens, masked_lm_positions, masked_lm_labels, token_boundary)
    num_to_predict = min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob))))
    ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64)
    if not geometric_dist:
        # Note(mingdachen):
        # By default, we set the probabities to favor shorter ngrams sequences.
        pvals = 1.0 / np.arange(1, max_ngrams + 1)
        pvals /= pvals.sum(keepdims=True)
        if favor_longer_ngram:
            pvals = pvals[::-1]
    # For every candidate word, pre-build its 1..max_ngrams successor windows.
    ngram_indexes = []
    for idx in range(len(cand_indexes)):
        ngram_index = []
        for n in ngrams:
            ngram_index.append(cand_indexes[idx : idx + n])
        ngram_indexes.append(ngram_index)
    np_rng.shuffle(ngram_indexes)
    (masked_lms, masked_spans) = ([], [])
    covered_indexes = set()
    for cand_index_set in ngram_indexes:
        if len(masked_lms) >= num_to_predict:
            break
        if not cand_index_set:
            continue
        # Note(mingdachen):
        # Skip current piece if they are covered in lm masking or previous ngrams.
        # NOTE(review): this loop is a no-op — the `continue` only affects the
        # inner loop and nothing is skipped; the real coverage check happens
        # below via `is_any_index_covered`. Confirm intent before changing.
        for index_set in cand_index_set[0]:
            for index in index_set:
                if index in covered_indexes:
                    continue
        if not geometric_dist:
            n = np_rng.choice(
                ngrams[: len(cand_index_set)],
                p=pvals[: len(cand_index_set)] / pvals[: len(cand_index_set)].sum(keepdims=True),
            )
        else:
            # Sampling "n" from the geometric distribution and clipping it to
            # the max_ngrams. Using p=0.2 default from the SpanBERT paper
            # https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1)
            n = min(np_rng.geometric(0.2), max_ngrams)
        index_set = sum(cand_index_set[n - 1], [])
        n -= 1
        # Note(mingdachen):
        # Repeatedly looking for a candidate that does not exceed the
        # maximum number of predictions by trying shorter ngrams.
        while len(masked_lms) + len(index_set) > num_to_predict:
            if n == 0:
                break
            index_set = sum(cand_index_set[n - 1], [])
            n -= 1
        # If adding a whole-word mask would exceed the maximum number of
        # predictions, then just skip this candidate.
        if len(masked_lms) + len(index_set) > num_to_predict:
            continue
        is_any_index_covered = False
        for index in index_set:
            if index in covered_indexes:
                is_any_index_covered = True
                break
        if is_any_index_covered:
            continue
        for index in index_set:
            covered_indexes.add(index)
            masked_token = None
            if masking_style == "bert":
                # 80% of the time, replace with [MASK]
                if np_rng.random() < 0.8:
                    masked_token = mask_id
                else:
                    # 10% of the time, keep original
                    if np_rng.random() < 0.5:
                        masked_token = tokens[index]
                    # 10% of the time, replace with random word
                    else:
                        masked_token = vocab_id_list[np_rng.randint(0, len(vocab_id_list))]
            elif masking_style == "bert-cn-wwm":
                # 80% of the time, replace with [MASK]
                if np_rng.random() < 0.8:
                    masked_token = mask_id
                else:
                    # 10% of the time, keep original
                    if np_rng.random() < 0.5:
                        # if it's chinese wwm, remove ## in tokens
                        token_id = tokens[index]
                        token = tokenizer.convert_ids_to_tokens([token_id])[0]
                        if len(re.findall("##[\u4E00-\u9FA5]", token)) > 0:
                            token = token[2:]
                        new_token_id = tokenizer.convert_tokens_to_ids([token])[0]
                        masked_token = new_token_id
                    # 10% of the time, replace with random word
                    else:
                        masked_token = vocab_id_list[np_rng.randint(0, len(vocab_id_list))]
            elif masking_style == "t5":
                masked_token = mask_id
            else:
                raise ValueError("invalid value of masking style")
            output_tokens[index] = masked_token
            masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
        # Record the whole span once (used e.g. for T5 span corruption).
        masked_spans.append(
            MaskedLmInstance(index=index_set, label=[tokens[index] for index in index_set])
        )
    assert len(masked_lms) <= num_to_predict
    np_rng.shuffle(ngram_indexes)
    select_indexes = set()
    if do_permutation:
        # Optionally select additional (non-masked) positions and permute
        # their tokens among themselves (XLNet/ALBERT-style permutation).
        for cand_index_set in ngram_indexes:
            if len(select_indexes) >= num_to_predict:
                break
            if not cand_index_set:
                continue
            # Note(mingdachen):
            # Skip current piece if they are covered in lm masking or previous ngrams.
            # NOTE(review): no-op loop, same as above.
            for index_set in cand_index_set[0]:
                for index in index_set:
                    if index in covered_indexes or index in select_indexes:
                        continue
            # NOTE(review): this uses the *global* np.random instead of the
            # supplied np_rng — it escapes the caller's seed and hurts
            # reproducibility; confirm whether np_rng was intended.
            n = np.random.choice(
                ngrams[: len(cand_index_set)],
                p=pvals[: len(cand_index_set)] / pvals[: len(cand_index_set)].sum(keepdims=True),
            )
            index_set = sum(cand_index_set[n - 1], [])
            n -= 1
            while len(select_indexes) + len(index_set) > num_to_predict:
                if n == 0:
                    break
                index_set = sum(cand_index_set[n - 1], [])
                n -= 1
            # If adding a whole-word mask would exceed the maximum number of
            # predictions, then just skip this candidate.
            if len(select_indexes) + len(index_set) > num_to_predict:
                continue
            is_any_index_covered = False
            for index in index_set:
                if index in covered_indexes or index in select_indexes:
                    is_any_index_covered = True
                    break
            if is_any_index_covered:
                continue
            for index in index_set:
                select_indexes.add(index)
        assert len(select_indexes) <= num_to_predict
        select_indexes = sorted(select_indexes)
        permute_indexes = list(select_indexes)
        np_rng.shuffle(permute_indexes)
        orig_token = list(output_tokens)
        for src_i, tgt_i in zip(select_indexes, permute_indexes):
            output_tokens[src_i] = orig_token[tgt_i]
            masked_lms.append(MaskedLmInstance(index=src_i, label=orig_token[src_i]))
    masked_lms = sorted(masked_lms, key=lambda x: x.index)
    # Sort the spans by the index of the first span
    masked_spans = sorted(masked_spans, key=lambda x: x.index[0])
    for p in masked_lms:
        masked_lm_positions.append(p.index)
        masked_lm_labels.append(p.label)
    return (
        output_tokens,
        masked_lm_positions,
        masked_lm_labels,
        token_boundary,
        masked_spans,
    )
def get_samples_mapping(
    indexed_dataset,
    data_prefix,
    num_epochs,
    max_num_samples,
    max_seq_length,
    short_seq_prob,
    seed,
    name,
    binary_head,
):
    """Get a list that maps a sample index to a starting sentence index,
    end sentence index, and length.

    The mapping is built once by the C++ helper (`helpers.build_mapping`),
    cached as a ``.npy`` file whose name encodes all build parameters, and
    memory-mapped on load. Exactly one rank builds it: the global rank-0
    process when the cache folder looks shared, otherwise each node's local
    rank-0 process. At least one of ``num_epochs`` / ``max_num_samples`` must
    be given; raises ValueError when both are falsy.
    """
    if not num_epochs:
        if not max_num_samples:
            raise ValueError("Need to specify either max_num_samples " "or num_epochs")
        num_epochs = np.iinfo(np.int32).max - 1
    if not max_num_samples:
        max_num_samples = np.iinfo(np.int64).max - 1
    # Filename of the index mapping
    indexmap_filename = data_prefix
    indexmap_filename += "_{}_indexmap".format(name)
    if num_epochs != (np.iinfo(np.int32).max - 1):
        indexmap_filename += "_{}ep".format(num_epochs)
    if max_num_samples != (np.iinfo(np.int64).max - 1):
        indexmap_filename += "_{}mns".format(max_num_samples)
    indexmap_filename += "_{}msl".format(max_seq_length)
    indexmap_filename += "_{:0.2f}ssp".format(short_seq_prob)
    indexmap_filename += "_{}s".format(seed)
    indexmap_filename += ".npy"
    file_folder = os.path.dirname(indexmap_filename)
    # Build the indexed mapping if not exist.
    # NOTE: use `get_local_rank() == 0` to promise samples will be build in each node.
    # use `get_rank() == 0` to promise samples will be build only once for a shared folder.
    cur_rank = flow.env.get_rank() if is_shared_folder(file_folder) else flow.env.get_local_rank()
    if cur_rank == 0 and not os.path.isfile(indexmap_filename):
        logger.info(
            " > WARNING: could not find index map file {}, building "
            "the indices on rank 0 ...".format(indexmap_filename)
        )
        # Make sure the types match the helpers input types.
        assert indexed_dataset.doc_idx.dtype == np.int64
        assert indexed_dataset.sizes.dtype == np.int32
        # Build samples mapping
        verbose = flow.env.get_local_rank() == 0
        start_time = time.time()
        logger.info(" > building samples index mapping for {} ...".format(name))
        # First compile and then import.
        from libai.data.data_utils import helpers
        samples_mapping = helpers.build_mapping(
            indexed_dataset.doc_idx,
            indexed_dataset.sizes,
            num_epochs,
            max_num_samples,
            max_seq_length,
            short_seq_prob,
            seed,
            verbose,
            2 if binary_head else 1,
        )
        logger.info(" > done building samples index maping")
        np.save(indexmap_filename, samples_mapping, allow_pickle=True)
        logger.info(" > saved the index mapping in {}".format(indexmap_filename))
        # Make sure all the ranks have built the mapping
        logger.info(
            " > elapsed time to build and save samples mapping "
            "(seconds): {:4f}".format(time.time() - start_time)
        )
    # This should be a barrier but nccl barrier assumes
    # device_index=rank which is not the case for model
    # parallel case
    dist.synchronize()
    # Load indexed dataset.
    logger.info(" > loading indexed mapping from {}".format(indexmap_filename))
    start_time = time.time()
    samples_mapping = np.load(indexmap_filename, allow_pickle=True, mmap_mode="r")
    logger.info(" loaded indexed file in {:3.3f} seconds".format(time.time() - start_time))
    logger.info(" total number of samples: {}".format(samples_mapping.shape[0]))
    return samples_mapping
def get_train_valid_test_split_(size, splits=None):
    """Compute cumulative index boundaries for train/valid/test splits.

    A split with proportion 0 yields an empty range for that subset.

    Args:
        size (int): total number of samples to split.
        splits (list of float, optional): relative proportions for the
            train/valid/test subsets. They are normalized internally, so they
            need not sum to 1 (their sum must be > 0); missing entries are
            padded with 0 and extras are dropped. Defaults to
            ``[0.8, 0.2, 0.0]``.

    Returns:
        list: four cumulative indices ``[0, train_end, valid_end, size]``;
        subset ``i`` covers ``range(result[i], result[i + 1])``.
    """
    if splits is None:
        splits = [0.8, 0.2, 0.0]
    # Work on a copy so the caller's list is never mutated (the original
    # appended padding zeros to the argument in place).
    splits = list(splits[:3])
    while len(splits) < 3:
        splits.append(0.0)
    splits_sum = sum(splits)
    assert splits_sum > 0.0, "Split sum must be larger than 0."
    splits = [split / splits_sum for split in splits]
    splits_index = [0]
    for index, split in enumerate(splits):
        splits_index.append(splits_index[index] + int(round(split * float(size))))
    # Rounding can make the final boundary drift away from `size`; shift all
    # non-zero boundaries by the drift so the last one lands exactly on it.
    diff = splits_index[-1] - size
    for index in range(1, len(splits_index)):
        splits_index[index] -= diff
    assert len(splits_index) == 4
    assert splits_index[-1] == size
    return splits_index
| 16,410 | 35.878652 | 99 | py |
libai | libai-main/libai/data/data_utils/__init__.py | # coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .dataset_utils import (
compile_helper,
is_shared_folder,
create_masked_lm_predictions,
get_samples_mapping,
get_train_valid_test_split_,
)
from .indexed_dataset import (
IndexedCachedDataset,
IndexedDataset,
MMapIndexedDataset,
get_indexed_dataset,
)
| 920 | 29.7 | 74 | py |
libai | libai-main/libai/data/data_utils/indexed_dataset.py | # coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# copied from fairseq/fairseq/data/indexed_dataset.py
# Removed IndexedRawTextDataset since it relied on Fairseq dictionary
# other slight modifications to remove fairseq dependencies
# Added document index to index file and made it accessible.
# An empty sentence no longer separates documents.
import logging
import os
import shutil
import struct
import time
from functools import lru_cache
from itertools import accumulate
import numpy as np
import oneflow as flow
logger = logging.getLogger(__name__)
def __best_fitting_dtype(vocab_size=None):
    """Pick the smallest integer dtype able to hold token ids for *vocab_size*.

    uint16 suffices below 65500 ids; otherwise fall back to int32.
    """
    if vocab_size is not None and vocab_size < 65500:
        return np.uint16
    return np.int32
def get_available_dataset_impl():
    """Return the names of the supported indexed-dataset implementations."""
    return ["lazy", "cached", "mmap"]
def infer_dataset_impl(path):
    """Infer the on-disk implementation of the dataset at *path*.

    Reads the first 8 magic bytes of the ``.idx`` file and returns
    ``"cached"`` or ``"mmap"``; returns ``None`` when the dataset is missing
    or the header is unrecognized.
    """
    if not IndexedDataset.exists(path):
        logger.info(f"Dataset does not exist: {path}")
        logger.info(
            "Path should be a basename that both .idx and .bin can be "
            "appended to get full filenames."
        )
        return None
    with open(index_file_path(path), "rb") as f:
        magic = f.read(8)
    if magic == IndexedDataset._HDR_MAGIC:
        return "cached"
    if magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
        return "mmap"
    return None
def make_builder(out_file, impl, vocab_size=None):
    """Create a dataset builder writing to *out_file*.

    ``impl == "mmap"`` yields an MMap builder with the smallest dtype able to
    hold *vocab_size* ids; any other value yields the classic indexed builder.
    """
    if impl == "mmap":
        return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
    return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, skip_warmup=False):
    """Instantiate the indexed dataset at *path* using implementation *impl*.

    *impl* may be "lazy", "cached", "mmap", or "infer" (detect from the index
    header). Raises ValueError when no dataset exists at *path*; returns
    ``None`` for an unknown implementation.
    """
    if not IndexedDataset.exists(path):
        logger.info(f"Dataset does not exist: {path}")
        logger.info(
            "Path should be a basename that both .idx and .bin can be "
            "appended to get full filenames."
        )
        raise ValueError(f"Dataset does not exist: {path}")
    if impl == "infer":
        impl = infer_dataset_impl(path)
    if impl == "lazy" and IndexedDataset.exists(path):
        return IndexedDataset(path)
    if impl == "cached" and IndexedDataset.exists(path):
        return IndexedCachedDataset(path)
    if impl == "mmap" and MMapIndexedDataset.exists(path):
        return MMapIndexedDataset(path, skip_warmup)
    logger.info(f"Unknown dataset implementation: {impl}")
    return None
def dataset_exists(path, impl):
    """Return True when a dataset for implementation *impl* exists at *path*."""
    checker = MMapIndexedDataset if impl == "mmap" else IndexedDataset
    return checker.exists(path)
def read_longs(f, n):
    """Read *n* int64 values (native byte order) from binary stream *f*."""
    buf = np.empty(n, dtype=np.int64)
    f.readinto(buf)
    return buf
def write_longs(f, a):
    """Write sequence *a* to binary stream *f* as int64 values."""
    f.write(np.asarray(a, dtype=np.int64))
# Mapping from the type code stored in the .idx header to the numpy dtype of
# the payload stored in the companion .bin file.
dtypes = {
    1: np.uint8,
    2: np.int8,
    3: np.int16,
    4: np.int32,
    5: np.int64,
    6: np.float32,
    7: np.double,
    8: np.uint16,
}
def code(dtype):
    """Return the on-disk type code for *dtype*; raise ValueError if unknown."""
    for key, value in dtypes.items():
        if value == dtype:
            return key
    raise ValueError(dtype)
def index_file_path(prefix_path):
    """Return the ``.idx`` (index) filename for *prefix_path*."""
    return f"{prefix_path}.idx"
def data_file_path(prefix_path):
    """Return the ``.bin`` (payload) filename for *prefix_path*."""
    return f"{prefix_path}.bin"
def create_doc_idx(sizes):
    """Build document boundaries from sentence *sizes*.

    A size of 0 is an end-of-document sentinel; the result lists the index of
    each document's first sentence, starting with 0.
    """
    return [0] + [i + 1 for i, s in enumerate(sizes) if s == 0]
class IndexedDataset(flow.utils.data.Dataset):
    """Loader for IndexedDataset.

    Lazily reads variable-length numpy records from a ``<path>.bin`` payload
    file, using offsets/sizes parsed from the ``<path>.idx`` header at
    construction time. The data file itself is opened on first access.
    """
    # Magic bytes identifying the classic ("cached"/"lazy") index format.
    _HDR_MAGIC = b"TNTIDX\x00\x00"
    def __init__(self, path):
        super().__init__()
        self.path = path
        self.data_file = None
        self.read_index(path)
    def read_index(self, path):
        """Parse the ``.idx`` header: dtype, lengths, offsets and doc index."""
        with open(index_file_path(path), "rb") as f:
            magic = f.read(8)
            assert magic == self._HDR_MAGIC, (
                "Index file doesn't match expected format. "
                "Make sure that --dataset-impl is configured properly."
            )
            version = f.read(8)
            assert struct.unpack("<Q", version) == (1,)
            code, self.element_size = struct.unpack("<QQ", f.read(16))
            self.dtype = dtypes[code]
            self._len, self.s = struct.unpack("<QQ", f.read(16))
            # NOTE(review): struct.unpack returns a 1-tuple, so doc_count is
            # (n,); np.empty accepts it as a shape so read_longs still works.
            self.doc_count = struct.unpack("<Q", f.read(8))
            self.dim_offsets = read_longs(f, self._len + 1)
            self.data_offsets = read_longs(f, self._len + 1)
            self.sizes = read_longs(f, self.s)
            self.doc_idx = read_longs(f, self.doc_count)
    def read_data(self, path):
        # Unbuffered handle: reads are already sized exactly per record.
        self.data_file = open(data_file_path(path), "rb", buffering=0)
    def check_index(self, i):
        """Raise IndexError when *i* is outside ``[0, len(self))``."""
        if i < 0 or i >= self._len:
            raise IndexError("index out of range")
    def __del__(self):
        if self.data_file:
            self.data_file.close()
    # @lru_cache(maxsize=8)
    def __getitem__(self, idx):
        """Return record *idx* as an ndarray, or a list of records for a
        contiguous slice (step must be 1)."""
        if not self.data_file:
            self.read_data(self.path)
        if isinstance(idx, int):
            i = idx
            self.check_index(i)
            tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
            a = np.empty(tensor_size, dtype=self.dtype)
            self.data_file.seek(self.data_offsets[i] * self.element_size)
            self.data_file.readinto(a)
            return a
        elif isinstance(idx, slice):
            start, stop, step = idx.indices(len(self))
            if step != 1:
                raise ValueError("Slices into indexed_dataset must be contiguous")
            sizes = self.sizes[self.dim_offsets[start] : self.dim_offsets[stop]]
            size = sum(sizes)
            a = np.empty(size, dtype=self.dtype)
            self.data_file.seek(self.data_offsets[start] * self.element_size)
            self.data_file.readinto(a)
            # Split the flat buffer back into one array per record.
            offsets = list(accumulate(sizes))
            sents = np.split(a, offsets[:-1])
            return sents
    def __len__(self):
        return self._len
    def num_tokens(self, index):
        """Number of tokens in record *index*."""
        return self.sizes[index]
    def size(self, index):
        """Alias of :meth:`num_tokens`."""
        return self.sizes[index]
    @staticmethod
    def exists(path):
        """True when both ``<path>.idx`` and ``<path>.bin`` are present."""
        return os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
    @property
    def supports_prefetch(self):
        return False  # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
    """IndexedDataset variant that serves reads from an in-memory cache.

    :meth:`prefetch` must be called with the indices of interest before
    ``__getitem__``; integer access then copies from the cache instead of
    touching the data file.
    """
    def __init__(self, path):
        super().__init__(path)
        self.cache = None
        # Maps dataset index -> offset of its record inside `self.cache`.
        self.cache_index = {}
    @property
    def supports_prefetch(self):
        return True
    def prefetch(self, indices):
        """Load all records for *indices* into one contiguous cache buffer."""
        if all(i in self.cache_index for i in indices):
            return
        if not self.data_file:
            self.read_data(self.path)
        indices = sorted(set(indices))
        total_size = 0
        for i in indices:
            total_size += self.data_offsets[i + 1] - self.data_offsets[i]
        self.cache = np.empty(total_size, dtype=self.dtype)
        ptx = 0
        self.cache_index.clear()
        for i in indices:
            self.cache_index[i] = ptx
            size = self.data_offsets[i + 1] - self.data_offsets[i]
            # Read directly into the cache slice (zero extra copies).
            a = self.cache[ptx : ptx + size]
            self.data_file.seek(self.data_offsets[i] * self.element_size)
            self.data_file.readinto(a)
            ptx += size
        if self.data_file:
            # close and delete data file after prefetch so we can pickle
            self.data_file.close()
            self.data_file = None
    # @lru_cache(maxsize=8)
    def __getitem__(self, idx):
        """Return record *idx* from the cache (KeyError if not prefetched),
        or a list of records for a slice."""
        if isinstance(idx, int):
            i = idx
            self.check_index(i)
            tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
            a = np.empty(tensor_size, dtype=self.dtype)
            ptx = self.cache_index[i]
            np.copyto(a, self.cache[ptx : ptx + a.size])
            return a
        elif isinstance(idx, slice):
            # Hack just to make this work, can optimizer later if necessary
            sents = []
            for i in range(*idx.indices(len(self))):
                sents.append(self[i])
            return sents
class IndexedDatasetBuilder(object):
    """Incrementally writes the classic ``.bin``/``.idx`` indexed format.

    Tensors are appended to the open ``.bin`` file while offsets, sizes and
    document boundaries are accumulated in memory; :meth:`finalize` writes
    them out as the ``.idx`` header.
    """
    # Size in bytes of one element for each supported dtype.
    element_sizes = {
        np.uint8: 1,
        np.int8: 1,
        np.int16: 2,
        np.int32: 4,
        np.int64: 8,
        np.float32: 4,
        np.double: 8,
    }
    def __init__(self, out_file, dtype=np.int32):
        self.out_file = open(out_file, "wb")
        self.dtype = dtype
        self.data_offsets = [0]
        self.dim_offsets = [0]
        self.sizes = []
        self.element_size = self.element_sizes[self.dtype]
        self.doc_idx = [0]
    def add_item(self, tensor):
        """Append one tensor's data and record its size/offset metadata."""
        # NOTE(review): `bytes` shadows the builtin, and the `/` division
        # stores the element offset as a float — write_longs later coerces it
        # back to int64; confirm offsets always divide evenly.
        bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
        self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
        for s in tensor.size():
            self.sizes.append(s)
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
    def end_document(self):
        """Mark a document boundary after the most recently added item."""
        self.doc_idx.append(len(self.sizes))
    def merge_file_(self, another_file):
        """Append the records of *another_file* (same dtype) to this builder."""
        index = IndexedDataset(another_file)
        assert index.dtype == self.dtype
        begin = self.data_offsets[-1]
        for offset in index.data_offsets[1:]:
            self.data_offsets.append(begin + offset)
        self.sizes.extend(index.sizes)
        begin = self.dim_offsets[-1]
        for dim_offset in index.dim_offsets[1:]:
            self.dim_offsets.append(begin + dim_offset)
        # Stream the raw payload across in 1 KiB chunks.
        with open(data_file_path(another_file), "rb") as f:
            while True:
                data = f.read(1024)
                if data:
                    self.out_file.write(data)
                else:
                    break
    def finalize(self, index_file):
        """Close the payload file and write the ``.idx`` header/metadata."""
        self.out_file.close()
        index = open(index_file, "wb")
        index.write(b"TNTIDX\x00\x00")
        index.write(struct.pack("<Q", 1))
        index.write(struct.pack("<QQ", code(self.dtype), self.element_size))
        index.write(struct.pack("<QQ", len(self.data_offsets) - 1, len(self.sizes)))
        index.write(struct.pack("<Q", len(self.doc_idx)))
        write_longs(index, self.dim_offsets)
        write_longs(index, self.data_offsets)
        write_longs(index, self.sizes)
        write_longs(index, self.doc_idx)
        index.close()
def _warmup_mmap_file(path):
with open(path, "rb") as stream:
while stream.read(100 * 1024 * 1024):
pass
class MMapIndexedDataset(flow.utils.data.Dataset):
    """Indexed dataset backed by two memory-mapped files: an index file
    (item sizes, byte pointers, document index) and a data file holding the
    raw token buffers. Items are returned as zero-copy numpy views into the
    data mmap."""

    class Index(object):
        """Reader/writer for the index file."""

        _HDR_MAGIC = b"MMIDIDX\x00\x00"

        @classmethod
        def writer(cls, path, dtype):
            """Return a context manager that writes an index file for items of
            ``dtype`` to ``path``."""

            class _Writer(object):
                def __enter__(self):
                    self._file = open(path, "wb")
                    # Header: magic, version, dtype code.
                    self._file.write(cls._HDR_MAGIC)
                    self._file.write(struct.pack("<Q", 1))
                    self._file.write(struct.pack("<B", code(dtype)))
                    return self

                @staticmethod
                def _get_pointers(sizes):
                    # Byte offset of each item inside the data file.
                    dtype_size = dtype().itemsize
                    address = 0
                    pointers = []
                    for size in sizes:
                        pointers.append(address)
                        address += size * dtype_size
                    return pointers

                def write(self, sizes, doc_idx):
                    """Serialize counts, sizes, pointers and doc index."""
                    pointers = self._get_pointers(sizes)
                    self._file.write(struct.pack("<Q", len(sizes)))
                    self._file.write(struct.pack("<Q", len(doc_idx)))
                    sizes = np.array(sizes, dtype=np.int32)
                    self._file.write(sizes.tobytes(order="C"))
                    del sizes
                    pointers = np.array(pointers, dtype=np.int64)
                    self._file.write(pointers.tobytes(order="C"))
                    del pointers
                    doc_idx = np.array(doc_idx, dtype=np.int64)
                    self._file.write(doc_idx.tobytes(order="C"))

                def __exit__(self, exc_type, exc_val, exc_tb):
                    self._file.close()

            return _Writer()

        def __init__(self, path, skip_warmup=False):
            # Parse the fixed-size header, then mmap the arrays that follow it.
            with open(path, "rb") as stream:
                magic_test = stream.read(9)
                assert self._HDR_MAGIC == magic_test, (
                    "Index file doesn't match expected format. "
                    "Make sure that --dataset-impl is configured properly."
                )
                version = struct.unpack("<Q", stream.read(8))
                assert (1,) == version
                (dtype_code,) = struct.unpack("<B", stream.read(1))
                self._dtype = dtypes[dtype_code]
                self._dtype_size = self._dtype().itemsize
                self._len = struct.unpack("<Q", stream.read(8))[0]
                self._doc_count = struct.unpack("<Q", stream.read(8))[0]
                offset = stream.tell()
            if not skip_warmup:
                logger.info("warming up index mmap file...")
                _warmup_mmap_file(path)
            self._bin_buffer_mmap = np.memmap(path, mode="r", order="C")
            self._bin_buffer = memoryview(self._bin_buffer_mmap)
            logger.info("reading sizes...")
            self._sizes = np.frombuffer(
                self._bin_buffer, dtype=np.int32, count=self._len, offset=offset
            )
            logger.info("reading pointers...")
            self._pointers = np.frombuffer(
                self._bin_buffer,
                dtype=np.int64,
                count=self._len,
                offset=offset + self._sizes.nbytes,
            )
            logger.info("reading document index...")
            self._doc_idx = np.frombuffer(
                self._bin_buffer,
                dtype=np.int64,
                count=self._doc_count,
                offset=offset + self._sizes.nbytes + self._pointers.nbytes,
            )

        def __del__(self):
            self._bin_buffer_mmap._mmap.close()
            del self._bin_buffer_mmap

        @property
        def dtype(self):
            return self._dtype

        @property
        def sizes(self):
            return self._sizes

        @property
        def doc_idx(self):
            return self._doc_idx

        @lru_cache(maxsize=8)
        def __getitem__(self, i):
            # (byte pointer into the data file, element count) for item i.
            return self._pointers[i], self._sizes[i]

        def __len__(self):
            return self._len

    def __init__(self, path, skip_warmup=False):
        super().__init__()
        self._path = None
        self._index = None
        self._bin_buffer = None
        self._do_init(path, skip_warmup)

    def __getstate__(self):
        # Only the path is pickled; mmaps are rebuilt in __setstate__.
        return self._path

    def __setstate__(self, state):
        # BUGFIX: _do_init requires a skip_warmup argument, so the original
        # self._do_init(state) raised TypeError on unpickling. Skip the
        # warmup read when restoring from a pickle.
        self._do_init(state, skip_warmup=True)

    def _do_init(self, path, skip_warmup):
        """Open the index and memory-map the data file for ``path``."""
        self._path = path
        self._index = self.Index(index_file_path(self._path), skip_warmup)
        if not skip_warmup:
            logger.info("warming up data mmap file...")
            _warmup_mmap_file(data_file_path(self._path))
        logger.info("creating numpy buffer of mmap...")
        self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode="r", order="C")
        logger.info("creating memory view of numpy buffer...")
        self._bin_buffer = memoryview(self._bin_buffer_mmap)

    def __del__(self):
        self._bin_buffer_mmap._mmap.close()
        del self._bin_buffer_mmap
        del self._index

    def __len__(self):
        return len(self._index)

    # @lru_cache(maxsize=8)
    def __getitem__(self, idx):
        """Return item ``idx`` as a zero-copy numpy view, or a list of views
        for a contiguous slice."""
        if isinstance(idx, int):
            ptr, size = self._index[idx]
            np_array = np.frombuffer(
                self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
            )
            return np_array
        elif isinstance(idx, slice):
            start, stop, step = idx.indices(len(self))
            if step != 1:
                raise ValueError("Slices into indexed_dataset must be contiguous")
            ptr = self._index._pointers[start]
            sizes = self._index._sizes[idx]
            offsets = list(accumulate(sizes))
            total_size = sum(sizes)
            # One contiguous read, then split into per-item views.
            np_array = np.frombuffer(
                self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr
            )
            sents = np.split(np_array, offsets[:-1])
            return sents

    def get(self, idx, offset=0, length=None):
        """Retrieves a single item from the dataset with the option to only
        return a portion of the item.
        get(idx) is the same as [idx] but get() does not support slicing.
        """
        ptr, size = self._index[idx]
        if length is None:
            length = size - offset
        # Advance the byte pointer by `offset` elements.
        ptr += offset * np.dtype(self._index.dtype).itemsize
        np_array = np.frombuffer(
            self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr
        )
        return np_array

    @property
    def sizes(self):
        return self._index.sizes

    @property
    def doc_idx(self):
        return self._index.doc_idx

    def get_doc_idx(self):
        return self._index._doc_idx

    def set_doc_idx(self, doc_idx_):
        self._index._doc_idx = doc_idx_

    @property
    def supports_prefetch(self):
        # mmap handles caching; nothing to prefetch.
        return False

    @staticmethod
    def exists(path):
        return os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
class MMapIndexedDatasetBuilder(object):
    """Incrementally writes the data file for an MMapIndexedDataset and
    collects the size/document bookkeeping needed to emit the index file."""

    def __init__(self, out_file, dtype=np.int64):
        self._data_file = open(out_file, "wb")
        self._dtype = dtype
        self._sizes = []
        self._doc_idx = [0]

    def add_item(self, tensor):
        """Append one tensor's raw bytes to the data file."""
        arr = np.array(tensor.numpy(), dtype=self._dtype)
        self._data_file.write(arr.tobytes(order="C"))
        self._sizes.append(arr.size)

    def end_document(self):
        """Record that the current document ends here."""
        self._doc_idx.append(len(self._sizes))

    def merge_file_(self, another_file):
        """Append another on-disk dataset's sizes and raw data to this one."""
        # Concatenate index
        other_index = MMapIndexedDataset.Index(index_file_path(another_file))
        assert other_index.dtype == self._dtype
        self._sizes.extend(other_index.sizes)
        # Concatenate data
        with open(data_file_path(another_file), "rb") as src:
            shutil.copyfileobj(src, self._data_file)

    def finalize(self, index_file):
        """Close the data file and write the companion index file."""
        self._data_file.close()
        with MMapIndexedDataset.Index.writer(index_file, self._dtype) as writer:
            writer.write(self._sizes, self._doc_idx)
def get_indexed_dataset(data_prefix, data_impl, skip_warmup):
    """Build an indexed dataset via make_dataset, sanity-check it, and log
    basic statistics (document and sentence counts)."""
    logger.info("building dataset index ...")
    t0 = time.time()
    dataset = make_dataset(data_prefix, data_impl, skip_warmup)
    # Every sentence must be accounted for by the document index.
    assert dataset.sizes.shape[0] == dataset.doc_idx[-1]
    elapsed = time.time() - t0
    logger.info("Finished creating indexed dataset in {:4f} seconds".format(elapsed))
    logger.info("indexed dataset stats:")
    logger.info("number of documents: {}".format(dataset.doc_idx.shape[0] - 1))
    logger.info("number of sentences: {}".format(dataset.sizes.shape[0]))
    return dataset
| 19,323 | 30.993377 | 96 | py |
libai | libai-main/libai/inference/text_classification.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import oneflow as flow
from libai.data.structures import DistTensorData, Instance
from libai.inference.basic import BasePipeline
class TextClassificationPipeline(BasePipeline):
    """Pipeline that classifies a single input text with a LiBai sequence
    classification model and returns label/score dictionaries."""

    def __init__(
        self,
        config_file,
        data_parallel=None,
        tensor_parallel=None,
        pipeline_parallel=None,
        pipeline_stage_id=None,
        pipeline_num_layers=None,
        model_path=None,
        mode="libai",
        **kwargs,
    ):
        # BUGFIX: the original passed model_path and pipeline_num_layers to
        # BasePipeline.__init__ in swapped positions (BasePipeline declares
        # pipeline_num_layers before model_path). Pass them in declaration
        # order, matching ImageClassificationPipeline.
        super().__init__(
            config_file,
            data_parallel,
            tensor_parallel,
            pipeline_parallel,
            pipeline_stage_id,
            pipeline_num_layers,
            model_path,
            mode,
            **kwargs,
        )

    def update_cfg(
        self,
        data_parallel=1,
        tensor_parallel=1,
        pipeline_parallel=1,
        pipeline_stage_id=None,
        pipeline_num_layers=None,
    ):
        """Extend BasePipeline.update_cfg: disable dropout for deterministic
        inference and ensure label<->id mappings exist in the model config."""
        super().update_cfg(
            data_parallel,
            tensor_parallel,
            pipeline_parallel,
            pipeline_stage_id,
            pipeline_num_layers,
        )
        self.cfg.model.cfg.hidden_dropout_prob = 0.0
        self.cfg.model.cfg.attention_probs_dropout_prob = 0.0
        assert "num_labels" in self.cfg.model.cfg, "The model's config must contain num_labels"
        if "label2id" not in self.cfg.model.cfg:
            # Fall back to generic "Label_i" names when no mapping is given.
            label2id = {"Label_" + str(i): i for i in range(self.cfg.model.cfg.num_labels)}
            id2label = {ind: label for label, ind in label2id.items()}
            self.cfg.model.cfg["label2id"] = label2id
            self.cfg.model.cfg["id2label"] = id2label

    def _parse_parameters(self, **pipeline_parameters):
        # All call-time kwargs are treated as postprocess options
        # (e.g. function_to_apply, return_all_scores).
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {**pipeline_parameters}
        return preprocess_params, forward_params, postprocess_params

    def preprocess(
        self,
        inputs,
        pad: bool = False,
        **kwargs,
    ) -> dict:
        """Tokenize ``inputs`` (one text) into batched global tensors."""
        # tokenizer encoder
        input_ids = flow.tensor(np.array(self.tokenizer.encode(inputs)))
        padding_mask = flow.tensor(np.ones(input_ids.shape), dtype=flow.bool)
        # set batch size = 1
        input_ids = input_ids.unsqueeze(0)
        padding_mask = padding_mask.unsqueeze(0)
        # to global tensor
        model_input = Instance(
            input_ids=DistTensorData(input_ids),
            attention_mask=DistTensorData(padding_mask),
        )
        model_input_dict = {}
        for key, value in model_input.get_fields().items():
            value.to_global()
            model_input_dict[key] = value.tensor
        return model_input_dict

    def forward(self, model_input_dict) -> dict:
        # Parameter renamed from the misspelled "mdoel_input_dict"; the only
        # caller (BasePipeline.__call__) passes it positionally.
        model_outputs_dict = self.model(**model_input_dict)
        return model_outputs_dict

    def postprocess(
        self, model_outputs_dict, function_to_apply=None, return_all_scores=False, **kwargs
    ) -> dict:
        """Map logits to label/score dicts.

        Args:
            function_to_apply: "sigmoid", "softmax" or "none"; default picks
                sigmoid for a single label and softmax otherwise.
            return_all_scores: return one record per label instead of argmax.
        """
        num_labels = self.cfg.model.cfg.num_labels
        if function_to_apply is not None:
            function_to_apply = function_to_apply.lower()
            assert function_to_apply in [
                "sigmoid",
                "softmax",
                "none",
            ], f"Unrecognized `function_to_apply` argument: {function_to_apply}"
        else:
            if num_labels == 1:
                function_to_apply = "sigmoid"
            elif num_labels > 1:
                function_to_apply = "softmax"
        # process, logits: [num_labels]
        logits = model_outputs_dict["logits"][0]
        if function_to_apply == "sigmoid":
            scores = flow.sigmoid(logits)
        elif function_to_apply == "softmax":
            scores = flow.softmax(logits)
        else:
            scores = logits
        scores = scores.detach().numpy()
        if return_all_scores:
            return [
                {"label": self.cfg.model.cfg.id2label[i], "score": score.item()}
                for i, score in enumerate(scores)
            ]
        else:
            return {
                "label": self.cfg.model.cfg.id2label[scores.argmax().item()],
                "score": scores.max().item(),
            }
| 4,842 | 32.4 | 95 | py |
libai | libai-main/libai/inference/text_generation.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libai.inference.basic import BasePipeline
from libai.utils import distributed as dist
class TextGenerationPipeline(BasePipeline):
    """Sequence-to-sequence text generation pipeline built on a T5/MT5 model."""

    def load_pretrain_weight(self, libai_cfg_model, model_path, mode="huggingface"):
        """load pretrained model.
        Args:
            libai_cfg_model (libai.models): Lazy config Model in Libai, you can import it
                by `from libai.config.configs.common.models.bert
                    import pretrain_model as libai_cfg_model`
            model_path (str): The directory path of pretrained model,
        """
        if mode == "huggingface":
            from projects.MT5.utils.mt5_loader import T5LoaderHuggerFace

            loader = T5LoaderHuggerFace(
                libai_cfg_model,
                libai_cfg_model.cfg,
                model_path,
                hidden_dropout_prob=0.0,
                attention_probs_dropout_prob=0.0,
                embedding_dropout_prob=0.0,
            )
            return loader.load()
        if mode == "libai":
            from projects.MT5.utils.mt5_loader import T5LoaderLibai

            loader = T5LoaderLibai(
                libai_cfg_model,
                libai_cfg_model.cfg,
                model_path,
            )
            return loader.load()
        if mode == "random":
            from libai.engine import DefaultTrainer

            return DefaultTrainer.build_model(self.cfg)
        raise NotImplementedError

    def _parse_parameters(self, **pipeline_parameters):
        # All call-time kwargs are forwarded to model.generate() in forward().
        return {}, {**pipeline_parameters}, {}

    def preprocess(
        self,
        inputs,
        pad: bool = False,
        **kwargs,
    ) -> dict:
        """Encode the input text into a global tensor of token ids."""
        ids = self.tokenizer.encode(inputs, return_tensors="of", is_global=True)
        return {"encoder_ids": ids}

    def forward(self, encoder_input_dict, **kwargs) -> dict:
        generated = self.model.generate(encoder_input_dict["encoder_ids"], **kwargs)
        return {"return_ids": generated}

    def postprocess(self, model_output_dict, **kwargs) -> dict:
        """Decode generated token ids into text records."""
        return_ids = model_output_dict["return_ids"]
        records = []
        for i in range(return_ids.size(0)):
            decoded = self.tokenizer.decode(return_ids[i], skip_special_tokens=True)
            records.append({"generated_text": decoded})
        return records
if __name__ == "__main__":
    # Example: distributed T5 inference with 2-way tensor parallel and
    # 2-way pipeline parallel (layers 0-11 on stage 0, 12-23 on stage 1).
    # Replace the config and model paths before running.
    pipeline = TextGenerationPipeline(
        "/path/to/libai/projects/MT5/configs/t5_inference.py",
        data_parallel=1,
        tensor_parallel=2,
        pipeline_parallel=2,
        pipeline_stage_id=[0] * 12 + [1] * 12,
        pipeline_num_layers=12 * 2,
        model_path="/path/to/t5-base",
        mode="huggingface",
    )
    text = ["summarize: She is a student, She is tall, She loves study"]
    dict1 = pipeline(text)
    # Outputs are only gathered on rank 0; other ranks get an empty dict.
    if dist.is_main_process():
        print(dict1)
| 3,718 | 33.119266 | 94 | py |
libai | libai-main/libai/inference/image_classification.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import oneflow as flow
from PIL import Image
from libai.config import instantiate
from libai.data.structures import DistTensorData, Instance
from libai.inference.basic import BasePipeline
class ImageClassificationPipeline(BasePipeline):
    """Pipeline that classifies a single image file with a LiBai vision model
    and returns ImageNet label/score dictionaries."""

    def __init__(
        self,
        config_file,
        data_parallel=None,
        tensor_parallel=None,
        pipeline_parallel=None,
        pipeline_stage_id=None,
        pipeline_num_layers=None,
        model_path=None,
        mode="libai",
        **kwargs,
    ):
        super().__init__(
            config_file,
            data_parallel,
            tensor_parallel,
            pipeline_parallel,
            pipeline_stage_id,
            pipeline_num_layers,
            model_path,
            mode,
            **kwargs,
        )
        # num_classes may live on cfg.model or cfg.model.cfg depending on
        # how the model config was written.
        if "num_classes" in self.cfg.model:
            self.num_classes = self.cfg.model.num_classes
        elif "num_classes" in self.cfg.model.cfg:
            self.num_classes = self.cfg.model.cfg.num_classes
        else:
            raise AttributeError("The model's config must contain num_classes")
        label2id = self.label2id(self.num_classes)
        self.id2label = {ind: label for label, ind in label2id.items()}
        # Reuse the evaluation dataloader's test-time transform.
        self.transform = instantiate(self.cfg.dataloader.test[0].dataset.transform)

    def _parse_parameters(self, **pipeline_parameters):
        # All call-time kwargs are treated as postprocess options.
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {**pipeline_parameters}
        return preprocess_params, forward_params, postprocess_params

    def preprocess(
        self,
        inputs,
        **kwargs,
    ) -> dict:
        """Load ``inputs`` (an image path), apply the test transform, and
        build a batch-of-one global tensor dict."""
        assert os.path.exists(inputs), "inputs must be an existing image path!"
        with open(inputs, "rb") as f:
            img = Image.open(f).convert("RGB")
        img = self.transform(img)
        img = img.unsqueeze(0)
        # to global tensor
        model_input = Instance(
            images=DistTensorData(img),
        )
        model_input_dict = {}
        for key, value in model_input.get_fields().items():
            value.to_global()
            model_input_dict[key] = value.tensor
        return model_input_dict

    def forward(self, model_input_dict) -> dict:
        # Parameter renamed from the misspelled "mdoel_input_dict"; the only
        # caller (BasePipeline.__call__) passes it positionally.
        model_outputs_dict = self.model(**model_input_dict)
        return model_outputs_dict

    def postprocess(
        self, model_outputs_dict, function_to_apply=None, return_all_scores=False, **kwargs
    ) -> dict:
        """Map prediction scores to label/score dicts.

        Args:
            function_to_apply: "sigmoid", "softmax" or "none"; default picks
                sigmoid for a single class and softmax otherwise.
            return_all_scores: return one record per class instead of argmax.
        """
        num_labels = self.num_classes
        if function_to_apply is not None:
            function_to_apply = function_to_apply.lower()
            assert function_to_apply in [
                "sigmoid",
                "softmax",
                "none",
            ], f"Unrecognized `function_to_apply` argument: {function_to_apply}"
        else:
            if num_labels == 1:
                function_to_apply = "sigmoid"
            elif num_labels > 1:
                function_to_apply = "softmax"
        # process, logits: [num_labels]
        logits = model_outputs_dict["prediction_scores"][0]
        if function_to_apply == "sigmoid":
            scores = flow.sigmoid(logits)
        elif function_to_apply == "softmax":
            scores = flow.softmax(logits)
        else:
            scores = logits
        scores = scores.detach().numpy()
        if return_all_scores:
            return [
                {"label": self.id2label[i], "score": score.item()} for i, score in enumerate(scores)
            ]
        else:
            return {
                "label": self.id2label[scores.argmax().item()],
                "score": scores.max().item(),
            }

    def label2id(self, num_classes):
        """
        Args:
            num_classes (int): the number of total classes
        Returns:
            labels (dict): maps each class name to its index, e.g.
                {
                    "tench": 0,
                    "tiger": 1,
                    "xxx": n,
                }
        """
        from libai.inference.utils.imagenet_class import IMAGENET_LABELS as labels

        assert num_classes == len(labels), "number of labels must be equal to num_classes"
        return {label: i for (i, label) in enumerate(labels)}
if __name__ == "__main__":
    # Smoke test: single-process inference. Replace the config path and the
    # sample image path with ones that exist locally before running.
    pipeline = ImageClassificationPipeline("/home/chengpeng/config.yaml", 1, 1, 1)
    print(pipeline("data_test/inference_test_data/ILSVRC2012_val_00000293.JPEG"))
| 5,202 | 32.785714 | 100 | py |
libai | libai-main/libai/inference/basic.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Dict
import oneflow as flow
from libai.config import LazyConfig, try_get_key
from libai.engine import DefaultTrainer
from libai.utils import distributed as dist
from libai.utils.logger import setup_logger
logger = setup_logger(distributed_rank=dist.get_rank())
logger = logging.getLogger("libai.inference")
class BasePipeline(metaclass=ABCMeta):
    """
    Base class for all task pipeline.

    Subclasses implement preprocess/forward/postprocess; __call__ chains them
    under flow.no_grad() and gathers the outputs on rank 0 only.
    """
    def __init__(
        self,
        config_file,
        data_parallel=None,
        tensor_parallel=None,
        pipeline_parallel=None,
        pipeline_stage_id=None,
        pipeline_num_layers=None,
        model_path=None,
        mode="libai",
        **kwargs,
    ):
        # init cfg
        self.cfg = LazyConfig.load(config_file)
        # Configure NCCL fusion before any distributed work happens.
        flow.boxing.nccl.set_fusion_threshold_mbytes(
            try_get_key(self.cfg, "train.nccl_fusion_threshold_mb", default=16)
        )
        flow.boxing.nccl.set_fusion_max_ops_num(
            try_get_key(self.cfg, "train.nccl_fusion_max_ops", default=24)
        )
        # NOTE(review): the None defaults above are forwarded positionally, so
        # update_cfg's own defaults of 1 never apply when callers omit these
        # arguments -- confirm dist.setup_dist_util tolerates None sizes.
        self.update_cfg(
            data_parallel,
            tensor_parallel,
            pipeline_parallel,
            pipeline_stage_id,
            pipeline_num_layers,
        )
        dist.setup_dist_util(self.cfg.train.dist)
        logger.info(self.cfg.train.dist)
        # initial and load model
        self.model = self.load_pretrain_weight(self.cfg.model, model_path, mode=mode)
        self.model._apply(dist.convert_to_distributed_default_setting)
        self.model = self.model.eval()
        # initial tokenizer
        # Only rank 0 builds the tokenizer; it is broadcast to the others.
        if dist.is_main_process():
            self.tokenizer = self.build_tokenizer(self.cfg)
        else:
            self.tokenizer = None
        self.tokenizer = dist.broadcast_py_object(self.tokenizer, src=0)
        # set parameters
        (
            self._preprocess_params,
            self._forward_params,
            self._postprocess_params,
        ) = self._parse_parameters(**kwargs)
    def update_cfg(
        self,
        data_parallel=1,
        tensor_parallel=1,
        pipeline_parallel=1,
        pipeline_stage_id=None,
        pipeline_num_layers=None,
    ):
        """Write the requested parallelism sizes into cfg.train.dist."""
        self.cfg.train.dist.data_parallel_size = data_parallel
        self.cfg.train.dist.tensor_parallel_size = tensor_parallel
        self.cfg.train.dist.pipeline_parallel_size = pipeline_parallel
        self.cfg.train.dist.custom_pipeline_stage_id = pipeline_stage_id
        if pipeline_num_layers is not None:
            self.cfg.train.dist.pipeline_num_layers = pipeline_num_layers
        if self.cfg.train.dist.pipeline_parallel_size > 1:
            assert (
                try_get_key(self.cfg.train.dist, "pipeline_num_layers") is not None
            ), "cfg.train.dist.pipeline_num_layers must be set when run pipeline parallel"
    def load_pretrain_weight(
        self,
        libai_cfg_model,
        model_path,
        mode="libai",
    ):
        """load pretrained model.

        Args:
            libai_cfg_model (libai.models): Lazy config Model in Libai, you can import it
                by `from libai.config.configs.common.models.bert
                    import pretrain_model as libai_cfg_model`
            model_path (str): The directory path of pretrained model
            mode (str): set it to `libai` for loading trained model from libai,
                set it to `random` for quickly debugging by random initialized model
        """
        if mode == "libai":
            from libai.models.utils.model_loader.base_loader import ModelLoaderLiBai

            model_loader = ModelLoaderLiBai(libai_cfg_model, libai_cfg_model.cfg, model_path)
            model_loader.base_model_prefix_1 = None
            model_loader.base_model_prefix_2 = ""
            return model_loader.load()
        elif mode == "random":
            return DefaultTrainer.build_model(self.cfg)
        else:
            raise NotImplementedError
    def build_tokenizer(self, cfg):
        """Build the tokenizer from cfg.tokenization, or return None if the
        config has no tokenization section."""
        tokenizer = None
        if try_get_key(cfg, "tokenization") is not None:
            tokenizer = DefaultTrainer.build_tokenizer(cfg)
        return tokenizer
    @abstractmethod
    def _parse_parameters(self, **pipeline_parameters):
        # Subclasses split call-time kwargs into (preprocess, forward,
        # postprocess) parameter dicts.
        raise NotImplementedError("_parse_parameters not implemented")
    def __call__(self, inputs, *args, batch_size=None, **kwargs) -> dict:
        """Run preprocess -> forward -> postprocess on ``inputs``.

        Only rank 0 runs postprocess and returns the result; other ranks
        return an empty dict after synchronization.
        """
        preprocess_params, forward_params, postprocess_params = self._parse_parameters(
            **kwargs
        ) # noqa
        # Fuse __init__ params and __call__ params without modifying the __init__ ones.
        preprocess_params = {**self._preprocess_params, **preprocess_params}
        forward_params = {**self._forward_params, **forward_params}
        postprocess_params = {**self._postprocess_params, **postprocess_params}
        with flow.no_grad():
            model_inputs_dict = self.preprocess(inputs, **preprocess_params)
            model_outputs_dict = self.forward(model_inputs_dict, **forward_params)
            model_outputs_dict = self.to_local(model_outputs_dict)
            if dist.is_main_process():
                outputs_dict = self.postprocess(model_outputs_dict, **postprocess_params)
            else:
                outputs_dict = {}
            dist.synchronize()
        return outputs_dict
    def to_local(self, model_outputs_dict):
        """Convert any global tensors in the output dict to local tensors on
        rank 0 so postprocess can run single-process code on them."""
        for key, value in model_outputs_dict.items():
            if isinstance(value, flow.Tensor) and value.is_global:
                model_outputs_dict[key] = dist.ttol(
                    value, ranks=[0] if value.placement.ranks.ndim == 1 else [[0]]
                )
        if flow.cuda.is_available():
            dist.synchronize()
        return model_outputs_dict
    @abstractmethod
    def preprocess(self, input_: Any, **preprocess_parameters: Dict) -> dict:
        # Turn raw user input into a dict of model-ready (global) tensors.
        raise NotImplementedError("preprocess not implemented")
    @abstractmethod
    def forward(self, **kwargs: Dict) -> dict:
        # Run the model on the preprocessed inputs.
        raise NotImplementedError("forward not implemented")
    @abstractmethod
    def postprocess(self, **kwargs: Dict) -> dict:
        # Turn raw model outputs into user-facing results.
        raise NotImplementedError("postprocess not implemented")
| 6,847 | 35.620321 | 93 | py |
libai | libai-main/libai/inference/utils/imagenet_class.py | IMAGENET_LABELS = [
"tench, Tinca tinca",
"goldfish, Carassius auratus",
"great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias", # noqa: E501
"tiger shark, Galeocerdo cuvieri",
"hammerhead, hammerhead shark",
"electric ray, crampfish, numbfish, torpedo",
"stingray",
"cock",
"hen",
"ostrich, Struthio camelus",
"brambling, Fringilla montifringilla",
"goldfinch, Carduelis carduelis",
"house finch, linnet, Carpodacus mexicanus",
"junco, snowbird",
"indigo bunting, indigo finch, indigo bird, Passerina cyanea",
"robin, American robin, Turdus migratorius",
"bulbul",
"jay",
"magpie",
"chickadee",
"water ouzel, dipper",
"kite",
"bald eagle, American eagle, Haliaeetus leucocephalus",
"vulture",
"great grey owl, great gray owl, Strix nebulosa",
"European fire salamander, Salamandra salamandra",
"common newt, Triturus vulgaris",
"eft",
"spotted salamander, Ambystoma maculatum",
"axolotl, mud puppy, Ambystoma mexicanum",
"bullfrog, Rana catesbeiana",
"tree frog, tree-frog",
"tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui",
"loggerhead, loggerhead turtle, Caretta caretta",
"leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea", # noqa: E501
"mud turtle",
"terrapin",
"box turtle, box tortoise",
"banded gecko",
"common iguana, iguana, Iguana iguana",
"American chameleon, anole, Anolis carolinensis",
"whiptail, whiptail lizard",
"agama",
"frilled lizard, Chlamydosaurus kingi",
"alligator lizard",
"Gila monster, Heloderma suspectum",
"green lizard, Lacerta viridis",
"African chameleon, Chamaeleo chamaeleon",
"Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis", # noqa: E501
"African crocodile, Nile crocodile, Crocodylus niloticus",
"American alligator, Alligator mississipiensis",
"triceratops",
"thunder snake, worm snake, Carphophis amoenus",
"ringneck snake, ring-necked snake, ring snake",
"hognose snake, puff adder, sand viper",
"green snake, grass snake",
"king snake, kingsnake",
"garter snake, grass snake",
"water snake",
"vine snake",
"night snake, Hypsiglena torquata",
"boa constrictor, Constrictor constrictor",
"rock python, rock snake, Python sebae",
"Indian cobra, Naja naja",
"green mamba",
"sea snake",
"horned viper, cerastes, sand viper, horned asp, Cerastes cornutus",
"diamondback, diamondback rattlesnake, Crotalus adamanteus",
"sidewinder, horned rattlesnake, Crotalus cerastes",
"trilobite",
"harvestman, daddy longlegs, Phalangium opilio",
"scorpion",
"black and gold garden spider, Argiope aurantia",
"barn spider, Araneus cavaticus",
"garden spider, Aranea diademata",
"black widow, Latrodectus mactans",
"tarantula",
"wolf spider, hunting spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse, partridge, Bonasa umbellus",
"prairie chicken, prairie grouse, prairie fowl",
"peacock",
"quail",
"partridge",
"African grey, African gray, Psittacus erithacus",
"macaw",
"sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"drake",
"red-breasted merganser, Mergus serrator",
"goose",
"black swan, Cygnus atratus",
"tusker",
"echidna, spiny anteater, anteater",
"platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus", # noqa: E501
"wallaby, brush kangaroo",
"koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus", # noqa: E501
"wombat",
"jellyfish",
"sea anemone, anemone",
"brain coral",
"flatworm, platyhelminth",
"nematode, nematode worm, roundworm",
"conch",
"snail",
"slug",
"sea slug, nudibranch",
"chiton, coat-of-mail shell, sea cradle, polyplacophore",
"chambered nautilus, pearly nautilus, nautilus",
"Dungeness crab, Cancer magister",
"rock crab, Cancer irroratus",
"fiddler crab",
"king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica", # noqa: E501
"American lobster, Northern lobster, Maine lobster, Homarus americanus", # noqa: E501
"spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", # noqa: E501
"crayfish, crawfish, crawdad, crawdaddy",
"hermit crab",
"isopod",
"white stork, Ciconia ciconia",
"black stork, Ciconia nigra",
"spoonbill",
"flamingo",
"little blue heron, Egretta caerulea",
"American egret, great white heron, Egretta albus",
"bittern",
"crane",
"limpkin, Aramus pictus",
"European gallinule, Porphyrio porphyrio",
"American coot, marsh hen, mud hen, water hen, Fulica americana",
"bustard",
"ruddy turnstone, Arenaria interpres",
"red-backed sandpiper, dunlin, Erolia alpina",
"redshank, Tringa totanus",
"dowitcher",
"oystercatcher, oyster catcher",
"pelican",
"king penguin, Aptenodytes patagonica",
"albatross, mollymawk",
"grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus", # noqa: E501
"killer whale, killer, orca, grampus, sea wolf, Orcinus orca",
"dugong, Dugong dugon",
"sea lion",
"Chihuahua",
"Japanese spaniel",
"Maltese dog, Maltese terrier, Maltese",
"Pekinese, Pekingese, Peke",
"Shih-Tzu",
"Blenheim spaniel",
"papillon",
"toy terrier",
"Rhodesian ridgeback",
"Afghan hound, Afghan",
"basset, basset hound",
"beagle",
"bloodhound, sleuthhound",
"bluetick",
"black-and-tan coonhound",
"Walker hound, Walker foxhound",
"English foxhound",
"redbone",
"borzoi, Russian wolfhound",
"Irish wolfhound",
"Italian greyhound",
"whippet",
"Ibizan hound, Ibizan Podenco",
"Norwegian elkhound, elkhound",
"otterhound, otter hound",
"Saluki, gazelle hound",
"Scottish deerhound, deerhound",
"Weimaraner",
"Staffordshire bullterrier, Staffordshire bull terrier",
"American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier", # noqa: E501
"Bedlington terrier",
"Border terrier",
"Kerry blue terrier",
"Irish terrier",
"Norfolk terrier",
"Norwich terrier",
"Yorkshire terrier",
"wire-haired fox terrier",
"Lakeland terrier",
"Sealyham terrier, Sealyham",
"Airedale, Airedale terrier",
"cairn, cairn terrier",
"Australian terrier",
"Dandie Dinmont, Dandie Dinmont terrier",
"Boston bull, Boston terrier",
"miniature schnauzer",
"giant schnauzer",
"standard schnauzer",
"Scotch terrier, Scottish terrier, Scottie",
"Tibetan terrier, chrysanthemum dog",
"silky terrier, Sydney silky",
"soft-coated wheaten terrier",
"West Highland white terrier",
"Lhasa, Lhasa apso",
"flat-coated retriever",
"curly-coated retriever",
"golden retriever",
"Labrador retriever",
"Chesapeake Bay retriever",
"German short-haired pointer",
"vizsla, Hungarian pointer",
"English setter",
"Irish setter, red setter",
"Gordon setter",
"Brittany spaniel",
"clumber, clumber spaniel",
"English springer, English springer spaniel",
"Welsh springer spaniel",
"cocker spaniel, English cocker spaniel, cocker",
"Sussex spaniel",
"Irish water spaniel",
"kuvasz",
"schipperke",
"groenendael",
"malinois",
"briard",
"kelpie",
"komondor",
"Old English sheepdog, bobtail",
"Shetland sheepdog, Shetland sheep dog, Shetland",
"collie",
"Border collie",
"Bouvier des Flandres, Bouviers des Flandres",
"Rottweiler",
"German shepherd, German shepherd dog, German police dog, alsatian",
"Doberman, Doberman pinscher",
"miniature pinscher",
"Greater Swiss Mountain dog",
"Bernese mountain dog",
"Appenzeller",
"EntleBucher",
"boxer",
"bull mastiff",
"Tibetan mastiff",
"French bulldog",
"Great Dane",
"Saint Bernard, St Bernard",
"Eskimo dog, husky",
"malamute, malemute, Alaskan malamute",
"Siberian husky",
"dalmatian, coach dog, carriage dog",
"affenpinscher, monkey pinscher, monkey dog",
"basenji",
"pug, pug-dog",
"Leonberg",
"Newfoundland, Newfoundland dog",
"Great Pyrenees",
"Samoyed, Samoyede",
"Pomeranian",
"chow, chow chow",
"keeshond",
"Brabancon griffon",
"Pembroke, Pembroke Welsh corgi",
"Cardigan, Cardigan Welsh corgi",
"toy poodle",
"miniature poodle",
"standard poodle",
"Mexican hairless",
"timber wolf, grey wolf, gray wolf, Canis lupus",
"white wolf, Arctic wolf, Canis lupus tundrarum",
"red wolf, maned wolf, Canis rufus, Canis niger",
"coyote, prairie wolf, brush wolf, Canis latrans",
"dingo, warrigal, warragal, Canis dingo",
"dhole, Cuon alpinus",
"African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus",
"hyena, hyaena",
"red fox, Vulpes vulpes",
"kit fox, Vulpes macrotis",
"Arctic fox, white fox, Alopex lagopus",
"grey fox, gray fox, Urocyon cinereoargenteus",
"tabby, tabby cat",
"tiger cat",
"Persian cat",
"Siamese cat, Siamese",
"Egyptian cat",
"cougar, puma, catamount, mountain lion, painter, panther, Felis concolor", # noqa: E501
"lynx, catamount",
"leopard, Panthera pardus",
"snow leopard, ounce, Panthera uncia",
"jaguar, panther, Panthera onca, Felis onca",
"lion, king of beasts, Panthera leo",
"tiger, Panthera tigris",
"cheetah, chetah, Acinonyx jubatus",
"brown bear, bruin, Ursus arctos",
"American black bear, black bear, Ursus americanus, Euarctos americanus", # noqa: E501
"ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus",
"sloth bear, Melursus ursinus, Ursus ursinus",
"mongoose",
"meerkat, mierkat",
"tiger beetle",
"ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle",
"ground beetle, carabid beetle",
"long-horned beetle, longicorn, longicorn beetle",
"leaf beetle, chrysomelid",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant, emmet, pismire",
"grasshopper, hopper",
"cricket",
"walking stick, walkingstick, stick insect",
"cockroach, roach",
"mantis, mantid",
"cicada, cicala",
"leafhopper",
"lacewing, lacewing fly",
"dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", # noqa: E501
"damselfly",
"admiral",
"ringlet, ringlet butterfly",
"monarch, monarch butterfly, milkweed butterfly, Danaus plexippus",
"cabbage butterfly",
"sulphur butterfly, sulfur butterfly",
"lycaenid, lycaenid butterfly",
"starfish, sea star",
"sea urchin",
"sea cucumber, holothurian",
"wood rabbit, cottontail, cottontail rabbit",
"hare",
"Angora, Angora rabbit",
"hamster",
"porcupine, hedgehog",
"fox squirrel, eastern fox squirrel, Sciurus niger",
"marmot",
"beaver",
"guinea pig, Cavia cobaya",
"sorrel",
"zebra",
"hog, pig, grunter, squealer, Sus scrofa",
"wild boar, boar, Sus scrofa",
"warthog",
"hippopotamus, hippo, river horse, Hippopotamus amphibius",
"ox",
"water buffalo, water ox, Asiatic buffalo, Bubalus bubalis",
"bison",
"ram, tup",
"bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis", # noqa: E501
"ibex, Capra ibex",
"hartebeest",
"impala, Aepyceros melampus",
"gazelle",
"Arabian camel, dromedary, Camelus dromedarius",
"llama",
"weasel",
"mink",
"polecat, fitch, foulmart, foumart, Mustela putorius",
"black-footed ferret, ferret, Mustela nigripes",
"otter",
"skunk, polecat, wood pussy",
"badger",
"armadillo",
"three-toed sloth, ai, Bradypus tridactylus",
"orangutan, orang, orangutang, Pongo pygmaeus",
"gorilla, Gorilla gorilla",
"chimpanzee, chimp, Pan troglodytes",
"gibbon, Hylobates lar",
"siamang, Hylobates syndactylus, Symphalangus syndactylus",
"guenon, guenon monkey",
"patas, hussar monkey, Erythrocebus patas",
"baboon",
"macaque",
"langur",
"colobus, colobus monkey",
"proboscis monkey, Nasalis larvatus",
"marmoset",
"capuchin, ringtail, Cebus capucinus",
"howler monkey, howler",
"titi, titi monkey",
"spider monkey, Ateles geoffroyi",
"squirrel monkey, Saimiri sciureus",
"Madagascar cat, ring-tailed lemur, Lemur catta",
"indri, indris, Indri indri, Indri brevicaudatus",
"Indian elephant, Elephas maximus",
"African elephant, Loxodonta africana",
"lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens",
"giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca",
"barracouta, snoek",
"eel",
"coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch", # noqa: E501
"rock beauty, Holocanthus tricolor",
"anemone fish",
"sturgeon",
"gar, garfish, garpike, billfish, Lepisosteus osseus",
"lionfish",
"puffer, pufferfish, blowfish, globefish",
"abacus",
"abaya",
"academic gown, academic robe, judge's robe",
"accordion, piano accordion, squeeze box",
"acoustic guitar",
"aircraft carrier, carrier, flattop, attack aircraft carrier",
"airliner",
"airship, dirigible",
"altar",
"ambulance",
"amphibian, amphibious vehicle",
"analog clock",
"apiary, bee house",
"apron",
"ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", # noqa: E501
"assault rifle, assault gun",
"backpack, back pack, knapsack, packsack, rucksack, haversack",
"bakery, bakeshop, bakehouse",
"balance beam, beam",
"balloon",
"ballpoint, ballpoint pen, ballpen, Biro",
"Band Aid",
"banjo",
"bannister, banister, balustrade, balusters, handrail",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel, cask",
"barrow, garden cart, lawn cart, wheelbarrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"bathing cap, swimming cap",
"bath towel",
"bathtub, bathing tub, bath, tub",
"beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", # noqa: E501
"beacon, lighthouse, beacon light, pharos",
"beaker",
"bearskin, busby, shako",
"beer bottle",
"beer glass",
"bell cote, bell cot",
"bib",
"bicycle-built-for-two, tandem bicycle, tandem",
"bikini, two-piece",
"binder, ring-binder",
"binoculars, field glasses, opera glasses",
"birdhouse",
"boathouse",
"bobsled, bobsleigh, bob",
"bolo tie, bolo, bola tie, bola",
"bonnet, poke bonnet",
"bookcase",
"bookshop, bookstore, bookstall",
"bottlecap",
"bow",
"bow tie, bow-tie, bowtie",
"brass, memorial tablet, plaque",
"brassiere, bra, bandeau",
"breakwater, groin, groyne, mole, bulwark, seawall, jetty",
"breastplate, aegis, egis",
"broom",
"bucket, pail",
"buckle",
"bulletproof vest",
"bullet train, bullet",
"butcher shop, meat market",
"cab, hack, taxi, taxicab",
"caldron, cauldron",
"candle, taper, wax light",
"cannon",
"canoe",
"can opener, tin opener",
"cardigan",
"car mirror",
"carousel, carrousel, merry-go-round, roundabout, whirligig",
"carpenter's kit, tool kit",
"carton",
"car wheel",
"cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM", # noqa: E501
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello, violoncello",
"cellular telephone, cellular phone, cellphone, cell, mobile phone",
"chain",
"chainlink fence",
"chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", # noqa: E501
"chain saw, chainsaw",
"chest",
"chiffonier, commode",
"chime, bell, gong",
"china cabinet, china closet",
"Christmas stocking",
"church, church building",
"cinema, movie theater, movie theatre, movie house, picture palace",
"cleaver, meat cleaver, chopper",
"cliff dwelling",
"cloak",
"clog, geta, patten, sabot",
"cocktail shaker",
"coffee mug",
"coffeepot",
"coil, spiral, volute, whorl, helix",
"combination lock",
"computer keyboard, keypad",
"confectionery, confectionary, candy store",
"container ship, containership, container vessel",
"convertible",
"corkscrew, bottle screw",
"cornet, horn, trumpet, trump",
"cowboy boot",
"cowboy hat, ten-gallon hat",
"cradle",
"crane",
"crash helmet",
"crate",
"crib, cot",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam, dike, dyke",
"desk",
"desktop computer",
"dial telephone, dial phone",
"diaper, nappy, napkin",
"digital clock",
"digital watch",
"dining table, board",
"dishrag, dishcloth",
"dishwasher, dish washer, dishwashing machine",
"disk brake, disc brake",
"dock, dockage, docking facility",
"dogsled, dog sled, dog sleigh",
"dome",
"doormat, welcome mat",
"drilling platform, offshore rig",
"drum, membranophone, tympan",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan, blower",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso maker",
"face powder",
"feather boa, boa",
"file, file cabinet, filing cabinet",
"fireboat",
"fire engine, fire truck",
"fire screen, fireguard",
"flagpole, flagstaff",
"flute, transverse flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster",
"freight car",
"French horn, horn",
"frying pan, frypan, skillet",
"fur coat",
"garbage truck, dustcart",
"gasmask, respirator, gas helmet",
"gas pump, gasoline pump, petrol pump, island dispenser",
"goblet",
"go-kart",
"golf ball",
"golfcart, golf cart",
"gondola",
"gong, tam-tam",
"gown",
"grand piano, grand",
"greenhouse, nursery, glasshouse",
"grille, radiator grille",
"grocery store, grocery, food market, market",
"guillotine",
"hair slide",
"hair spray",
"half track",
"hammer",
"hamper",
"hand blower, blow dryer, blow drier, hair dryer, hair drier",
"hand-held computer, hand-held microcomputer",
"handkerchief, hankie, hanky, hankey",
"hard disc, hard disk, fixed disk",
"harmonica, mouth organ, harp, mouth harp",
"harp",
"harvester, reaper",
"hatchet",
"holster",
"home theater, home theatre",
"honeycomb",
"hook, claw",
"hoopskirt, crinoline",
"horizontal bar, high bar",
"horse cart, horse-cart",
"hourglass",
"iPod",
"iron, smoothing iron",
"jack-o'-lantern",
"jean, blue jean, denim",
"jeep, landrover",
"jersey, T-shirt, tee shirt",
"jigsaw puzzle",
"jinrikisha, ricksha, rickshaw",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat, laboratory coat",
"ladle",
"lampshade, lamp shade",
"laptop, laptop computer",
"lawn mower, mower",
"lens cap, lens cover",
"letter opener, paper knife, paperknife",
"library",
"lifeboat",
"lighter, light, igniter, ignitor",
"limousine, limo",
"liner, ocean liner",
"lipstick, lip rouge",
"Loafer",
"lotion",
"loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", # noqa: E501
"loupe, jeweler's loupe",
"lumbermill, sawmill",
"magnetic compass",
"mailbag, postbag",
"mailbox, letter box",
"maillot",
"maillot, tank suit",
"manhole cover",
"maraca",
"marimba, xylophone",
"mask",
"matchstick",
"maypole",
"maze, labyrinth",
"measuring cup",
"medicine chest, medicine cabinet",
"megalith, megalithic structure",
"microphone, mike",
"microwave, microwave oven",
"military uniform",
"milk can",
"minibus",
"miniskirt, mini",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home, manufactured home",
"Model T",
"modem",
"monastery",
"monitor",
"moped",
"mortar",
"mortarboard",
"mosque",
"mosquito net",
"motor scooter, scooter",
"mountain bike, all-terrain bike, off-roader",
"mountain tent",
"mouse, computer mouse",
"mousetrap",
"moving van",
"muzzle",
"nail",
"neck brace",
"necklace",
"nipple",
"notebook, notebook computer",
"obelisk",
"oboe, hautboy, hautbois",
"ocarina, sweet potato",
"odometer, hodometer, mileometer, milometer",
"oil filter",
"organ, pipe organ",
"oscilloscope, scope, cathode-ray oscilloscope, CRO",
"overskirt",
"oxcart",
"oxygen mask",
"packet",
"paddle, boat paddle",
"paddlewheel, paddle wheel",
"padlock",
"paintbrush",
"pajama, pyjama, pj's, jammies",
"palace",
"panpipe, pandean pipe, syrinx",
"paper towel",
"parachute, chute",
"parallel bars, bars",
"park bench",
"parking meter",
"passenger car, coach, carriage",
"patio, terrace",
"pay-phone, pay-station",
"pedestal, plinth, footstall",
"pencil box, pencil case",
"pencil sharpener",
"perfume, essence",
"Petri dish",
"photocopier",
"pick, plectrum, plectron",
"pickelhaube",
"picket fence, paling",
"pickup, pickup truck",
"pier",
"piggy bank, penny bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate, pirate ship",
"pitcher, ewer",
"plane, carpenter's plane, woodworking plane",
"planetarium",
"plastic bag",
"plate rack",
"plow, plough",
"plunger, plumber's helper",
"Polaroid camera, Polaroid Land camera",
"pole",
"police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria", # noqa: E501
"poncho",
"pool table, billiard table, snooker table",
"pop bottle, soda bottle",
"pot, flowerpot",
"potter's wheel",
"power drill",
"prayer rug, prayer mat",
"printer",
"prison, prison house",
"projectile, missile",
"projector",
"puck, hockey puck",
"punching bag, punch bag, punching ball, punchball",
"purse",
"quill, quill pen",
"quilt, comforter, comfort, puff",
"racer, race car, racing car",
"racket, racquet",
"radiator",
"radio, wireless",
"radio telescope, radio reflector",
"rain barrel",
"recreational vehicle, RV, R.V.",
"reel",
"reflex camera",
"refrigerator, icebox",
"remote control, remote",
"restaurant, eating house, eating place, eatery",
"revolver, six-gun, six-shooter",
"rifle",
"rocking chair, rocker",
"rotisserie",
"rubber eraser, rubber, pencil eraser",
"rugby ball",
"rule, ruler",
"running shoe",
"safe",
"safety pin",
"saltshaker, salt shaker",
"sandal",
"sarong",
"sax, saxophone",
"scabbard",
"scale, weighing machine",
"school bus",
"schooner",
"scoreboard",
"screen, CRT screen",
"screw",
"screwdriver",
"seat belt, seatbelt",
"sewing machine",
"shield, buckler",
"shoe shop, shoe-shop, shoe store",
"shoji",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"ski mask",
"sleeping bag",
"slide rule, slipstick",
"sliding door",
"slot, one-armed bandit",
"snorkel",
"snowmobile",
"snowplow, snowplough",
"soap dispenser",
"soccer ball",
"sock",
"solar dish, solar collector, solar furnace",
"sombrero",
"soup bowl",
"space bar",
"space heater",
"space shuttle",
"spatula",
"speedboat",
"spider web, spider's web",
"spindle",
"sports car, sport car",
"spotlight, spot",
"stage",
"steam locomotive",
"steel arch bridge",
"steel drum",
"stethoscope",
"stole",
"stone wall",
"stopwatch, stop watch",
"stove",
"strainer",
"streetcar, tram, tramcar, trolley, trolley car",
"stretcher",
"studio couch, day bed",
"stupa, tope",
"submarine, pigboat, sub, U-boat",
"suit, suit of clothes",
"sundial",
"sunglass",
"sunglasses, dark glasses, shades",
"sunscreen, sunblock, sun blocker",
"suspension bridge",
"swab, swob, mop",
"sweatshirt",
"swimming trunks, bathing trunks",
"swing",
"switch, electric switch, electrical switch",
"syringe",
"table lamp",
"tank, army tank, armored combat vehicle, armoured combat vehicle",
"tape player",
"teapot",
"teddy, teddy bear",
"television, television system",
"tennis ball",
"thatch, thatched roof",
"theater curtain, theatre curtain",
"thimble",
"thresher, thrasher, threshing machine",
"throne",
"tile roof",
"toaster",
"tobacco shop, tobacconist shop, tobacconist",
"toilet seat",
"torch",
"totem pole",
"tow truck, tow car, wrecker",
"toyshop",
"tractor",
"trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", # noqa: E501
"tray",
"trench coat",
"tricycle, trike, velocipede",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus, trolley coach, trackless trolley",
"trombone",
"tub, vat",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle, monocycle",
"upright, upright piano",
"vacuum, vacuum cleaner",
"vase",
"vault",
"velvet",
"vending machine",
"vestment",
"viaduct",
"violin, fiddle",
"volleyball",
"waffle iron",
"wall clock",
"wallet, billfold, notecase, pocketbook",
"wardrobe, closet, press",
"warplane, military plane",
"washbasin, handbasin, washbowl, lavabo, wash-hand basin",
"washer, automatic washer, washing machine",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"wing",
"wok",
"wooden spoon",
"wool, woolen, woollen",
"worm fence, snake fence, snake-rail fence, Virginia fence",
"wreck",
"yawl",
"yurt",
"web site, website, internet site, site",
"comic book",
"crossword puzzle, crossword",
"street sign",
"traffic light, traffic signal, stoplight",
"book jacket, dust cover, dust jacket, dust wrapper",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot, hotpot",
"trifle",
"ice cream, icecream",
"ice lolly, lolly, lollipop, popsicle",
"French loaf",
"bagel, beigel",
"pretzel",
"cheeseburger",
"hotdog, hot dog, red hot",
"mashed potato",
"head cabbage",
"broccoli",
"cauliflower",
"zucchini, courgette",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber, cuke",
"artichoke, globe artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple, ananas",
"banana",
"jackfruit, jak, jack",
"custard apple",
"pomegranate",
"hay",
"carbonara",
"chocolate sauce, chocolate syrup",
"dough",
"meat loaf, meatloaf",
"pizza, pizza pie",
"potpie",
"burrito",
"red wine",
"espresso",
"cup",
"eggnog",
"alp",
"bubble",
"cliff, drop, drop-off",
"coral reef",
"geyser",
"lakeside, lakeshore",
"promontory, headland, head, foreland",
"sandbar, sand bar",
"seashore, coast, seacoast, sea-coast",
"valley, vale",
"volcano",
"ballplayer, baseball player",
"groom, bridegroom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", # noqa: E501
"corn",
"acorn",
"hip, rose hip, rosehip",
"buckeye, horse chestnut, conker",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn, carrion fungus",
"earthstar",
"hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa", # noqa: E501
"bolete",
"ear, spike, capitulum",
"toilet tissue, toilet paper, bathroom tissue",
]
| 29,033 | 27.947159 | 142 | py |
libai | libai-main/libai/inference/generator/generation_utils.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2020 The Google AI Language Team Authors, Facebook AI Research authors and
# The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import warnings
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
import oneflow as flow
from oneflow import nn
from libai.utils import distributed as dist
from .generation_beam_search import BeamScorer, BeamSearchScorer
from .generation_logits_processor import (
EncoderNoRepeatNGramLogitsProcessor,
ExponentialDecayLengthPenalty,
ForcedBOSTokenLogitsProcessor,
ForcedEOSTokenLogitsProcessor,
HammingDiversityLogitsProcessor,
InfNanRemoveLogitsProcessor,
LogitsProcessorList,
MinLengthLogitsProcessor,
NoRepeatNGramLogitsProcessor,
NormalizationLogitsProcessor,
PrefixConstrainedLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
TypicalLogitsWarper,
)
from .generation_stopping_criteria import (
MaxLengthCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
logger = logging.getLogger(__name__)
class Generator:
def _prepare_model_inputs(
self,
inputs: Optional[flow.Tensor] = None,
bos_token_id: Optional[int] = None,
model_kwargs: Optional[Dict[str, flow.Tensor]] = None,
):
if self.cfg.is_encoder_decoder:
input_name = "encoder_input_ids"
else:
input_name = "input_ids"
model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name}
inputs_kwarg = model_kwargs.pop(input_name, None)
if inputs_kwarg is not None and inputs is not None:
raise ValueError(
f"`inputs`: {inputs}` were passed alongside "
f"{input_name} which is not allowed."
f"Make sure to either pass {inputs} or {input_name}=..."
)
elif inputs_kwarg is not None:
inputs = inputs_kwarg
if inputs is None:
inputs = self._prepare_input_ids_for_generation(
bos_token_id, model_kwargs.get("encoder_outputs", None)
)
return inputs, input_name, model_kwargs
def prepare_inputs_for_generation(self, input_ids: flow.Tensor, **kwargs):
"""
Implement in subclasses of [`PreTrainedModel`] for custom behavior to prepare inputs in the
generate method.
"""
return {"input_ids": input_ids}
def _prepare_input_ids_for_generation(
self, bos_token_id: Optional[int], encoder_outputs: Optional[flow.Tensor]
):
if self.cfg.is_encoder_decoder and encoder_outputs is not None:
shape = encoder_outputs.size()[:-1]
return (
flow.ones(
shape,
dtype=flow.long,
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=dist.get_layer_placement(0),
)
* -100
)
if bos_token_id is None:
raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")
return (
flow.ones(
(1, 1),
dtype=flow.long,
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=dist.get_layer_placement(0),
)
* bos_token_id
)
def _prepare_attention_mask_for_generation(
self,
inputs: flow.Tensor,
pad_token_id: Optional[int],
eos_token_id: Optional[int],
):
is_input_ids = len(inputs.shape) == 2 and inputs.dtype in [flow.int64, flow.long]
is_pad_token_in_inputs = (pad_token_id is not None) and (pad_token_id in inputs)
is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (
(eos_token_id is not None) and (pad_token_id != eos_token_id)
)
# Check if input is input_ids and padded -> only then is attention_mask defined
if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id:
return inputs.ne(pad_token_id).bool()
else:
return flow.ones(
inputs.shape[:2],
dtype=flow.bool,
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=dist.get_layer_placement(0),
)
def _prepare_encoder_decoder_kwargs_for_generation(
self, inputs_tensor: flow.Tensor, model_kwargs, model_input_name: str
):
only_encoder = True
model_kwargs[model_input_name] = inputs_tensor
if "encoder_decoder_attn_mask" in set(inspect.signature(self.forward).parameters):
model_kwargs["encoder_decoder_attn_mask"] = model_kwargs["encoder_attn_mask"]
model_kwargs["encoder_outputs"] = self(**model_kwargs, only_encoder=only_encoder)
model_kwargs.pop(model_input_name)
return model_kwargs
def _prepare_decoder_input_ids_for_generation(
self,
batch_size: int,
decoder_start_token_id: int = None,
bos_token_id: int = None,
model_kwargs=None,
):
if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
return model_kwargs.pop("decoder_input_ids")
else:
decoder_start_token_id = (
decoder_start_token_id
if decoder_start_token_id
else self.cfg.decoder_start_token_id
)
return (
flow.ones(
(batch_size, 1),
dtype=flow.long,
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=dist.get_layer_placement(0),
)
* decoder_start_token_id
)
def _get_decoder_start_token_id(
self, decoder_start_token_id: int = None, bos_token_id: int = None
):
if decoder_start_token_id is not None:
return decoder_start_token_id
elif self.cfg.is_encoder_decoder:
return self.cfg.decoder_start_token_id
elif bos_token_id is not None:
return bos_token_id
else:
return self.cfg.bos_token_idx
    @staticmethod
    def _expand_inputs_for_generation(
        input_ids: flow.Tensor,
        expand_size: int = 1,
        is_encoder_decoder: bool = False,
        attention_mask: Optional[flow.Tensor] = None,
        encoder_outputs: Optional[flow.Tensor] = None,
        **model_kwargs,
    ):
        """Repeat each batch row ``expand_size`` times along dim 0.

        Used to duplicate inputs per beam / per return sequence before beam
        search or multi-sample decoding. Every row-aligned tensor
        (``input_ids``, ``attention_mask``, and — for encoder-decoder models —
        ``encoder_outputs`` and ``encoder_attn_mask``) is expanded with the
        same index pattern [0,0,...,1,1,...].

        Returns:
            The expanded ``input_ids`` and the updated ``model_kwargs``.

        Raises:
            ValueError: if ``is_encoder_decoder`` is True but no
                ``encoder_outputs`` were provided.
        """
        # Row indices [0..B-1], each repeated expand_size times.
        expanded_return_idx = (
            flow.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1)
        )
        # Lift the (local) index tensor to a broadcast global tensor so it can
        # index the globally-placed model inputs.
        expanded_return_idx = expanded_return_idx.to_global(
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        input_ids = input_ids.index_select(0, expanded_return_idx)
        # token_type ids not supported.
        if attention_mask is not None:
            model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx)
        if is_encoder_decoder:
            if encoder_outputs is None:
                raise ValueError(
                    "If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined."
                )
            # Align placement with the index tensor before gathering rows.
            encoder_outputs = encoder_outputs.to_global(placement=expanded_return_idx.placement)
            encoder_outputs = encoder_outputs.index_select(0, expanded_return_idx)
            model_kwargs["encoder_outputs"] = encoder_outputs
            model_kwargs["encoder_attn_mask"] = model_kwargs["encoder_attn_mask"].index_select(
                0, expanded_return_idx
            )
            # Cross-attention mask mirrors the (expanded) encoder mask.
            model_kwargs["encoder_decoder_attn_mask"] = model_kwargs["encoder_attn_mask"]
        return input_ids, model_kwargs
    def _update_model_kwargs_for_generation(
        self, outputs, model_kwargs, is_encoder_decoder: bool = False
    ):
        """Carry state from one decoding step into the next.

        Extracts the cached key/value state from ``outputs`` (first matching
        key wins: ``past_key_values``, ``mems``, ``past_buckets_states``) into
        ``model_kwargs["past"]`` and grows the relevant attention mask by one
        position for the token just generated.
        """
        if "past_key_values" in outputs:
            model_kwargs["past"] = outputs["past_key_values"]
        elif "mems" in outputs:
            model_kwargs["past"] = outputs["mems"]
        elif "past_buckets_states" in outputs:
            model_kwargs["past"] = outputs["past_buckets_states"]
        # NOTE(review): this branch assumes the model stores its cache on a
        # `self.past_key_values` attribute (list with a meaningful last entry)
        # — confirm subclasses define it before relying on this fallback.
        elif self.past_key_values[-1] is not None:
            model_kwargs["past"] = self.past_key_values
        else:
            model_kwargs["past"] = None
        # update attention mask
        # Decoder-only models extend `attention_mask`; encoder-decoder models
        # extend `decoder_attn_mask` instead (encoder mask stays fixed).
        if "attention_mask" in model_kwargs and not is_encoder_decoder:
            attention_mask = model_kwargs["attention_mask"]
            pad = flow.ones(
                (attention_mask.shape[0], 1),
                sbp=attention_mask.sbp,
                placement=attention_mask.placement,
            )
            model_kwargs["attention_mask"] = flow.cat([attention_mask, pad], dim=-1)
        if "decoder_attn_mask" in model_kwargs and is_encoder_decoder:
            attention_mask = model_kwargs["decoder_attn_mask"]
            pad = flow.ones(
                (attention_mask.shape[0], 1),
                sbp=attention_mask.sbp,
                placement=attention_mask.placement,
            )
            model_kwargs["decoder_attn_mask"] = flow.cat([attention_mask, pad], dim=-1)
        return model_kwargs
def _reorder_cache(self, past, beam_idx):
raise NotImplementedError(
"Make sure that a `_reorder_cache` function is correctly implemented in "
f"{self.__class__.__module__} to enable beam search for {self.__class__}"
)
def _get_logits_warper(
self,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
typical_p: Optional[float] = None,
temperature: Optional[float] = None,
num_beams: Optional[int] = None,
renormalize_logits: Optional[bool] = None,
):
# instantiate warpers list
warpers = LogitsProcessorList()
# all samplers can be found in `generation_utils_samplers.py`
if temperature is not None and temperature != 1.0:
warpers.append(TemperatureLogitsWarper(temperature))
if top_k is not None and top_k != 0:
warpers.append(
TopKLogitsWarper(top_k=top_k, min_tokens_to_keep=(2 if num_beams > 1 else 1))
)
if top_p is not None and top_p < 1.0:
warpers.append(
TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=(2 if num_beams > 1 else 1))
)
if typical_p is not None and typical_p < 1.0:
warpers.append(
TypicalLogitsWarper(mass=typical_p, min_tokens_to_keep=(2 if num_beams > 1 else 1))
)
# `LogitNormalization` should always be the last logit processor, when present
if renormalize_logits is True:
warpers.append(NormalizationLogitsProcessor())
return warpers
    def _get_logits_processor(
        self,
        repetition_penalty: float,
        no_repeat_ngram_size: int,
        encoder_no_repeat_ngram_size: int,
        input_ids_seq_length: int,
        encoder_input_ids: flow.Tensor,
        min_length: int,
        max_length: int,
        eos_token_id: int,
        forced_bos_token_id: int,
        forced_eos_token_id: int,
        prefix_allowed_tokens_fn: Callable[[int, flow.Tensor], List[int]],
        num_beams: int,
        num_beam_groups: int,
        diversity_penalty: float,
        remove_invalid_values: bool,
        exponential_decay_length_penalty: Tuple,
        logits_processor: Optional[LogitsProcessorList],
        renormalize_logits: Optional[bool],
    ):
        """
        This class returns a [`LogitsProcessorList`] list object that contains all relevant
        [`LogitsProcessor`] instances used to modify the scores of the language model head.

        Each processor is appended only when its controlling argument is set to
        a non-neutral value; the resulting list is then merged with any
        user-supplied ``logits_processor`` (duplicate types raise). The append
        order below determines application order.

        Raises:
            ValueError: if ``encoder_no_repeat_ngram_size`` is requested for a
                decoder-only model, or a user processor duplicates a built-in one.
        """
        processors = LogitsProcessorList()
        # instantiate processors list
        if diversity_penalty is not None and diversity_penalty > 0.0:
            # Diverse (grouped) beam search: penalize tokens already chosen by
            # other beam groups in the same step.
            processors.append(
                HammingDiversityLogitsProcessor(
                    diversity_penalty=diversity_penalty,
                    num_beams=num_beams,
                    num_beam_groups=num_beam_groups,
                )
            )
        if repetition_penalty is not None and repetition_penalty != 1.0:
            processors.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty))
        if no_repeat_ngram_size is not None and no_repeat_ngram_size > 0:
            processors.append(NoRepeatNGramLogitsProcessor(no_repeat_ngram_size))
        if encoder_no_repeat_ngram_size is not None and encoder_no_repeat_ngram_size > 0:
            if self.cfg.is_encoder_decoder:
                processors.append(
                    EncoderNoRepeatNGramLogitsProcessor(
                        encoder_no_repeat_ngram_size, encoder_input_ids
                    )
                )
            else:
                raise ValueError(
                    "It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only "
                    "architecture"
                )
        if min_length is not None and eos_token_id is not None and min_length > 0:
            # Suppress EOS until at least `min_length` tokens have been produced.
            processors.append(MinLengthLogitsProcessor(min_length, eos_token_id))
        if prefix_allowed_tokens_fn is not None:
            processors.append(
                PrefixConstrainedLogitsProcessor(
                    prefix_allowed_tokens_fn, num_beams // num_beam_groups
                )
            )
        if forced_bos_token_id is not None:
            processors.append(ForcedBOSTokenLogitsProcessor(forced_bos_token_id))
        if forced_eos_token_id is not None:
            processors.append(ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id))
        if remove_invalid_values is True:
            # Scrub inf/nan scores that would otherwise break sampling.
            processors.append(InfNanRemoveLogitsProcessor())
        if exponential_decay_length_penalty is not None:
            processors.append(
                ExponentialDecayLengthPenalty(
                    exponential_decay_length_penalty, eos_token_id, input_ids_seq_length
                )
            )
        processors = self._merge_criteria_processor_list(processors, logits_processor)
        # `LogitNormalization` should always be the last logit processor, when present
        if renormalize_logits is True:
            processors.append(NormalizationLogitsProcessor())
        return processors
def _get_stopping_criteria(
self,
max_length: Optional[int],
max_time: Optional[float],
stopping_criteria: Optional[StoppingCriteriaList],
):
criteria = StoppingCriteriaList()
if max_length is not None:
criteria.append(MaxLengthCriteria(max_length=max_length))
if max_time is not None:
criteria.append(MaxTimeCriteria(max_time=max_time))
criteria = self._merge_criteria_processor_list(criteria, stopping_criteria)
return criteria
def _merge_criteria_processor_list(self, default_list, custom_list):
if len(custom_list) == 0:
return default_list
for default in default_list:
for custom in custom_list:
if type(custom) is type(default):
raise ValueError("Criteria repetition error.")
default_list.extend(custom_list)
return default_list
    def compute_transition_beam_scores(
        self,
        sequences: flow.Tensor,
        scores: Tuple[flow.Tensor],
        beam_indices: flow.Tensor,
        eos_token_id: int = None,
    ):
        """Recover the per-token transition scores of the returned beams.

        Args:
            sequences: generated token ids, one row per returned sequence.
            scores: per-step score tensors as produced during beam search;
                each step presumably has shape (batch*num_beams, vocab_size)
                — TODO confirm against the beam-search caller.
            beam_indices: for every output position, which beam the token came
                from; negative entries mark padding past a beam's end.
            eos_token_id: unused here; kept for interface compatibility.

        Returns:
            Tensor of per-token scores aligned with the tail of ``sequences``
            (padding positions are set to 0).
        """
        # Stack steps, flatten (beam, vocab), then transpose so that
        # dim 0 indexes (beam * vocab) and dim 1 indexes the step.
        scores = flow.stack(scores).reshape(len(scores), -1).transpose(0, 1)
        # Negative beam indices mean "no token here" (beam finished early).
        beam_indices_mask = beam_indices < 0
        # Longest actual (unpadded) beam length across the batch.
        max_beam_length = (1 - beam_indices_mask.long()).sum(-1).max()
        beam_indices = beam_indices[:, :max_beam_length]
        beam_indices_mask = beam_indices_mask[:, :max_beam_length]
        # Neutralize masked slots so the arithmetic below stays in range.
        beam_indices[beam_indices_mask] = 0
        # Flatten (beam, token) pairs into indices over the (beam*vocab) axis.
        beam_sequence_indices = beam_indices * self.cfg.vocab_size
        # Only the generated tail of `sequences` has matching score steps.
        cut_idx = sequences.shape[-1] - max_beam_length
        indices = sequences[:, cut_idx:] + beam_sequence_indices
        transition_scores = scores.gather(0, indices)
        # Zero out positions that never held a real token.
        transition_scores[beam_indices_mask] = 0
        return transition_scores
def _validate_model_kwargs(self, model_kwargs):
if self.cfg.is_encoder_decoder:
for key in ["decoder_input_ids"]:
model_kwargs.pop(key, None)
unused_model_args = []
model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
if "kwargs" in model_args:
model_args |= set(inspect.signature(self.forward).parameters)
for key, value in model_kwargs.items():
if value is not None and key not in model_args:
unused_model_args.append(key)
if unused_model_args:
raise ValueError(
f"The following `model_kwargs` are not used by the model: {unused_model_args} "
"(note: typos in the generate arguments will also show up in this list)"
)
    def greedy_search(
        self,
        input_ids: flow.Tensor,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        max_length: Optional[int] = None,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
        is_encoder_decoder: bool = False,
        output_scores: bool = False,
        **model_kwargs,
    ):
        """Autoregressive generation that always picks the argmax token.

        Args:
            input_ids: prompt token ids; dim 0 is treated as the batch dimension.
            logits_processor: processors applied to the logits before argmax.
            stopping_criteria: extra criteria that end generation (besides EOS).
            max_length: deprecated; use a MaxLengthCriteria instead.
            pad_token_id / eos_token_id: special token ids; default to ``self.cfg``.
            is_encoder_decoder: forwarded to ``_update_model_kwargs_for_generation``.
            output_scores: when True, per-step processed logits are accumulated in
                ``scores`` (note: they are collected but not returned here).
            **model_kwargs: extra inputs forwarded to the model each step.

        Returns:
            ``input_ids`` extended with the generated tokens.
        """
        pad_token_id = pad_token_id if pad_token_id is not None else self.cfg.pad_token_id
        eos_token_id = eos_token_id if eos_token_id is not None else self.cfg.eos_token_id
        output_scores = output_scores if output_scores is not None else self.cfg.output_scores
        scores = () if output_scores else None
        logits_processor = (
            logits_processor if logits_processor is not None else LogitsProcessorList()
        )
        stopping_criteria = (
            stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
        )
        if max_length is not None:
            warnings.warn(
                "`max_length` is deprecated in this function, use MaxLengthCriteria" " instead.",
                UserWarning,
            )
            stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
        # keep track of which sequences are already finished (1 = still generating)
        unfinished_sequences = flow.ones(input_ids.shape[0])
        cur_len = input_ids.shape[-1]
        while True:
            if input_ids.size(0) > 1:
                # shard the batch along dim 0 across ranks when batch size > 1
                input_ids = input_ids.to_global(
                    sbp=dist.get_nd_sbp([flow.sbp.split(0), flow.sbp.broadcast])
                )
            # prepare model inputs
            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
            # generate
            outputs = self(**model_inputs)
            # logits of the last position only
            next_token_logits = outputs["logits"][:, -1, :]
            # logits_processor
            next_token_scores = logits_processor(input_ids, next_token_logits)
            # Store scores
            if output_scores:
                scores += (next_token_scores,)
            # argmax
            next_tokens = flow.argmax(next_token_scores, dim=-1)
            next_tokens = next_tokens.to_global(placement=input_ids.placement)
            unfinished_sequences = unfinished_sequences.to_global(
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                placement=dist.get_layer_placement(0),
            )
            if eos_token_id is not None:
                if pad_token_id is None:
                    raise ValueError(
                        "If `eos_token_id` is defined, make sure that `pad_token_id` is defined."
                    )
                # finished sequences keep emitting pad tokens
                next_tokens = next_tokens * unfinished_sequences + pad_token_id * (
                    1 - unfinished_sequences
                )
            next_tokens = next_tokens.to(flow.long)
            input_ids = flow.cat([input_ids, next_tokens[:, None]], dim=-1)
            model_kwargs = self._update_model_kwargs_for_generation(
                outputs, model_kwargs, is_encoder_decoder=is_encoder_decoder
            )
            cur_len = cur_len + 1
            # if eos_token was found in one sentence, set sentence to finished
            if eos_token_id is not None:
                unfinished_sequences = flow.mul(
                    unfinished_sequences, (next_tokens != eos_token_id).long()
                )
            if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
                break
        # Release records: drop cached decoding state so the next call starts fresh
        if "past_key_values" in self.__dir__():
            self.past_key_values = [None] * self.cfg.hidden_layers
        if "encoder_states" in self.__dir__():
            self.encoder_states = None
        return input_ids
    def multinomial_sample(
        self,
        input_ids: flow.Tensor,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        logits_warper: Optional[LogitsProcessorList] = None,
        max_length: Optional[int] = None,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
        is_encoder_decoder: bool = False,
        output_scores: bool = False,
        **model_kwargs,
    ):
        """Autoregressive generation that samples the next token from the softmax.

        Same loop structure as ``greedy_search``, but the next token is drawn
        with ``flow.multinomial`` after applying ``logits_processor`` and
        ``logits_warper`` (e.g. temperature / top-k / top-p).

        Returns:
            ``input_ids`` extended with the sampled tokens.
        """
        # init values
        pad_token_id = pad_token_id if pad_token_id is not None else self.cfg.pad_token_id
        eos_token_id = eos_token_id if eos_token_id is not None else self.cfg.eos_token_id
        output_scores = output_scores if output_scores is not None else self.cfg.output_scores
        scores = () if output_scores else None
        logits_processor = (
            logits_processor if logits_processor is not None else LogitsProcessorList()
        )
        stopping_criteria = (
            stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
        )
        if max_length is not None:
            warnings.warn(
                "`max_length` is deprecated in this function, use "
                "`stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))`"
                "instead.",
                UserWarning,
            )
            stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
        logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList()
        # 1 = sequence still generating, 0 = finished
        unfinished_sequences = flow.ones(input_ids.shape[0])
        cur_len = input_ids.shape[-1]
        while True:
            # prepare model inputs
            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
            # generate
            outputs = self(**model_inputs)
            next_token_logits = outputs["logits"][:, -1, :]
            # pre-process distribution
            next_token_scores = logits_processor(input_ids, next_token_logits)
            next_token_scores = logits_warper(input_ids, next_token_scores)
            # Store scores
            if output_scores:
                scores += (next_token_scores,)
            # sample
            probs = nn.functional.softmax(next_token_scores, dim=-1)
            # multinomial sampling is done on a local (non-global) tensor
            probs = probs.to_global(
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                placement=dist.get_layer_placement(0),
            ).to_local()
            next_tokens = flow.multinomial(probs, num_samples=1).squeeze(1)
            # back to a broadcast global tensor so all ranks agree on the sample
            next_tokens = next_tokens.to_global(
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                placement=dist.get_layer_placement(0),
            )
            unfinished_sequences = unfinished_sequences.to_global(
                sbp=next_tokens.sbp, placement=next_tokens.placement
            )
            if eos_token_id is not None:
                if pad_token_id is None:
                    raise ValueError(
                        "If `eos_token_id` is defined, make sure that `pad_token_id` is defined."
                    )
                # finished sequences keep emitting pad tokens
                next_tokens = next_tokens * unfinished_sequences + pad_token_id * (
                    1 - unfinished_sequences
                )
            next_tokens = next_tokens.to(flow.long)
            input_ids = flow.cat([input_ids, next_tokens[:, None]], dim=-1)
            model_kwargs = self._update_model_kwargs_for_generation(
                outputs, model_kwargs, is_encoder_decoder=is_encoder_decoder
            )
            cur_len = cur_len + 1
            # mark sequences that just produced EOS as finished
            if eos_token_id is not None:
                unfinished_sequences = flow.mul(
                    unfinished_sequences, (next_tokens != eos_token_id).long()
                )
            if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
                break
        # Release records: drop cached decoding state so the next call starts fresh
        if "past_key_values" in self.__dir__():
            self.past_key_values = [None] * self.cfg.hidden_layers
        if "encoder_states" in self.__dir__():
            self.encoder_states = None
        return input_ids
    def beam_search(
        self,
        input_ids: flow.Tensor,
        beam_scorer: BeamScorer,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        max_length: Optional[int] = None,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
        is_encoder_decoder: bool = False,
        output_scores: bool = False,
        **model_kwargs,
    ):
        """Generate sequences with beam search, driven by ``beam_scorer``.

        Args:
            input_ids: prompt ids already expanded to (batch_size * num_beams, seq_len)
                (enforced by the batch_beam_size check below).
            beam_scorer: selects surviving beams per step and finalizes hypotheses.
            Remaining arguments mirror ``greedy_search``.

        Returns:
            The best finished sequences as selected by ``beam_scorer.finalize``.
        """
        pad_token_id = pad_token_id if pad_token_id is not None else self.cfg.pad_token_id
        eos_token_id = eos_token_id if eos_token_id is not None else self.cfg.eos_token_id
        output_scores = output_scores if output_scores is not None else self.cfg.output_scores
        scores = () if output_scores else None
        logits_processor = (
            logits_processor if logits_processor is not None else LogitsProcessorList()
        )
        stopping_criteria = (
            stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
        )
        if max_length is not None:
            warnings.warn(
                "`max_length` is deprecated in this function, use "
                "`stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))`"
                "instead.",
                UserWarning,
            )
            stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
        if len(stopping_criteria) == 0:
            warnings.warn(
                "You don't have defined any stopping_criteria, this will likely loop forever",
                UserWarning,
            )
        batch_size = len(beam_scorer._beam_hyps)
        num_beams = beam_scorer.num_beams
        batch_beam_size, cur_len = input_ids.shape
        if num_beams * batch_size != batch_beam_size:
            raise ValueError(
                f"Batch dimension of `input_ids` should be {num_beams * batch_size}, "
                f"but is {batch_beam_size}."
            )
        beam_indices = None
        beam_scores = flow.zeros(
            (batch_size, num_beams),
            dtype=flow.float,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        # Only the first beam starts "alive"; -1e9 keeps the other (initially
        # identical) beams from being selected in the first step.
        beam_scores[:, 1:] = -1e9
        beam_scores = beam_scores.view((batch_size * num_beams,))
        while True:
            # prepare model inputs
            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
            outputs = self(**model_inputs)
            next_token_logits = outputs["logits"][:, -1, :]
            next_token_scores = nn.functional.log_softmax(
                next_token_logits, dim=-1
            )  # (batch_size * num_beams, vocab_size)
            next_token_scores = next_token_scores.to_global(
                sbp=input_ids.sbp, placement=input_ids.placement
            )
            next_token_scores_processed = logits_processor(input_ids, next_token_scores)
            # accumulate log-probs along each beam
            next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(
                next_token_scores
            )
            # Store scores
            if output_scores:
                scores += (next_token_scores,)
            # reshape for beam search
            vocab_size = next_token_scores.shape[-1]
            next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
            # keep 2*num_beams candidates so EOS hits still leave enough live beams
            next_token_scores, next_tokens = flow.topk(
                next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True
            )
            # decode flat indices over num_beams*vocab back into (beam, token)
            next_indices = next_tokens // vocab_size
            next_tokens = next_tokens % vocab_size
            beam_outputs = beam_scorer.process(
                input_ids,
                next_token_scores,
                next_tokens,
                next_indices,
                pad_token_id=pad_token_id,
                eos_token_id=eos_token_id,
                beam_indices=beam_indices,
            )
            beam_scores = beam_outputs["next_beam_scores"]
            beam_next_tokens = beam_outputs["next_beam_tokens"]
            beam_idx = beam_outputs["next_beam_indices"]
            # reorder sequences to the surviving beams, then append their tokens
            input_ids = flow.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
            model_kwargs = self._update_model_kwargs_for_generation(
                outputs, model_kwargs, is_encoder_decoder=is_encoder_decoder
            )
            # update past_key_value
            # NOTE(review): assumes `_update_model_kwargs_for_generation` always puts a
            # "past" key into model_kwargs (plain [] access would KeyError otherwise),
            # and that `_reorder_cache(beam_idx)` reorders internal cached state with
            # only the beam indices -- confirm both against the model implementation.
            if model_kwargs["past"] is not None:
                model_kwargs["past"] = self._reorder_cache(beam_idx)
            # increase cur_len
            cur_len = cur_len + 1
            if beam_scorer.is_done or stopping_criteria(input_ids, scores):
                break
        sequence_outputs = beam_scorer.finalize(
            input_ids,
            beam_scores,
            next_tokens,
            next_indices,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            max_length=stopping_criteria.max_length,
            beam_indices=beam_indices,
        )
        # Release records: drop cached decoding state so the next call starts fresh
        if "past_key_values" in self.__dir__():
            self.past_key_values = [None] * self.cfg.hidden_layers
        if "encoder_states" in self.__dir__():
            self.encoder_states = None
        return sequence_outputs["sequences"]
@flow.no_grad()
def generate(
self,
inputs: Optional[flow.Tensor] = None,
max_length: Optional[int] = None,
min_length: Optional[int] = None,
do_sample: Optional[bool] = None,
early_stopping: Optional[bool] = None,
num_beams: Optional[int] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
typical_p: Optional[float] = None,
repetition_penalty: Optional[float] = None,
force_words_ids: Optional[Union[Iterable[int], Iterable[Iterable[int]]]] = None,
bos_token_id: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
length_penalty: Optional[float] = None,
no_repeat_ngram_size: Optional[int] = None,
encoder_no_repeat_ngram_size: Optional[int] = None,
num_return_sequences: Optional[int] = None,
max_time: Optional[float] = None,
max_new_tokens: Optional[int] = None,
decoder_start_token_id: Optional[int] = None,
use_cache: Optional[bool] = None,
num_beam_groups: Optional[int] = None,
diversity_penalty: Optional[float] = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, flow.Tensor], List[int]]] = None,
logits_processor: Optional[LogitsProcessorList] = LogitsProcessorList(),
renormalize_logits: Optional[bool] = None,
stopping_criteria=StoppingCriteriaList(),
constraints=None,
output_scores: Optional[bool] = None,
forced_bos_token_id: Optional[int] = None,
forced_eos_token_id: Optional[int] = None,
remove_invalid_values: Optional[bool] = None,
exponential_decay_length_penalty: Optional[Tuple[Union[int, float]]] = None,
**model_kwargs,
):
# 0. Validate model kwargs
self._validate_model_kwargs(model_kwargs.copy())
# 1. Set generation parameters if not already defined
bos_token_id = bos_token_id if bos_token_id is not None else self.cfg.bos_token_id
num_beams = num_beams if num_beams is not None else self.cfg.num_beams
length_penalty = length_penalty if length_penalty is not None else self.cfg.length_penalty
early_stopping = early_stopping if early_stopping is not None else self.cfg.early_stopping
num_beam_groups = (
num_beam_groups if num_beam_groups is not None else self.cfg.num_beam_groups
)
do_sample = do_sample if do_sample is not None else self.cfg.do_sample
num_return_sequences = (
num_return_sequences
if num_return_sequences is not None
else self.cfg.num_return_sequences
)
pad_token_id = pad_token_id if pad_token_id is not None else self.cfg.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.cfg.eos_token_id
output_scores = output_scores if output_scores is not None else self.cfg.output_scores
# 2. Prepare model inputs
inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
inputs, bos_token_id, model_kwargs
)
batch_size = inputs_tensor.shape[0]
# 3. Prepare other model kwargs
model_kwargs["use_cache"] = use_cache if use_cache is not None else self.cfg.use_cache
if self.cfg.is_encoder_decoder:
att_mask_name = "encoder_attn_mask"
accepts_attention_mask = att_mask_name in set(
inspect.signature(self.forward).parameters.keys()
)
else:
att_mask_name = "attention_mask"
accepts_attention_mask = att_mask_name in set(
inspect.signature(self.forward).parameters.keys()
)
requires_attention_mask = "encoder_outputs" not in model_kwargs
if (
model_kwargs.get(att_mask_name, None) is None
and requires_attention_mask
and accepts_attention_mask
):
model_kwargs[att_mask_name] = self._prepare_attention_mask_for_generation(
inputs_tensor, pad_token_id, eos_token_id
)
if self.cfg.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
# if model is encoder decoder encoder_outputs are created
# and added to `model_kwargs`
model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
inputs_tensor, model_kwargs, model_input_name
)
# 4. Prepare `input_ids` which will be used for auto-regressive generation
if self.cfg.is_encoder_decoder:
input_ids = self._prepare_decoder_input_ids_for_generation(
batch_size,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
model_kwargs=model_kwargs,
)
else:
# if decoder-only then inputs_tensor has to be `input_ids`
input_ids = inputs_tensor
# 5. Prepare `max_length` depending on other stopping criteria.
input_ids_seq_length = input_ids.shape[-1]
if max_length is None and max_new_tokens is None:
if dist.is_main_process():
warnings.warn(
"Neither `max_length` nor `max_new_tokens` has been set, `max_length` will "
f"default to {self.cfg.max_length} (`self.cfg.max_length`). we recommend using"
" `max_new_tokens` to control the maximum length of the generation.",
UserWarning,
)
elif max_length is None and max_new_tokens is not None:
max_length = max_new_tokens + input_ids_seq_length
elif max_length is not None and max_new_tokens is not None:
raise ValueError(
"Both `max_new_tokens` and `max_length` have been set but they serve the same"
)
# default to cfg if still None
max_length = max_length if max_length is not None else self.cfg.max_length
min_length = min_length if min_length is not None else self.cfg.min_length
if min_length is not None and min_length > max_length:
raise ValueError(
f"Unfeasable length constraints: the minimum length ({min_length}) is larger than"
f"the maximum length ({max_length})"
)
if input_ids_seq_length >= max_length:
input_ids_string = "decoder_input_ids" if self.cfg.is_encoder_decoder else "input_ids"
logger.warning(
f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is"
f" set to {max_length}. This can lead to unexpected behavior. You should consider "
"increasing `max_new_tokens`."
)
# 6. Determine generation mode
is_constraint_gen_mode = constraints is not None or force_words_ids is not None
is_greedy_gen_mode = (
(num_beams == 1)
and (num_beam_groups == 1)
and do_sample is False
and not is_constraint_gen_mode
)
is_sample_gen_mode = (
(num_beams == 1)
and (num_beam_groups == 1)
and do_sample is True
and not is_constraint_gen_mode
)
is_beam_gen_mode = (
(num_beams > 1)
and (num_beam_groups == 1)
and do_sample is False
and not is_constraint_gen_mode
)
# is_beam_sample_gen_mode = (
# (num_beams > 1)
# and (num_beam_groups == 1)
# and do_sample is True
# and not is_constraint_gen_mode
# )
is_group_beam_gen_mode = (
(num_beams > 1) and (num_beam_groups > 1) and not is_constraint_gen_mode
)
if num_beam_groups > num_beams:
raise ValueError("`num_beam_groups` has to be smaller or equal to `num_beams`")
if is_group_beam_gen_mode and do_sample is True:
raise ValueError(
"Diverse beam search cannot be used in sampling mode. Make sure that `do_sample` is"
" set to `False`."
)
# 7. Prepare distribution pre_processing samplers
logits_processor = self._get_logits_processor(
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size,
input_ids_seq_length=input_ids_seq_length,
encoder_input_ids=inputs_tensor,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
forced_bos_token_id=forced_bos_token_id,
forced_eos_token_id=forced_eos_token_id,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
num_beams=num_beams,
num_beam_groups=num_beam_groups,
diversity_penalty=diversity_penalty,
remove_invalid_values=remove_invalid_values,
exponential_decay_length_penalty=exponential_decay_length_penalty,
logits_processor=logits_processor,
renormalize_logits=renormalize_logits,
)
# 8. Prepare stopping criteria
stopping_criteria = self._get_stopping_criteria(
max_length=max_length, max_time=max_time, stopping_criteria=stopping_criteria
)
# 9. Go into different generation modes
if is_greedy_gen_mode:
if num_return_sequences > 1:
raise ValueError(
f"num_return_sequences has to be 1, but is {num_return_sequences} when doing"
" greedy search."
)
# 10. Run greedy search
return self.greedy_search(
input_ids,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
output_scores=output_scores,
**model_kwargs,
)
elif is_sample_gen_mode:
# 10. Prepare logits warper
logits_warper = self._get_logits_warper(
top_k=top_k,
top_p=top_p,
typical_p=typical_p,
temperature=temperature,
num_beams=num_beams,
renormalize_logits=renormalize_logits,
)
# 11. Expand input_ids with `num_return_sequences` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids,
expand_size=num_return_sequences,
is_encoder_decoder=self.cfg.is_encoder_decoder,
**model_kwargs,
)
# 12. Run multinomial sample
return self.multinomial_sample(
input_ids,
logits_processor=logits_processor,
logits_warper=logits_warper,
stopping_criteria=stopping_criteria,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
output_scores=output_scores,
**model_kwargs,
)
elif is_beam_gen_mode:
if num_return_sequences > num_beams:
raise ValueError(
"`num_return_sequences` has to be smaller or equal to `num_beams`."
)
if stopping_criteria.max_length is None:
raise ValueError("`max_length` needs to be a stopping_criteria for now.")
# 10. Prepare beam search scorer
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
length_penalty=length_penalty,
do_early_stopping=early_stopping,
num_beam_hyps_to_keep=num_return_sequences,
)
# 11. Interleave input_ids with `num_beams` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids,
expand_size=num_beams,
is_encoder_decoder=self.cfg.is_encoder_decoder,
**model_kwargs,
)
# 12. Run beam search
return self.beam_search(
input_ids,
beam_scorer,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
output_scores=output_scores,
**model_kwargs,
)
| 44,204 | 39.930556 | 100 | py |
libai | libai-main/libai/inference/generator/generation_stopping_criteria.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2020 The Google AI Language Team Authors, Facebook AI Research authors and
# The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import warnings
from copy import deepcopy
import oneflow as flow
class StoppingCriteriaList(list):
    """A callable ``list`` of stopping criteria.

    Invoking the list evaluates every contained criterion and reports True
    as soon as one of them requests that generation stop.
    """

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor, **kwargs) -> bool:
        for criterion in self:
            if criterion(input_ids, scores):
                return True
        return False

    @property
    def max_length(self):
        """The ``max_length`` of the first ``MaxLengthCriteria``, or None if absent."""
        lengths = (c.max_length for c in self if isinstance(c, MaxLengthCriteria))
        return next(lengths, None)
class MaxLengthCriteria(object):
    """Stop generation once the sequence reaches ``max_length`` tokens."""

    def __init__(self, max_length: int):
        # total sequence length (prompt included) at which generation halts
        self.max_length = max_length

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> bool:
        current_length = input_ids.shape[-1]
        return current_length >= self.max_length
class MaxTimeCriteria(object):
    """Stop generation after ``max_time`` seconds of wall-clock time."""

    def __init__(self, max_time: float, initial_timestamp: float = None):
        self.max_time = max_time
        # measure from "now" unless the caller supplies an explicit start time
        if initial_timestamp is None:
            initial_timestamp = time.time()
        self.initial_timestamp = initial_timestamp

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor, **kwargs) -> bool:
        elapsed = time.time() - self.initial_timestamp
        return elapsed > self.max_time
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int):
    """Return a copy of ``stopping_criteria`` that is guaranteed to carry a max length.

    If the criteria already define a different ``max_length``, a UserWarning is
    emitted and the existing criterion is kept; if none is defined, a
    ``MaxLengthCriteria`` for ``max_length`` is appended to the copy.
    """
    new_stopping_criteria = deepcopy(stopping_criteria)
    existing_max_length = stopping_criteria.max_length
    if existing_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    elif existing_max_length != max_length:
        warnings.warn(
            "You set different `max_length` for stopping criteria and `max_length` parameter",
            UserWarning,
        )
    return new_stopping_criteria
| 2,449 | 36.692308 | 96 | py |
libai | libai-main/libai/inference/generator/generation_beam_search.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2020 The Google AI Language Team Authors, Facebook AI Research authors and
# The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from abc import ABC, abstractmethod
from collections import UserDict
from typing import Optional, Tuple
import oneflow as flow
from libai.utils import distributed as dist
class BeamScorer(ABC):
    """Abstract base class for the scorer that drives ``beam_search``."""

    @abstractmethod
    def process(
        self,
        input_ids: flow.Tensor,
        next_scores: flow.Tensor,
        next_tokens: flow.Tensor,
        next_indices: flow.Tensor,
        **kwargs,
    ):
        """Select the beams to continue with for the next generation step.

        Concrete implementations (see ``BeamSearchScorer``) return the next
        beam scores, tokens and indices.
        """
        raise NotImplementedError("This is an abstract method.")
class BeamHypotheses:
    """Keeps the ``num_beams`` best finished hypotheses for one batch element."""

    def __init__(self, num_beams: int, length_penalty: float, early_stopping: bool):
        """Initialize an empty n-best list of hypotheses."""
        self.length_penalty = length_penalty
        self.early_stopping = early_stopping
        self.num_beams = num_beams
        self.beams = []
        # sentinel larger than any real score so the first hypothesis always fits
        self.worst_score = 1e9

    def __len__(self) -> int:
        """Number of hypotheses currently stored."""
        return len(self.beams)

    def add(
        self, hyp: flow.Tensor, sum_logprobs: float, beam_indices: Optional[flow.Tensor] = None
    ):
        """Insert a hypothesis, evicting the current worst one if the list is full."""
        # length-penalized score
        score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty)
        if len(self) >= self.num_beams and score <= self.worst_score:
            return  # not good enough to enter the n-best list
        self.beams.append((score, hyp, beam_indices))
        if len(self) > self.num_beams:
            # drop the worst entry and remember the new worst score
            ranked = sorted((s, idx) for idx, (s, _, _) in enumerate(self.beams))
            del self.beams[ranked[0][1]]
            self.worst_score = ranked[1][0]
        else:
            self.worst_score = min(score, self.worst_score)

    def is_done(self, best_sum_logprobs: float, cur_len: int) -> bool:
        """Whether no ongoing beam can still beat the worst stored hypothesis."""
        if len(self) < self.num_beams:
            return False
        if self.early_stopping:
            return True
        # best score any ongoing beam could still achieve at this length
        highest_attainable = best_sum_logprobs / cur_len ** self.length_penalty
        return self.worst_score >= highest_attainable
class BeamSearchScorer(BeamScorer):
    def __init__(
        self,
        batch_size: int,
        num_beams: int,
        length_penalty: Optional[float] = 1.0,
        do_early_stopping: Optional[bool] = False,
        num_beam_hyps_to_keep: Optional[int] = 1,
        num_beam_groups: Optional[int] = 1,
        **kwargs,
    ):
        """Standard beam-search scorer.

        Args:
            batch_size: number of examples being decoded in parallel.
            num_beams: beams kept per example (must be an int > 1).
            length_penalty: exponent used to length-normalize hypothesis scores.
            do_early_stopping: stop an example as soon as num_beams hypotheses finish.
            num_beam_hyps_to_keep: finished hypotheses returned per example.
            num_beam_groups: must divide ``num_beams``; group size is derived below.
            **kwargs: only inspected for the deprecated ``max_length``.
        """
        self.num_beams = num_beams
        self.length_penalty = length_penalty
        self.do_early_stopping = do_early_stopping
        self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
        self.num_beam_groups = num_beam_groups
        # beams handled together within one diversity group
        self.group_size = self.num_beams // self.num_beam_groups
        # NOTE(review): `_is_init` is never read anywhere in the visible code --
        # possibly dead state; confirm before removing.
        self._is_init = False
        # one n-best hypothesis container per example in the batch
        self._beam_hyps = [
            BeamHypotheses(
                num_beams=self.num_beams,
                length_penalty=self.length_penalty,
                early_stopping=self.do_early_stopping,
            )
            for _ in range(batch_size)
        ]
        # per-example "finished" flags, broadcast to every device
        self._done = flow.tensor(
            [False for _ in range(batch_size)],
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=flow.placement("cuda", list(range(dist.get_world_size()))),
        )
        # Argument validation happens after state allocation; invalid values still
        # raise before the instance can be used, so the ordering is harmless.
        if not isinstance(num_beams, int) or num_beams <= 1:
            raise ValueError(
                f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}."
                "For `num_beams` == 1, one should make use of `greedy_search` instead."
            )
        if (
            not isinstance(num_beam_groups, int)
            or (num_beam_groups > num_beams)
            or (num_beams % num_beam_groups != 0)
        ):
            raise ValueError(
                "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and "
                f"`num_beams` has to be divisible by `num_beam_groups`, but is {num_beam_groups}"
                f"with `num_beams` being {num_beams}."
            )
        if "max_length" in kwargs:
            warnings.warn(
                "Passing `max_length` to BeamSearchScorer is deprecated and has no effect. "
                "`max_length` should be passed directly to `beam_search(...)`, `beam_sample(...)`"
                ", or `group_beam_search(...)`."
            )
    @property
    def is_done(self) -> bool:
        """Whether every example in the batch has finished beam search."""
        return self._done.all()
    def process(
        self,
        input_ids: flow.Tensor,
        next_scores: flow.Tensor,
        next_tokens: flow.Tensor,
        next_indices: flow.Tensor,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
        beam_indices: Optional[flow.Tensor] = None,
    ) -> Tuple[flow.Tensor]:
        """Advance beam search by one step for every example in the batch.

        EOS candidates among the top ``group_size`` are moved into the per-example
        ``BeamHypotheses``; the remaining candidates fill the next step's beams.

        Args:
            input_ids: current sequences, dim 0 = batch_size * group_size.
            next_scores / next_tokens / next_indices: per-example candidate
                scores, token ids, and originating beam indices
                (presumably 2 * group_size candidates each -- confirm caller).
            pad_token_id / eos_token_id: special token ids (required once any
                example is done).
            beam_indices: optional per-beam history of chosen beam indices.

        Returns:
            UserDict with flattened "next_beam_scores", "next_beam_tokens",
            and "next_beam_indices".
        """
        cur_len = input_ids.shape[-1]
        batch_size = len(self._beam_hyps)
        if not (batch_size == (input_ids.shape[0] // self.group_size)):
            if self.num_beam_groups > 1:
                raise ValueError(
                    f"A group beam size of {input_ids.shape[0]} is used as the input, but a group "
                    f"beam size of {self.group_size} is expected by the beam scorer."
                )
            else:
                raise ValueError(
                    f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
                    f"{self.group_size} is expected by the beam scorer."
                )
        # Output buffers, broadcast to every device.
        next_beam_scores = flow.zeros(
            (batch_size, self.group_size),
            dtype=next_scores.dtype,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=flow.placement("cuda", list(range(dist.get_world_size()))),
        )
        next_beam_tokens = flow.zeros(
            (batch_size, self.group_size),
            dtype=next_tokens.dtype,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=flow.placement("cuda", list(range(dist.get_world_size()))),
        )
        next_beam_indices = flow.zeros(
            (batch_size, self.group_size),
            dtype=next_indices.dtype,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=flow.placement("cuda", list(range(dist.get_world_size()))),
        )
        for batch_idx, beam_hyp in enumerate(self._beam_hyps):
            if self._done[batch_idx]:
                if self.num_beams < len(beam_hyp):
                    raise ValueError(
                        f"Batch can only be done if at least {self.num_beams} beams have "
                        "been generated"
                    )
                if eos_token_id is None or pad_token_id is None:
                    raise ValueError(
                        "Generated beams >= num_beams -> eos_token_id and pad_token have "
                        "to be defined"
                    )
                # pad the batch
                next_beam_scores[batch_idx, :] = 0
                next_beam_tokens[batch_idx, :] = pad_token_id
                next_beam_indices[batch_idx, :] = 0
                continue
            # next tokens for this sentence
            beam_idx = 0
            for beam_token_rank, (next_token, next_score, next_index) in enumerate(
                zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
            ):
                batch_beam_idx = batch_idx * self.group_size + next_index
                # add to generated hypotheses if end of sentence
                if (eos_token_id is not None) and (next_token.item() == eos_token_id):
                    # if beam_token does not belong to top num_beams tokens, it should not be added
                    is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size
                    if is_beam_token_worse_than_top_num_beams:
                        continue
                    if beam_indices is not None:
                        beam_index = beam_indices[batch_beam_idx]
                        beam_index = beam_index + (next_index,)
                    else:
                        beam_index = None
                    beam_hyp.add(
                        input_ids[batch_beam_idx].clone(),
                        next_score.item(),
                        beam_indices=beam_index,
                    )
                else:
                    # add next predicted token since it is not eos_token
                    next_beam_scores[batch_idx, beam_idx] = next_score
                    next_beam_tokens[batch_idx, beam_idx] = next_token
                    next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
                    beam_idx += 1
                # once the beam for next step is full, don't add more tokens to it.
                if beam_idx == self.group_size:
                    break
            if beam_idx < self.group_size:
                raise ValueError(
                    f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal "
                    f"to `eos_token_id: {eos_token_id}`. Make sure {next_tokens[batch_idx]} "
                    "are corrected."
                )
            # Check if we are done so that we can save a pad step if all(done)
            self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done(
                next_scores[batch_idx].max().item(), cur_len
            )
        return UserDict(
            {
                "next_beam_scores": next_beam_scores.view(-1),
                "next_beam_tokens": next_beam_tokens.view(-1),
                "next_beam_indices": next_beam_indices.view(-1),
            }
        )
    def finalize(
        self,
        input_ids: flow.Tensor,
        final_beam_scores: flow.Tensor,
        final_beam_tokens: flow.Tensor,
        final_beam_indices: flow.Tensor,
        max_length: int,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
        beam_indices: Optional[flow.Tensor] = None,
    ):
        """Close out all open beams and assemble the final padded output sequences.

        Returns a ``UserDict`` with:
          - ``"sequences"``: (batch_size * num_beam_hyps_to_keep, sent_max_len)
            token ids, padded with ``pad_token_id`` where hypotheses are shorter
            and terminated with ``eos_token_id`` when it fits;
          - ``"sequence_scores"``: score of each kept hypothesis;
          - ``"beam_indices"``: per-step beam indices, or None when not tracked.

        NOTE: ``final_beam_tokens`` and ``final_beam_indices`` are accepted for
        API compatibility but are not used in this implementation.
        """
        batch_size = len(self._beam_hyps)
        # finalize all open beam hypotheses and add to generated hypotheses
        for batch_idx, beam_hyp in enumerate(self._beam_hyps):
            if self._done[batch_idx]:
                # This batch entry already collected enough finished beams.
                continue
            # all open beam hypotheses are added to the beam hypothesis
            # beam hypothesis class automatically keeps the best beams
            for beam_id in range(self.num_beams):
                batch_beam_idx = batch_idx * self.num_beams + beam_id
                final_score = final_beam_scores[batch_beam_idx].item()
                final_tokens = input_ids[batch_beam_idx]
                beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None
                beam_hyp.add(final_tokens, final_score, beam_indices=beam_index)
        # select the best hypotheses
        sent_lengths = flow.zeros(
            batch_size * self.num_beam_hyps_to_keep,
            dtype=flow.long,
            sbp=input_ids.sbp,
            placement=input_ids.placement,
        )
        best = []
        best_indices = []
        # Scores live broadcast on all devices so every rank sees the result.
        best_scores = flow.zeros(
            batch_size * self.num_beam_hyps_to_keep,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=flow.placement("cuda", list(range(dist.get_world_size()))),
        )
        # retrieve best hypotheses
        for i, beam_hyp in enumerate(self._beam_hyps):
            # beams are (score, tokens, indices) tuples; sort ascending by score
            # and pop from the end to take the highest-scoring ones first.
            sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0])
            for j in range(self.num_beam_hyps_to_keep):
                best_hyp_tuple = sorted_hyps.pop()
                best_score = best_hyp_tuple[0]
                best_hyp = best_hyp_tuple[1]
                best_index = best_hyp_tuple[2]
                sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp)
                # append hyp to lists
                best.append(best_hyp)
                # append indices to list
                best_indices.append(best_index)
                best_scores[i * self.num_beam_hyps_to_keep + j] = best_score
        # prepare for adding eos
        sent_lengths_max = sent_lengths.max().item() + 1
        sent_max_len = (
            min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max
        )
        decoded = flow.zeros(
            (batch_size * self.num_beam_hyps_to_keep, sent_max_len),
            dtype=flow.long,
            sbp=input_ids.sbp,
            placement=input_ids.placement,
        )
        if len(best_indices) > 0 and best_indices[0] is not None:
            indices = flow.zeros(
                (batch_size * self.num_beam_hyps_to_keep, sent_max_len),
                dtype=flow.long,
                sbp=input_ids.sbp,
                placement=input_ids.placement,
            )
        else:
            indices = None
        # shorter batches are padded if needed
        if sent_lengths.min().item() != sent_lengths.max().item():
            assert pad_token_id is not None, "`pad_token_id` has to be defined"
            decoded.fill_(pad_token_id)
            if indices is not None:
                indices.fill_(-1)
        # fill with hypotheses and eos_token_id if the latter fits in
        for i, (hypo, best_idx) in enumerate(zip(best, best_indices)):
            decoded[i, : sent_lengths[i]] = hypo
            if indices is not None:
                indices[i, : len(best_idx)] = flow.tensor(best_idx)
            if sent_lengths[i] < sent_max_len:
                # There is room left: explicitly terminate the sequence.
                decoded[i, sent_lengths[i]] = eos_token_id
        return UserDict(
            {
                "sequences": decoded,
                "sequence_scores": best_scores,
                "beam_indices": indices,
            }
        )
| 14,749 | 38.438503 | 100 | py |
libai | libai-main/libai/inference/generator/generation_logits_processor.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2020 The Google AI Language Team Authors, Facebook AI Research authors and
# The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import math
from typing import Callable, List, Tuple
import oneflow as flow
class LogitsProcessorList(list):
    """An ordered list of logits processors applied sequentially.

    Each element must be callable as ``processor(input_ids, scores)`` or, for
    processors with extra parameters, ``processor(input_ids, scores, **kwargs)``;
    the scores returned by one processor are fed into the next.
    """

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor, **kwargs) -> flow.Tensor:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 2:
                # Processors taking more than (input_ids, scores) must receive
                # every extra parameter through **kwargs.
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} "
                        # BUGFIX: this string was missing the f-prefix, so the
                        # literal "{processor.__class__}" was printed instead of
                        # the offending processor's class.
                        f"for {processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, **kwargs)
            else:
                scores = processor(input_ids, scores)
        return scores
class NormalizationLogitsProcessor(object):
    """Renormalize raw logits into log-probabilities over the vocabulary."""

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> flow.Tensor:
        # log_softmax along the last (vocab) dimension is numerically stable.
        return scores.log_softmax(dim=-1)
class InfNanRemoveLogitsProcessor(object):
    """Sanitize logits so downstream sampling never sees NaN or +inf."""

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> flow.Tensor:
        # NaN is the only value not equal to itself; zero those entries out.
        nan_mask = scores != scores
        scores[nan_mask] = 0.0
        # Clamp +inf down to the largest finite value of the dtype.
        pos_inf_mask = scores == float("inf")
        scores[pos_inf_mask] = flow.finfo(scores.dtype).max
        return scores
class ForcedEOSTokenLogitsProcessor(object):
    """Force `eos_token_id` to be generated on the last possible step."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> flow.Tensor:
        # One step before max_length, ban every token except EOS.
        if input_ids.shape[-1] == self.max_length - 1:
            vocab = scores.shape[1]
            banned = [tok for tok in range(vocab) if tok != self.eos_token_id]
            scores[:, banned] = -float("inf")
            scores[:, self.eos_token_id] = 0
        return scores
class ForcedBOSTokenLogitsProcessor(object):
    """Force `bos_token_id` to be the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> flow.Tensor:
        # Right after the initial token, ban everything except BOS.
        if input_ids.shape[-1] == 1:
            vocab = scores.shape[1]
            banned = [tok for tok in range(vocab) if tok != self.bos_token_id]
            scores[:, banned] = -float("inf")
            scores[:, self.bos_token_id] = 0
        return scores
class RepetitionPenaltyLogitsProcessor(object):
    """Penalize tokens already present in `input_ids` (CTRL-style penalty)."""

    def __init__(self, penalty: float):
        if not isinstance(penalty, float) or not (penalty > 0):
            raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
        self.penalty = penalty

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> flow.Tensor:
        seen_scores = flow.gather(scores, 1, input_ids)
        # Multiplying a negative logit (or dividing a positive one) by the
        # penalty always makes the token less likely.
        penalized = flow.where(
            seen_scores < 0, seen_scores * self.penalty, seen_scores / self.penalty
        )
        return flow.scatter(scores, 1, input_ids, penalized)
class HammingDiversityLogitsProcessor(object):
    """Hamming-diversity term of diverse beam search (Vijayakumar et al., 2016).

    For each batch entry, tokens already selected by *earlier* beam groups in
    the current decoding step have their logits reduced by
    ``diversity_penalty`` per occurrence, pushing later groups toward
    different continuations.
    """

    def __init__(self, diversity_penalty: float, num_beams: int, num_beam_groups: int):
        if not isinstance(diversity_penalty, float) or (not diversity_penalty > 0.0):
            raise ValueError("`diversity_penalty` should be a float strictly larger than 0.")
        self._diversity_penalty = diversity_penalty
        if not isinstance(num_beams, int) or num_beams < 2:
            raise ValueError("`num_beams` should be an integer strictly larger than 1.")
        self._num_beams = num_beams
        if not isinstance(num_beam_groups, int) or num_beam_groups < 2:
            raise ValueError("`num_beam_groups` should be an integer strictly larger than 1.")
        if num_beam_groups > num_beams:
            raise ValueError("`beam_groups` has to be smaller or equal to `num_beams`.")
        self._num_sub_beams = num_beams // num_beam_groups

    def __call__(self, input_ids, scores, current_tokens, beam_group_idx) -> flow.Tensor:
        # BUGFIX: the previous implementation converted `scores` to numpy and
        # then mixed it with tensor arithmetic (`flow.bincount` output), so the
        # return type depended on the branch taken and the subtraction mixed
        # ndarray and tensor operands. Keep everything a tensor end to end.
        batch_size = current_tokens.shape[0] // self._num_beams
        group_start_idx = beam_group_idx * self._num_sub_beams
        group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams)
        group_size = group_end_idx - group_start_idx
        vocab_size = scores.shape[-1]
        if group_start_idx == 0:
            # The first group has no predecessors to diversify against.
            return scores
        for batch_idx in range(batch_size):
            # Tokens picked by all previous groups for this batch entry.
            previous_group_tokens = current_tokens[
                batch_idx * self._num_beams : batch_idx * self._num_beams + group_start_idx
            ]
            token_frequency = flow.bincount(previous_group_tokens, minlength=vocab_size)
            scores[batch_idx * group_size : (batch_idx + 1) * group_size] = (
                scores[batch_idx * group_size : (batch_idx + 1) * group_size]
                - self._diversity_penalty * token_frequency
            )
        return scores
def _get_ngrams(ngram_size: int, prev_input_ids: flow.Tensor, num_hypos: int):
generated_ngrams = [{} for _ in range(num_hypos)]
for idx in range(num_hypos):
gen_tokens = prev_input_ids[idx].tolist()
generated_ngram = generated_ngrams[idx]
for ngram in zip(*[gen_tokens[i:] for i in range(ngram_size)]):
prev_ngram_tuple = tuple(ngram[:-1])
generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [
ngram[-1]
]
return generated_ngrams
def _get_generated_ngrams(banned_ngrams, prev_input_ids, ngram_size, cur_len):
start_idx = cur_len + 1 - ngram_size
ngram_idx = tuple(prev_input_ids[start_idx:cur_len].tolist())
return banned_ngrams.get(ngram_idx, [])
def _calc_banned_ngram_tokens(
    ngram_size: int, prev_input_ids: flow.Tensor, num_hypos: int, cur_len: int
):
    """For each hypothesis, list the tokens that would repeat an existing n-gram."""
    if cur_len + 1 < ngram_size:
        # Not enough generated tokens yet to complete a single n-gram.
        return [[] for _ in range(num_hypos)]
    generated_ngrams = _get_ngrams(ngram_size, prev_input_ids, num_hypos)
    return [
        _get_generated_ngrams(
            generated_ngrams[hypo_idx], prev_input_ids[hypo_idx], ngram_size, cur_len
        )
        for hypo_idx in range(num_hypos)
    ]
class NoRepeatNGramLogitsProcessor(object):
    """Disallow any n-gram of size `ngram_size` from appearing twice in a hypothesis."""

    def __init__(self, ngram_size: int):
        if not isinstance(ngram_size, int) or ngram_size <= 0:
            raise ValueError(
                f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}"
            )
        self.ngram_size = ngram_size

    def __call__(self, input_ids, scores) -> flow.Tensor:
        banned_batch_tokens = _calc_banned_ngram_tokens(
            self.ngram_size, input_ids, scores.shape[0], input_ids.shape[-1]
        )
        # Mask every token that would close an already-generated n-gram.
        for hypo_idx, banned in enumerate(banned_batch_tokens):
            scores[hypo_idx, banned] = -float("inf")
        return scores
class EncoderNoRepeatNGramLogitsProcessor(object):
    """Prevent the decoder from copying any n-gram that occurs in the encoder input."""

    def __init__(self, encoder_ngram_size: int, encoder_input_ids: flow.Tensor):
        if not isinstance(encoder_ngram_size, int) or encoder_ngram_size <= 0:
            raise ValueError(
                "`encoder_ngram_size` has to be a strictly positive integer, but is "
                f"{encoder_ngram_size}"
            )
        self.ngram_size = encoder_ngram_size
        if len(encoder_input_ids.shape) == 1:
            # Promote a single sequence to a batch of one.
            encoder_input_ids = encoder_input_ids.unsqueeze(0)
        self.batch_size = encoder_input_ids.shape[0]
        # Precompute the encoder n-grams once; they never change during decoding.
        self.generated_ngrams = _get_ngrams(encoder_ngram_size, encoder_input_ids, self.batch_size)

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> flow.Tensor:
        num_hypos = scores.shape[0]  # batch_size * num_beams
        num_beams = num_hypos // self.batch_size
        cur_len = input_ids.shape[-1]
        for hypo_idx in range(num_hypos):
            # All beams of one batch entry share that entry's encoder n-grams.
            banned = _get_generated_ngrams(
                self.generated_ngrams[hypo_idx // num_beams],
                input_ids[hypo_idx],
                self.ngram_size,
                cur_len,
            )
            scores[hypo_idx, banned] = -float("inf")
        return scores
class MinLengthLogitsProcessor(object):
    """Block EOS until at least `min_length` tokens have been generated."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> flow.Tensor:
        if input_ids.shape[-1] < self.min_length:
            # Sequence is still too short: make EOS impossible to sample.
            scores[:, self.eos_token_id] = -float("inf")
        return scores
class PrefixConstrainedLogitsProcessor(object):
    """Restrict generation to tokens allowed by a user-supplied prefix function."""

    def __init__(
        self, prefix_allowed_tokens_fn: Callable[[int, flow.Tensor], List[int]], num_beams: int
    ):
        self._prefix_allowed_tokens_fn = prefix_allowed_tokens_fn
        self._num_beams = num_beams

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> flow.Tensor:
        # Start from an all-banned mask, then open up the allowed entries.
        mask = flow.full_like(scores, -math.inf)
        beams_per_batch = input_ids.view(-1, self._num_beams, input_ids.shape[-1])
        for batch_id, beam_sent in enumerate(beams_per_batch):
            for beam_id, sent in enumerate(beam_sent):
                row = batch_id * self._num_beams + beam_id
                mask[row, self._prefix_allowed_tokens_fn(batch_id, sent)] = 0
        return scores + mask
class ExponentialDecayLengthPenalty(object):
    """Exponentially scale the EOS logit once generation passes a start point."""

    def __init__(
        self, exponential_decay_length_penalty: Tuple, eos_token_id: int, input_ids_seq_length: int
    ):
        start, factor = exponential_decay_length_penalty
        # Regulation begins `start` tokens after the end of the prompt.
        self.regulation_start = start + input_ids_seq_length
        self.regulation_factor = factor
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> flow.Tensor:
        cur_len = input_ids.shape[-1]
        if cur_len > self.regulation_start:
            overshoot = cur_len - self.regulation_start
            # The multiplier grows exponentially with the overshoot length.
            scores[:, self.eos_token_id] = scores[:, self.eos_token_id] * pow(
                self.regulation_factor, overshoot
            )
        return scores
class TemperatureLogitsWarper(object):
    """Rescale logits by 1/temperature (<1 sharpens, >1 flattens the distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(
                f"`temperature` has to be a strictly positive float, but is {temperature}"
            )
        self.temperature = temperature

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> flow.Tensor:
        return scores / self.temperature
class TopPLogitsWarper(object):
    """Nucleus (top-p) filtering: keep the smallest token set with mass > top_p."""

    def __init__(
        self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1
    ):
        top_p = float(top_p)
        if top_p < 0 or top_p > 1.0:
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> flow.Tensor:
        sorted_logits, sorted_indices = flow.sort(scores, descending=True)
        cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
        # Ban every token whose cumulative probability already exceeds top_p.
        remove_sorted = cumulative_probs > self.top_p
        if self.min_tokens_to_keep > 1:
            # Guarantee min_tokens_to_keep survivors (-1 because of the shift below).
            remove_sorted[..., : self.min_tokens_to_keep - 1] = 0
        # Shift right so the first token crossing the threshold is still kept.
        remove_sorted[..., 1:] = remove_sorted[..., :-1].clone()
        remove_sorted[..., 0] = 0
        # Map the mask back from sorted order to vocabulary order.
        indices_to_remove = flow.scatter(remove_sorted, 1, sorted_indices, remove_sorted)
        return scores.masked_fill(indices_to_remove, self.filter_value)
class TopKLogitsWarper(object):
    """Keep only the `top_k` highest-scoring tokens; ban everything else."""

    def __init__(
        self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1
    ):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = top_k
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> flow.Tensor:
        # Clamp k into [min_tokens_to_keep, vocab_size].
        effective_k = min(max(self.top_k, self.min_tokens_to_keep), scores.size(-1))
        # Everything strictly below the k-th best score is filtered out.
        kth_best = flow.topk(scores, effective_k)[0][..., -1, None]
        below_cutoff = scores < kth_best
        return scores.masked_fill(below_cutoff, self.filter_value)
class TypicalLogitsWarper(object):
    """Typical decoding: keep tokens whose information content is closest to the
    distribution's entropy, up to a cumulative probability of `mass`."""

    def __init__(
        self, mass: float = 0.9, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1
    ):
        mass = float(mass)
        if not (mass > 0 and mass < 1):
            raise ValueError(f"`typical_p` has to be a float > 0 and < 1, but is {mass}")
        self.filter_value = filter_value
        self.mass = mass
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: flow.Tensor, scores: flow.Tensor) -> flow.Tensor:
        # Entropy of the predictive distribution, one value per row.
        log_probs = flow.nn.functional.log_softmax(scores, dim=-1)
        probs = flow.exp(log_probs)
        entropy = -flow.nansum(log_probs * probs, dim=-1, keepdim=True)
        # Distance between each token's surprisal and the entropy; the most
        # "typical" tokens (smallest distance) come first after sorting.
        typicality = flow.abs((-log_probs) - entropy)
        sorted_typicality, sorted_indices = flow.sort(typicality, descending=False)
        sorted_logits = scores.gather(-1, sorted_indices)
        cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
        # Index of the last token kept under the probability-mass budget.
        last_ind = (cumulative_probs < self.mass).sum(dim=1)
        last_ind[last_ind < 0] = 0
        cutoff = sorted_typicality.gather(1, last_ind.view(-1, 1))
        sorted_indices_to_remove = sorted_typicality > cutoff
        if self.min_tokens_to_keep > 1:
            # Always keep at least the min_tokens_to_keep most-typical tokens.
            sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0
        # Map the mask back from sorted order to vocabulary order.
        indices_to_remove = flow.scatter(
            sorted_indices_to_remove, 1, sorted_indices, sorted_indices_to_remove
        )
        return scores.masked_fill(indices_to_remove, self.filter_value)
| 15,960 | 40.349741 | 100 | py |
libai | libai-main/tools/preprocess_data.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processing data for pretraining."""
import argparse
import json
import multiprocessing
import os
import sys
import time
import oneflow as flow
from omegaconf import OmegaConf
from libai.config import LazyCall
try:
import nltk
nltk_available = True
except ImportError:
nltk_available = False
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from libai import tokenizer
from libai.data.data_utils import indexed_dataset
from libai.tokenizer import build_tokenizer
# https://stackoverflow.com/questions/33139531/preserve-empty-lines-with-nltks-punkt-tokenizer
class CustomLanguageVars(nltk.tokenize.punkt.PunktLanguageVars):
    """Punkt sentence-splitter variables that preserve trailing whitespace.

    The stock Punkt regex consumes the whitespace following a sentence-ending
    character, which destroys blank lines between sentences; this variant
    captures that whitespace as part of the sentence instead (see the
    StackOverflow link above the class).
    """

    # Verbose-mode regex template consumed by Punkt; only the `\s*` line
    # differs from the upstream default (it keeps the trailing whitespace).
    _period_context_fmt = r"""
        \S* # some word material
        %(SentEndChars)s # a potential sentence ending
        \s* # <-- THIS is what I changed
        (?=(?P<after_tok>
        %(NonWord)s # either other punctuation
        |
        (?P<next_tok>\S+) # <-- Normally you would have \s+ here
        ))"""
class IdentitySplitter(object):
    """No-op sentence splitter: treats each document as a single "sentence"."""

    def tokenize(self, *text):
        # Mirrors the nltk splitter interface without doing any splitting.
        return text
class Encoder(object):  # split sentence, tokenize
    """Callable worker that splits documents into sentences and tokenizes them.

    Designed for ``multiprocessing.Pool``: ``initializer`` runs once in each
    worker process and stashes the tokenizer/splitter on the *class* itself
    (acting as per-process global state), so the picklable bound method
    ``encode`` can reach them inside the workers.
    """

    def __init__(self, args, cfg):
        self.args = args
        self.cfg = cfg

    def initializer(self):
        # Use Encoder class as a container for global data
        Encoder.tokenizer = build_tokenizer(self.cfg)
        if self.args.split_sentences:
            if not nltk_available:
                print("NLTK is not available to split sentences.")
                exit()
            splitter = nltk.load("tokenizers/punkt/english.pickle")
            if self.args.keep_newlines:
                # this prevents punkt from eating newlines after sentences
                Encoder.splitter = nltk.tokenize.punkt.PunktSentenceTokenizer(
                    train_text=splitter._params, lang_vars=CustomLanguageVars()
                )
            else:
                Encoder.splitter = splitter
        else:
            # No sentence splitting requested: keep the whole document intact.
            Encoder.splitter = IdentitySplitter()

    def encode(self, json_line):
        # Tokenize one JSON line. Returns ({key: [[sentence ids], ...]}, bytes read).
        data = json.loads(json_line)
        ids = {}
        for key in self.args.json_keys:
            text = data[key]
            doc_ids = []
            for sentence in Encoder.splitter.tokenize(text):
                sentence_ids = Encoder.tokenizer.encode(sentence)
                if len(sentence_ids) > 0:
                    doc_ids.append(sentence_ids)
            if (
                len(doc_ids) > 0 and self.args.append_eod
            ):  # append the eod token at the end of the document
                doc_ids[-1].append(Encoder.tokenizer.eod)
            ids[key] = doc_ids
        return ids, len(json_line)
def get_args():
    """Build and parse the command-line arguments for data preprocessing."""
    parser = argparse.ArgumentParser()

    input_group = parser.add_argument_group(title="input data")
    input_group.add_argument("--input", type=str, required=True, help="Path to input JSON")
    input_group.add_argument(
        "--json-keys",
        nargs="+",
        default=["text"],
        help="space separate listed of keys to extract from json",
    )
    input_group.add_argument(
        "--split-sentences", action="store_true", help="Split documents into sentences."
    )
    input_group.add_argument(
        "--keep-newlines",
        action="store_true",
        help="Keep newlines between sentences when splitting.",
    )

    tokenizer_group = parser.add_argument_group(title="tokenizer")
    tokenizer_group.add_argument(
        "--tokenizer-name",
        type=str,
        required=True,
        choices=["BertTokenizer", "GPT2Tokenizer", "T5Tokenizer", "RobertaTokenizer"],
        help="What type of tokenizer to use.",
    )
    tokenizer_group.add_argument(
        "--vocab-file", type=str, default=None, help="Path to the vocab file"
    )
    tokenizer_group.add_argument(
        "--merges-file",
        type=str,
        default=None,
        help="Path to the BPE merge file (if necessary).",
    )
    tokenizer_group.add_argument(
        "--do-lower-case", action="store_true", help="Whether to do lower case."
    )
    tokenizer_group.add_argument("--extra-ids", type=int, default=0, help="Number of extra ids.")
    tokenizer_group.add_argument(
        "--append-eod",
        action="store_true",
        help="Append an <eod> token to the end of a document.",
    )
    tokenizer_group.add_argument(
        "--do-chinese-wwm", action="store_true", help="Whether to do whole word mask for Chinese."
    )

    output_group = parser.add_argument_group(title="output data")
    output_group.add_argument(
        "--output-prefix",
        type=str,
        required=True,
        help="Path to binary output file without suffix",
    )
    output_group.add_argument(
        "--dataset-impl", type=str, default="mmap", choices=["lazy", "cached", "mmap"]
    )

    runtime_group = parser.add_argument_group(title="runtime")
    runtime_group.add_argument(
        "--workers", type=int, default=1, help="Number of worker processes to launch"
    )
    runtime_group.add_argument(
        "--log-interval",
        type=int,
        default=100,
        help="Interval between progress updates",
    )

    args = parser.parse_args()
    # BERT-style pretraining usually expects sentence-level documents.
    if args.tokenizer_name.startswith("Bert"):
        if not args.split_sentences:
            print("Bert tokenizer detected, are you sure you don't want to split sentences?")
    return args
def parse_args_to_config(args):
    """Translate parsed CLI arguments into a LiBai tokenization config node."""
    tokenization = OmegaConf.create()
    # Start from a LazyCall template, then override every field from `args`.
    tokenization.tokenizer = LazyCall(getattr(tokenizer, args.tokenizer_name))(
        vocab_file="bert-base-chinese-vocab.txt",
        do_lower_case=True,
        do_chinese_wwm=True,
    )
    tok_cfg = tokenization.tokenizer
    tok_cfg.vocab_file = args.vocab_file
    tok_cfg.merges_file = args.merges_file
    tok_cfg.do_lower_case = args.do_lower_case
    tok_cfg.extra_id = args.extra_ids
    tok_cfg.do_chinese_wwm = args.do_chinese_wwm
    tokenization.append_eod = args.append_eod
    return tokenization
def main():
    """Tokenize a JSON-lines corpus in parallel and write .bin/.idx dataset files."""
    args = get_args()
    cfg = parse_args_to_config(args)
    startup_start = time.time()
    print("Opening", args.input)
    fin = open(args.input, "r", encoding="utf-8")
    if nltk_available and args.split_sentences:
        print("Start downloading punkt data...")
        """Download url: http://www.nltk.org/nltk_data/,
        and default save path is `~/nltk_data/tokenizers/punkt.zip`
        """
        nltk.download("punkt", quiet=True)
        print("End download")
    encoder = Encoder(args, cfg)
    tokenizer = build_tokenizer(cfg)
    # Each worker builds its own tokenizer/splitter via encoder.initializer.
    pool = multiprocessing.Pool(args.workers, initializer=encoder.initializer)
    # imap streams lines to workers in chunks of 25, preserving input order.
    encoded_docs = pool.imap(encoder.encode, fin, 25)
    level = "document"
    if args.split_sentences:
        level = "sentence"
    print(f"Vocab size: {tokenizer.vocab_size}")
    print(f"Output prefix: {args.output_prefix}")
    output_bin_files = {}
    output_idx_files = {}
    builders = {}
    # One builder (and one .bin/.idx file pair) per extracted JSON key.
    for key in args.json_keys:
        output_bin_files[key] = "{}_{}_{}.bin".format(args.output_prefix, key, level)
        output_idx_files[key] = "{}_{}_{}.idx".format(args.output_prefix, key, level)
        builders[key] = indexed_dataset.make_builder(
            output_bin_files[key], impl=args.dataset_impl, vocab_size=len(tokenizer)
        )
    startup_end = time.time()
    proc_start = time.time()
    total_bytes_processed = 0
    print("Time to startup:", startup_end - startup_start)
    for i, (doc, bytes_processed) in enumerate(encoded_docs, start=1):
        total_bytes_processed += bytes_processed
        for key, sentences in doc.items():
            if len(sentences) == 0:
                # Document produced no tokens for this key; skip it entirely.
                continue
            for sentence in sentences:
                builders[key].add_item(
                    flow.tensor(sentence, dtype=flow.int32)
                )  # write data into .bin file
            builders[key].end_document()
        if i % args.log_interval == 0:
            # Periodic throughput report on stderr.
            current = time.time()
            elapsed = current - proc_start
            mbs = total_bytes_processed / elapsed / 1024 / 1024
            print(
                f"Processed {i} documents",
                f"({i/elapsed} docs/s, {mbs} MB/s).",
                file=sys.stderr,
            )
    for key in args.json_keys:
        builders[key].finalize(output_idx_files[key])  # write data into .idx file


if __name__ == "__main__":
    main()
| 8,942 | 32.369403 | 98 | py |
libai | libai-main/tools/train_net.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import random
import sys
import numpy as np
import oneflow as flow
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from libai.config import LazyConfig, default_argument_parser, try_get_key
from libai.engine import DefaultTrainer, default_setup
from libai.utils.checkpoint import Checkpointer
logger = logging.getLogger("libai." + __name__)
def main(args):
    """Entry point: load the lazy config, apply CLI overrides, then train or evaluate."""
    cfg = LazyConfig.load(args.config_file)
    cfg = LazyConfig.apply_overrides(cfg, args.opts)
    default_setup(cfg, args)

    # Offset the seed by rank so each process draws a different random stream.
    seed_for_rank = cfg.train.seed + flow.env.get_rank()
    flow.manual_seed(seed_for_rank)
    flow.cuda.manual_seed(seed_for_rank)
    np.random.seed(seed_for_rank)
    random.seed(seed_for_rank)

    if args.fast_dev_run:
        # Shrink the training schedule for a quick smoke test run.
        cfg.train.train_epoch = 0
        cfg.train.train_iter = 20
        cfg.train.evaluation.eval_period = 10
        cfg.train.log_period = 1

    if args.eval_only:
        tokenizer = None
        if try_get_key(cfg, "tokenization") is not None:
            tokenizer = DefaultTrainer.build_tokenizer(cfg)
        model = DefaultTrainer.build_model(cfg)
        # Restore weights from cfg.train.load_weight (or resume a checkpoint).
        Checkpointer(model, save_dir=cfg.train.output_dir).resume_or_load(
            cfg.train.load_weight, resume=args.resume
        )
        if try_get_key(cfg, "graph.enabled", default=False):
            # Static-graph mode: wrap the eager model in an inference graph.
            model = DefaultTrainer.build_graph(cfg, model, is_train=False)
        test_loader = DefaultTrainer.build_test_loader(cfg, tokenizer)
        if len(test_loader) == 0:
            logger.info("No dataset in dataloader.test, please set dataset for dataloader.test")
        _ = DefaultTrainer.test(cfg, test_loader, model)
        return

    trainer = DefaultTrainer(cfg)
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    main(args)
| 2,468 | 33.291667 | 96 | py |
libai | libai-main/projects/mock_transformers/dist_infer_opt.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import init_env # noqa
import oneflow as flow
from omegaconf import DictConfig
from oneflow.utils.global_view import global_mode
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.models.opt import modeling_opt
from libai.layers import Linear
from libai.utils import distributed as dist
# ------replace attention to libai------
temp_class = modeling_opt.OPTAttention  # keep a handle on the original class before patching


class LiBaiOPTAttention(temp_class):
    """OPTAttention with its projections swapped for LiBai tensor-parallel Linear.

    q/k/v use column-parallel layers and the output projection is row-parallel,
    so the attention computation is sharded across tensor-parallel ranks.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): assumes the caller passes `embed_dim` and `bias` as
        # keyword arguments (KeyError otherwise) — verify against the installed
        # transformers version, which may construct OPTAttention positionally.
        embed_dim = kwargs["embed_dim"]
        bias = kwargs["bias"]
        self.k_proj = Linear(embed_dim, embed_dim, bias=bias, parallel="col", dtype=flow.float16)
        self.v_proj = Linear(embed_dim, embed_dim, bias=bias, parallel="col", dtype=flow.float16)
        self.q_proj = Linear(embed_dim, embed_dim, bias=bias, parallel="col", dtype=flow.float16)
        self.out_proj = Linear(embed_dim, embed_dim, bias=bias, parallel="row", dtype=flow.float16)


# Monkey-patch transformers so every OPT model built from here on uses this class.
modeling_opt.OPTAttention = LiBaiOPTAttention
# ----------replace Decoder to libai -----
temp_class = modeling_opt.OPTDecoderLayer  # keep a handle on the original class before patching


class LiBaiOPTDecoderLayer(temp_class):
    """OPTDecoderLayer whose feed-forward projections use LiBai tensor-parallel Linear.

    fc1 is column-parallel and fc2 row-parallel — the standard Megatron split
    for a two-layer MLP.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): assumes the config object is passed positionally as the
        # first argument — verify against the installed transformers version.
        config = args[0]
        self.fc1 = Linear(
            self.embed_dim,
            config.ffn_dim,
            bias=config.enable_bias,
            parallel="col",
            dtype=flow.float16,
        )
        self.fc2 = Linear(
            config.ffn_dim,
            self.embed_dim,
            bias=config.enable_bias,
            parallel="row",
            dtype=flow.float16,
        )


# Monkey-patch transformers so every OPT model built from here on uses this class.
modeling_opt.OPTDecoderLayer = LiBaiOPTDecoderLayer
if __name__ == "__main__":
    # set dist config: 2-way tensor parallelism, no pipeline parallelism.
    # The model is first materialized on CPU to avoid per-GPU peak memory.
    parallel_config = DictConfig(
        dict(
            data_parallel_size=1,
            tensor_parallel_size=2,
            pipeline_parallel_size=1,  # set to 1, unsupport pipeline parallel now
            pipeline_num_layers=None,
            device_type="cpu",
        )
    )
    dist.setup_dist_util(parallel_config)
    # initial and load model (the patched classes above are used during build)
    model = AutoModelForCausalLM.from_pretrained("facebook/opt-2.7b", torch_dtype=flow.float16)
    # set model to cuda and convert parameters to global (distributed) tensors
    dist.set_device_type("cuda")
    model._apply(dist.convert_to_distributed_default_setting)
    # initial tokenizer
    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-2.7b", use_fast=False)
    # get input_ids: tokenize the prompt and broadcast it to all ranks
    prompt = "Hello, I'm am conscious and"
    input_ids = tokenizer(prompt, return_tensors="np").input_ids
    input_ids = flow.from_numpy(input_ids)
    input_ids = input_ids.to_global(
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=dist.get_layer_placement(0),
    )
    # generate id: run generation inside global mode so new tensors created by
    # transformers pick up the broadcast placement automatically
    placement_sbp_dict = dict(
        placement=flow.env.all_device_placement("cuda"),
        sbp=flow.sbp.broadcast,
    )
    with global_mode(True, **placement_sbp_dict):
        generated_ids = model.generate(input_ids, max_length=30)
    out_put_ids = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    if dist.is_main_process():
        # Only rank 0 prints, to avoid duplicated output.
        print(out_put_ids)
| 3,789 | 32.839286 | 99 | py |
libai | libai-main/projects/mock_transformers/dist_infer_llama.py | # coding=utf-8
# Copyright 2021 The Sugon Authors. All rights reserved.
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import init_env # noqa
import oneflow as flow
from omegaconf import DictConfig
from oneflow.utils.global_view import global_mode
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.models.llama import modeling_llama
from libai.layers import Linear
from libai.utils import distributed as dist
# ------replace attention to libai------
temp_class = modeling_llama.LlamaAttention  # keep a handle on the original class before patching


class LiBaiLlamaAttention(temp_class):
    """LlamaAttention with its projections swapped for LiBai tensor-parallel Linear.

    q/k/v use column-parallel layers and the output projection is row-parallel,
    so the attention computation is sharded across tensor-parallel ranks.
    """

    def __init__(self, config):
        super().__init__(config)
        self.q_proj = Linear(
            self.hidden_size,
            self.num_heads * self.head_dim,
            bias=False,
            parallel="col",
            dtype=flow.float16,
        )
        self.k_proj = Linear(
            self.hidden_size,
            self.num_heads * self.head_dim,
            bias=False,
            parallel="col",
            dtype=flow.float16,
        )
        self.v_proj = Linear(
            self.hidden_size,
            self.num_heads * self.head_dim,
            bias=False,
            parallel="col",
            dtype=flow.float16,
        )
        self.o_proj = Linear(
            self.num_heads * self.head_dim,
            self.hidden_size,
            bias=False,
            parallel="row",
            dtype=flow.float16,
        )


# Monkey-patch transformers so every Llama model built from here on uses this class.
modeling_llama.LlamaAttention = LiBaiLlamaAttention
# ----------replace mlp to libai -----
temp_class = modeling_llama.LlamaMLP  # keep a handle on the original class before patching


class LiBaiLlamaMLP(temp_class):
    """LlamaMLP with LiBai tensor-parallel Linear layers.

    Llama's MLP computes ``down_proj(act(gate_proj(x)) * up_proj(x))``. In the
    Megatron-style split, the two input projections (gate/up) must be
    column-parallel (each rank holds a slice of the intermediate dimension) and
    the output projection (down) row-parallel (it consumes the sharded
    intermediate activations and all-reduces the result) — the same fc1=col /
    fc2=row pattern used in the sibling OPT script.

    BUGFIX: the previous version marked ``down_proj`` column-parallel and
    ``up_proj`` row-parallel, sharding the wrong dimensions and producing
    incorrect results whenever tensor_parallel_size > 1.

    NOTE(review): assumes `hidden_size`/`intermediate_size` are passed as
    keyword arguments (KeyError otherwise) — verify against the installed
    transformers version.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        hidden_size = kwargs["hidden_size"]
        intermediate_size = kwargs["intermediate_size"]
        self.gate_proj = Linear(
            hidden_size, intermediate_size, bias=False, parallel="col", dtype=flow.float16
        )
        self.up_proj = Linear(
            hidden_size, intermediate_size, bias=False, parallel="col", dtype=flow.float16
        )
        self.down_proj = Linear(
            intermediate_size, hidden_size, bias=False, parallel="row", dtype=flow.float16
        )


# Monkey-patch transformers so every Llama model built from here on uses this class.
modeling_llama.LlamaMLP = LiBaiLlamaMLP
if __name__ == "__main__":
    # Distributed layout: pure tensor parallelism across 4 devices.
    # device_type="cpu" keeps weights on host memory while loading; they are
    # moved to CUDA further below.
    parallel_config = DictConfig(
        dict(
            data_parallel_size=1,
            tensor_parallel_size=4,
            pipeline_parallel_size=1,  # pipeline parallelism is not supported here
            pipeline_num_layers=None,
            device_type="cpu",
        )
    )
    dist.setup_dist_util(parallel_config)
    # Initialize and load the model (attention/MLP classes were patched above,
    # so from_pretrained builds tensor-parallel layers).
    model = AutoModelForCausalLM.from_pretrained(
        "decapoda-research/llama-13b-hf", torch_dtype=flow.float16
    )
    # Move the (sharded) parameters onto CUDA devices.
    dist.set_device_type("cuda")
    model._apply(dist.convert_to_distributed_default_setting)
    # Initialize the tokenizer (slow tokenizer; fast tokenizers are not mocked).
    tokenizer = AutoTokenizer.from_pretrained("decapoda-research/llama-13b-hf", use_fast=False)
    # Build input_ids as a broadcast global tensor on the first stage.
    prompt = "Hello, I'm am conscious and"
    input_ids = tokenizer(prompt, return_tensors="np").input_ids
    input_ids = flow.from_numpy(input_ids)
    input_ids = input_ids.to_global(
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=dist.get_layer_placement(0),
    )
    # Generate inside global mode so intermediate tensors created by HF code
    # default to broadcast placement on all CUDA devices.
    placement_sbp_dict = dict(
        placement=flow.env.all_device_placement("cuda"),
        sbp=flow.sbp.broadcast,
    )
    with global_mode(True, **placement_sbp_dict):
        generated_ids = model.generate(input_ids, max_length=30)
    out_put_ids = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # All ranks run generate(); only rank 0 prints.
    if dist.is_main_process():
        print(out_put_ids)
| 4,277 | 31.656489 | 95 | py |
libai | libai-main/projects/mock_transformers/init_env.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------mock torch, put it in the first line-----------
import oneflow as flow
flow.mock_torch.enable(lazy=True)
from oneflow import Tensor, nn # noqa
from transformers import modeling_utils # noqa
from transformers.modeling_utils import _load_state_dict_into_model # noqa
# ---------------- mock _load_state_dict_into_model ------------------
def new_load(model_to_load, state_dict, start_prefix):
    """OneFlow-aware replacement for transformers' ``_load_state_dict_into_model``.

    Mirrors the upstream logic (gamma/beta renaming, recursive
    ``_load_from_state_dict``) and additionally re-layouts checkpoint tensors
    to match each global parameter's SBP/placement before loading.

    Returns:
        list[str]: error messages collected by ``_load_from_state_dict``.
    """
    # Convert old format to new format if needed from a PyTorch state_dict
    old_keys = []
    new_keys = []
    for key in state_dict.keys():
        new_key = None
        if "gamma" in key:
            new_key = key.replace("gamma", "weight")
        if "beta" in key:
            new_key = key.replace("beta", "bias")
        if new_key:
            old_keys.append(key)
            new_keys.append(new_key)
    for old_key, new_key in zip(old_keys, new_keys):
        state_dict[new_key] = state_dict.pop(old_key)
    # copy state_dict so _load_from_state_dict can modify it
    metadata = getattr(state_dict, "_metadata", None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata
    error_msgs = []
    # TODO: add start_prefix judgement
    # For every global model parameter, first broadcast the checkpoint tensor
    # across all ranks on CPU, then re-split it to the parameter's own SBP so
    # each rank keeps only its shard.
    for k, v in model_to_load.state_dict().items():
        if k in state_dict and v.is_global:
            state_dict[k] = state_dict[k].to_global(
                sbp=flow.sbp.broadcast, placement=flow.env.all_device_placement("cpu")
            )
            state_dict[k] = state_dict[k].to_global(
                sbp=v.sbp,
                placement=flow.placement("cpu", ranks=list(v.placement.ranks)),
            )

    # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
    # so we need to apply the function recursively.
    def load(module: nn.Module, state_dict, prefix=""):
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        args = (state_dict, prefix, local_metadata, True, [], [], error_msgs)
        # Parameters of module and children will start with prefix.
        # We can exit early if there are none in this state_dict
        if len([key for key in state_dict if key.startswith(prefix)]) > 0:
            module._load_from_state_dict(*args)
        for name, child in module._modules.items():
            if child is not None:
                load(child, state_dict, prefix + name + ".")

    load(model_to_load, state_dict, prefix=start_prefix)
    # Delete `state_dict` so it could be collected by GC earlier.
    # Note that `state_dict` is a copy of the argument, so it's safe to delete it.
    del state_dict
    return error_msgs


modeling_utils._load_state_dict_into_model = new_load
# -----------------mock tensor.new_ones() -------------
def flow_ones(self, *args, **kwargs):
    """Replacement for ``Tensor.new_ones``: ones with ``self``'s device and dtype."""
    return flow.ones(*args, **kwargs, device=self.device, dtype=self.dtype)


Tensor.new_ones = flow_ones
# -----------------mock tensor.new() ------------------
def flow_zeros(self, *args, **kwargs):
    """Replacement for ``Tensor.new``: zeros with ``self``'s device and dtype.

    NOTE(review): torch's ``Tensor.new`` returns *uninitialized* memory; this
    mock deliberately returns zeros instead, which is safe but not identical.
    """
    return flow.zeros(*args, **kwargs, device=self.device, dtype=self.dtype)


Tensor.new = flow_zeros
# ------------------mock nn.functional.softmax---------
temp_func = nn.functional.softmax


def flow_softmax(*args, **kwargs):
    """softmax wrapper that emulates torch's ``dtype=`` keyword.

    OneFlow's softmax does not take ``dtype``; when present, cast the input
    first and forward the remaining arguments unchanged.
    """
    if "dtype" not in kwargs:
        return temp_func(*args, **kwargs)
    target_dtype = kwargs.pop("dtype")
    converted = args[0].to(dtype=target_dtype)
    return temp_func(converted, *args[1:], **kwargs)


nn.functional.softmax = flow_softmax
# -----------------mock flow.tensor---------------
temp_tensor_func = flow.tensor


def flow_tensor(input_x, **kwargs):
    """``flow.tensor`` wrapper that passes plain Python scalars through unchanged."""
    return input_x if isinstance(input_x, (int, float)) else temp_tensor_func(input_x, **kwargs)


flow.tensor = flow_tensor
| 4,360 | 33.338583 | 90 | py |
libai | libai-main/projects/mock_transformers/dist_infer_gpt.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import init_env # noqa
import oneflow as flow
from omegaconf import DictConfig
from oneflow.utils.global_view import global_mode
from transformers import AutoModelForCausalLM, AutoTokenizer, pytorch_utils
from transformers.models.gpt2 import modeling_gpt2
from libai.layers import Conv1D
from libai.utils import distributed as dist
# ------replace Conv1D to libai------
class LiBaiConv1d(Conv1D):
    """Adapter presenting LiBai's tensor-parallel ``Conv1D`` under the HF
    ``pytorch_utils.Conv1D`` calling convention.

    HF's ``Conv1D(nf, nx)`` is (out_features, in_features) in that order; this
    shim maps them onto LiBai's keyword interface.

    NOTE(review): these defaults (``parallel="data"``, ``dtype=flow.float32``)
    apply to any Conv1D transformers constructs directly; the attention/MLP
    subclasses below build their Conv1Ds explicitly with float16 — confirm the
    defaults are only hit for non-sharded layers.
    """

    def __init__(
        self,
        nf,
        nx,
        bias=True,
        parallel="data",
        init_method=flow.nn.init.xavier_normal_,
        skip_bias_add=False,
        dtype=flow.float32,
        layer_idx=0,
    ):
        super().__init__(
            in_features=nx,
            out_features=nf,
            bias=bias,
            parallel=parallel,
            init_method=init_method,
            skip_bias_add=skip_bias_add,
            dtype=dtype,
            layer_idx=layer_idx,
        )


pytorch_utils.Conv1D = LiBaiConv1d
# ------replace attention to libai------
temp_class = modeling_gpt2.GPT2Attention


class LiBaiGPT2Attention(temp_class):
    """GPT2Attention with LiBai tensor-parallel Conv1D projections.

    Input projections (c_attn/q_attn) are column-parallel; the output
    projection (c_proj) is row-parallel, matching the col->row pairing used
    throughout this file.
    """

    def __init__(self, config, is_cross_attention=False, layer_idx=None):
        super().__init__(config, is_cross_attention=is_cross_attention, layer_idx=layer_idx)
        if is_cross_attention:
            # Cross attention: k/v come from the encoder via c_attn (2x width),
            # queries from the decoder via a separate q_attn.
            self.c_attn = Conv1D(
                in_features=self.embed_dim,
                out_features=2 * self.embed_dim,
                parallel="col",
                dtype=flow.float16,
            )
            self.q_attn = Conv1D(
                in_features=self.embed_dim,
                out_features=self.embed_dim,
                parallel="col",
                dtype=flow.float16,
            )
        else:
            # Self attention: fused q/k/v projection (3x width).
            self.c_attn = Conv1D(
                in_features=self.embed_dim,
                out_features=3 * self.embed_dim,
                parallel="col",
                dtype=flow.float16,
            )
        self.c_proj = Conv1D(
            in_features=self.embed_dim,
            out_features=self.embed_dim,
            parallel="row",
            dtype=flow.float16,
        )


modeling_gpt2.GPT2Attention = LiBaiGPT2Attention
# ------replace mlp to libai------
temp_class = modeling_gpt2.GPT2MLP


class LiBaiGPT2MLP(temp_class):
    """GPT2MLP with LiBai tensor-parallel Conv1D layers.

    c_fc expands embed_dim -> intermediate (column-parallel); c_proj contracts
    intermediate -> embed_dim (row-parallel), the same col->row pairing as the
    attention block above.
    """

    def __init__(self, intermediate_size, config):
        super().__init__(intermediate_size, config)
        embed_dim = config.hidden_size
        self.c_fc = Conv1D(
            in_features=embed_dim,
            out_features=intermediate_size,
            parallel="col",
            dtype=flow.float16,
        )
        self.c_proj = Conv1D(
            in_features=intermediate_size,
            out_features=embed_dim,
            parallel="row",
            dtype=flow.float16,
        )


# BUGFIX: the original defined this subclass but never installed it, so
# transformers kept instantiating the stock GPT2MLP and the tensor-parallel
# MLP replacement silently never took effect. Every other patched class in
# this file (Conv1D, GPT2Attention) is assigned back like this.
modeling_gpt2.GPT2MLP = LiBaiGPT2MLP
if __name__ == "__main__":
    # Distributed layout: pure tensor parallelism across 2 devices; weights
    # are materialized on CPU first and moved to CUDA below.
    parallel_config = DictConfig(
        dict(
            data_parallel_size=1,
            tensor_parallel_size=2,
            pipeline_parallel_size=1,  # pipeline parallelism is not supported here
            pipeline_num_layers=None,
            device_type="cpu",
        )
    )
    dist.setup_dist_util(parallel_config)
    # Initialize and load the model (patched classes above make the layers
    # tensor-parallel).
    model = AutoModelForCausalLM.from_pretrained("gpt2", torch_dtype=flow.float16)
    # Move the (sharded) parameters onto CUDA devices.
    dist.set_device_type("cuda")
    model._apply(dist.convert_to_distributed_default_setting)
    # Initialize the tokenizer (slow tokenizer; fast tokenizers are not mocked).
    tokenizer = AutoTokenizer.from_pretrained("gpt2", use_fast=False)
    # Build input_ids as a broadcast global tensor on the first stage.
    prompt = "Hello, I'm a language model,"
    input_ids = tokenizer(prompt, return_tensors="np").input_ids
    input_ids = flow.from_numpy(input_ids)
    input_ids = input_ids.to_global(
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=dist.get_layer_placement(0),
    )
    # Generate inside global mode so tensors created by HF code default to
    # broadcast placement on all CUDA devices.
    placement_sbp_dict = dict(
        placement=flow.env.all_device_placement("cuda"),
        sbp=flow.sbp.broadcast,
    )
    with global_mode(True, **placement_sbp_dict):
        generated_ids = model.generate(input_ids, max_length=30)
    out_put_ids = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # All ranks run generate(); only rank 0 prints.
    if dist.is_main_process():
        print(out_put_ids)
| 4,812 | 29.656051 | 92 | py |
libai | libai-main/projects/mock_transformers/dist_infer_bloom.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import init_env # noqa
import oneflow as flow
from omegaconf import DictConfig
from oneflow.utils.global_view import global_mode
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.models.bloom import modeling_bloom
from libai.layers import Embedding, Linear
from libai.utils import distributed as dist
# ------replace attention to libai------
temp_class = modeling_bloom.BloomAttention


class LiBaiBloomAttention(temp_class):
    """BloomAttention with LiBai tensor-parallel Linear projections.

    query_key_value is column-parallel (fused 3x width) and dense is
    row-parallel.

    NOTE(review): Bloom fuses q/k/v per head ([heads, 3, head_dim]); a column
    split is only consistent if the head count divides evenly across the
    tensor-parallel ranks — confirm for the chosen model/parallel size.
    """

    def __init__(self, config):
        super().__init__(config)
        hidden_size = config.hidden_size
        self.query_key_value = Linear(
            hidden_size, 3 * hidden_size, bias=True, parallel="col", dtype=flow.float16
        )
        self.dense = Linear(hidden_size, hidden_size, bias=True, parallel="row", dtype=flow.float16)


modeling_bloom.BloomAttention = LiBaiBloomAttention
# ----------replace Decoder to libai -----
temp_class = modeling_bloom.BloomMLP


class LiBaiBloomMLP(temp_class):
    """BloomMLP with LiBai tensor-parallel Linear layers (col -> row pairing)."""

    def __init__(self, config):
        super().__init__(config)
        width = config.hidden_size
        expanded = 4 * width
        # Expansion is column-parallel, contraction row-parallel, so the
        # sharded activations flow straight through without a gather.
        self.dense_h_to_4h = Linear(width, expanded, bias=True, parallel="col", dtype=flow.float16)
        self.dense_4h_to_h = Linear(expanded, width, bias=True, parallel="row", dtype=flow.float16)


modeling_bloom.BloomMLP = LiBaiBloomMLP
# ----------replace Embedding to libai -----
temp_class = modeling_bloom.BloomModel


class LiBaiBloomModel(temp_class):
    """BloomModel whose word embedding is replaced with LiBai's Embedding (fp16)."""

    def __init__(self, config):
        super().__init__(config)
        self.word_embeddings = Embedding(config.vocab_size, self.embed_dim, dtype=flow.float16)


modeling_bloom.BloomModel = LiBaiBloomModel
if __name__ == "__main__":
    # Distributed layout: pure tensor parallelism across 2 devices; weights
    # are materialized on CPU first and moved to CUDA below.
    parallel_config = DictConfig(
        dict(
            data_parallel_size=1,
            tensor_parallel_size=2,
            pipeline_parallel_size=1,  # pipeline parallelism is not supported here
            pipeline_num_layers=None,
            device_type="cpu",
        )
    )
    dist.setup_dist_util(parallel_config)
    # Initialize and load the model (patched classes above make the layers
    # tensor-parallel).
    model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m", torch_dtype=flow.float16)
    # Move the (sharded) parameters onto CUDA devices.
    dist.set_device_type("cuda")
    model._apply(dist.convert_to_distributed_default_setting)
    # Initialize the tokenizer (slow tokenizer; fast tokenizers are not mocked).
    tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m", use_fast=False)
    # Build input_ids as a broadcast global tensor on the first stage.
    prompt = "Hello, I'm am conscious and"
    input_ids = tokenizer(prompt, return_tensors="np").input_ids
    input_ids = flow.from_numpy(input_ids)
    input_ids = input_ids.to_global(
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=dist.get_layer_placement(0),
    )
    # Generate inside global mode so tensors created by HF code default to
    # broadcast placement on all CUDA devices.
    placement_sbp_dict = dict(
        placement=flow.env.all_device_placement("cuda"),
        sbp=flow.sbp.broadcast,
    )
    with global_mode(True, **placement_sbp_dict):
        generated_ids = model.generate(input_ids, max_length=30)
    out_put_ids = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # All ranks run generate(); only rank 0 prints.
    if dist.is_main_process():
        print(out_put_ids)
| 3,820 | 30.578512 | 100 | py |
libai | libai-main/projects/mock_transformers/mock_tokenization.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import oneflow as flow
from libai.utils import distributed as dist
flow.mock_torch.enable()
from transformers import BertTokenizer, GPT2Tokenizer, MT5Tokenizer, T5Tokenizer # noqa
from transformers.tokenization_utils_base import * # noqa
from transformers.utils import generic # noqa
from transformers.utils.generic import TensorType # noqa
# ---------------- mock TensorType ------------------
class TensorType(ExplicitEnum):  # noqa
    """Framework tags, redefined to add an ``"of"`` (OneFlow) member.

    Shadows the ``TensorType`` imported above and is installed over
    ``transformers.utils.generic`` below so ``return_tensors="of"`` is legal.
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    ONEFLOW = "of"
    NUMPY = "np"
    JAX = "jax"


generic.TensorType = TensorType
# ---------------- mock convert_to_tensors ------------------
def flow_convert_to_tensors(self, tensor_type=None, prepend_batch_axis=False):
    """Convert a ``BatchEncoding``'s values to framework tensors (in place).

    Extends the upstream implementation with a ``TensorType.ONEFLOW`` branch
    and, when global mode is enabled via the IS_GLOBAL env var (default on),
    converts every OneFlow tensor to a broadcast global tensor on the first
    pipeline stage.

    Args:
        tensor_type: target framework ("pt"/"tf"/"of"/"np"/"jax" or a
            ``TensorType``); ``None`` leaves values untouched.
        prepend_batch_axis: wrap each value in a list to add a batch dim.

    Returns:
        self.
    """
    if tensor_type is None:
        return self
    # Convert to TensorType
    if not isinstance(tensor_type, TensorType):
        tensor_type = TensorType(tensor_type)
    as_tensor = None
    is_tensor = None
    # Get a function reference for the correct framework
    if tensor_type == TensorType.TENSORFLOW:
        if not is_tf_available():  # noqa
            raise ImportError(
                "Unable to convert output to TensorFlow tensors format, TensorFlow is not "
                "installed."
            )
        import tensorflow as tf

        as_tensor = tf.constant
        is_tensor = tf.is_tensor
    elif tensor_type == TensorType.PYTORCH:
        if not is_torch_available():  # noqa
            raise ImportError(
                "Unable to convert output to PyTorch tensors format, PyTorch is not installed."
            )
        import torch

        as_tensor = torch.tensor
        is_tensor = torch.is_tensor
    elif tensor_type == TensorType.ONEFLOW:
        try:
            import oneflow  # noqa
        except ImportError as e:
            msg = "Unable to convert output to OneFlow tensors format, OneFlow is not installed."
            raise ImportError(msg) from e
        as_tensor = flow.tensor
        is_tensor = flow.is_tensor
    elif tensor_type == TensorType.JAX:
        if not is_flax_available():  # noqa
            raise ImportError(
                "Unable to convert output to JAX tensors format, JAX is not installed."
            )
        import jax.numpy as jnp  # noqa: F811

        as_tensor = jnp.array
        is_tensor = is_jax_tensor  # noqa
    else:
        as_tensor = np.asarray  # noqa
        is_tensor = is_numpy_array  # noqa
    # Do the tensor conversion in batch
    for key, value in self.items():
        try:
            if prepend_batch_axis:
                value = [value]
            if not is_tensor(value):
                tensor = as_tensor(value)
                # Removing this for now in favor of controlling the shape with `prepend_batch_axis`
                # # at-least2d
                # if tensor.ndim > 2:
                #     tensor = tensor.squeeze(0)
                # elif tensor.ndim < 2:
                #     tensor = tensor[None, :]
                self[key] = tensor
        except Exception as e:
            if key == "overflowing_tokens":
                raise ValueError(
                    "Unable to create tensor returning overflowing tokens of different lengths. "
                    "Please see if a fast version of this tokenizer is available to have this "
                    "feature available."
                ) from e
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or "
                "padding with 'padding=True' 'truncation=True' to have batched tensors with "
                f"the same length. Perhaps your features (`{key}` in this case) have "
                "excessive nesting (inputs type `list` where type `int` is expected)."
            ) from e
    # BUGFIX: the original tested `os.getenv("IS_GLOBAL", True) is True`, but
    # os.getenv returns a *string* whenever the variable is set, so even
    # IS_GLOBAL=1 or IS_GLOBAL=true disabled the global path. Keep the default
    # (unset -> enabled) and accept common truthy spellings.
    is_global = os.getenv("IS_GLOBAL")
    if is_global is None or is_global.lower() in ("1", "true", "yes"):
        size = self["input_ids"].size()
        sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
        for k, v in self.items():
            # `is_tensor` still holds the selected framework's predicate;
            # global conversion only makes sense for OneFlow tensors.
            if is_tensor != flow.is_tensor:
                raise ValueError(
                    "Unable to create tensor, you should probably set `return_tensors='of'` "
                )
            if v.size() != size:
                raise ValueError(
                    "Unable to create tensor, you should probably padding with `padding=True` "
                )
            self[k] = v.to_global(sbp=sbp, placement=dist.get_layer_placement(0))
    return self


BatchEncoding.convert_to_tensors = flow_convert_to_tensors  # noqa
| 5,136 | 35.432624 | 99 | py |
libai | libai-main/projects/MOCOV3/pretrain_net.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from trainer.moco_trainer import MoCoEagerTrainer
from libai.config import LazyConfig, default_argument_parser, try_get_key
from libai.engine import DefaultTrainer, default_setup
from libai.utils.checkpoint import Checkpointer
sys.path.append(".")
logger = logging.getLogger(__name__)
class MoCoPretrainingTrainer(DefaultTrainer):
    """DefaultTrainer specialized for MoCo v3 pretraining.

    Swaps the generic trainer for :class:`MoCoEagerTrainer`, which threads the
    momentum coefficient through every step.
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        # Expose the total iteration count on the model — presumably used by
        # the model to schedule its momentum over training; confirm in MoCo_ViT.
        self.model.max_iter = cfg.train.train_iter
        self._trainer = MoCoEagerTrainer(
            self.model, self.train_loader, self.optimizer, cfg.train.num_accumulation_steps
        )
def main(args):
    """Entry point: load the lazy config, then either evaluate or train MoCo v3."""
    cfg = LazyConfig.load(args.config_file)
    cfg = LazyConfig.apply_overrides(cfg, args.opts)
    # MoCo only runs in eager global mode; refuse graph mode up front.
    if try_get_key(cfg, "graph.enabled") is True:
        raise NotImplementedError(
            "LiBai MOCO only support eager global mode now, please set cfg.graph.enabled=False"
        )
    default_setup(cfg, args)
    # Smoke-test mode: shrink the schedule to a few iterations.
    if args.fast_dev_run:
        cfg.train.train_epoch = 0
        cfg.train.train_iter = 20
        cfg.train.eval_period = 10
        cfg.train.log_period = 1
    if args.eval_only:
        tokenizer = None
        if try_get_key(cfg, "tokenization.setup", default=False):
            tokenizer = MoCoPretrainingTrainer.build_tokenizer(cfg)
        model = MoCoPretrainingTrainer.build_model(cfg)
        Checkpointer(model, save_dir=cfg.train.output_dir).resume_or_load(
            cfg.train.load_weight, resume=args.resume
        )
        # NOTE(review): this checks "train.graph.enabled" while the guard above
        # checks "graph.enabled" — verify the two key paths refer to the same
        # config node for the configs in use.
        if try_get_key(cfg, "train.graph.enabled", default=False):
            model = MoCoPretrainingTrainer.build_graph(cfg, model, is_train=False)
        test_loader = MoCoPretrainingTrainer.build_test_loader(cfg, tokenizer)
        _ = MoCoPretrainingTrainer.test(cfg, test_loader, model)
        return
    trainer = MoCoPretrainingTrainer(cfg)
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    main(args)
| 2,592 | 31.4125 | 95 | py |
libai | libai-main/projects/MOCOV3/trainer/moco_trainer.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Callable
from libai.engine.trainer import EagerTrainer
class MoCoEagerTrainer(EagerTrainer):
    """EagerTrainer that threads MoCo's momentum coefficient through each step."""

    def run_step(self, get_batch: Callable):
        """One training step: fetch a batch, forward/backward, step on grad-acc boundary.

        Args:
            get_batch: callable that converts raw loader output into model kwargs
                (optionally applying the loader's mixup function).
        """
        assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
        start = time.perf_counter()
        # If you want to do something with the data, you can wrap the dataloader.
        data = next(self._data_loader_iter)
        data = get_batch(data, getattr(self.data_loader, "mixup_func", None))
        data_time = time.perf_counter() - start
        # The model returns the updated momentum alongside the losses; feed it
        # back in on the next step so the schedule advances per iteration.
        loss_dict, m_dict = self.model(**data, cu_iter=self.iter, m=self.model.m)
        self.model.m = m_dict["m"]
        # Scale by the accumulation factor so accumulated grads average out.
        losses = sum(loss_dict.values()) / self.grad_acc_steps
        losses.backward()
        self.write_metrics(loss_dict, data_time)
        # Only step/zero the optimizer on accumulation boundaries.
        if (self.iter + 1) % self.grad_acc_steps == 0:
            self.optimizer.step()
            self.optimizer.zero_grad()
| 1,599 | 35.363636 | 85 | py |
libai | libai-main/projects/MOCOV3/configs/moco_linear_prob.py | from oneflow.optim import SGD
# LazyConfig for MoCo v3 linear probing (frozen backbone + linear classifier).
from flowvision.transforms import transforms
from libai.config import get_config, LazyCall
from .models.vit_small_patch16 import model

# NOTE(review): this uses a parent-relative import while moco_pretrain.py uses
# an absolute "transform...." import — confirm both resolve under LazyConfig.
from ..transform.linear_prob_transform import train_aug

dataloader = get_config("common/data/imagenet.py").dataloader
train = get_config("common/train.py").train
graph = get_config("common/models/graph.py").graph
optim = get_config("common/optim.py").optim
# Path to the pretrained weight to probe (must be edited by the user).
model.linear_prob = "path/to/pretrained_weight"
model.weight_style = "oneflow"
# Refine data path to imagenet (must be edited by the user).
dataloader.train.dataset[0].root = "/path/to/imagenet/"
dataloader.test[0].dataset.root = "/path/to/imagenet/"
# Training-time augmentation pipeline.
dataloader.train.dataset[0].transform = LazyCall(transforms.Compose)(transforms=train_aug)
# Refine train cfg for moco v3 linear probing.
train.train_micro_batch_size = 128
train.test_micro_batch_size = 32
train.train_epoch = 90
train.log_period = 1
# NOTE(review): moco_pretrain.py sets train.eval_period instead of
# train.evaluation.eval_period — verify which key the trainer reads.
train.evaluation.eval_period = 1000
# Swap the default optimizer for plain SGD and drop AdamW-only knobs.
optim._target_ = SGD
optim.params.clip_grad_max_norm = None
optim.params.clip_grad_norm_type = None
optim.params.weight_decay_norm = None
optim.params.weight_decay_bias = None
del optim.betas
del optim.eps
del optim.do_bias_correction
# Refine optimizer cfg for moco v3 model
# Reference:
# https://github.com/facebookresearch/moco-v3/blob/main/CONFIG.md
# https://github.com/facebookresearch/moco-v3/blob/main/main_lincls.py
# Linear lr scaling rule for an assumed 8-GPU run.
base_lr = 3.0
actual_lr = base_lr * (train.train_micro_batch_size * 8 / 256)
optim.lr = actual_lr
optim.weight_decay = 0.0
optim.momentum = 0.9
# Scheduler: no warmup, decay to zero.
train.scheduler.warmup_iter = 0
train.scheduler.alpha = 0
# Eager mode only (graph mode is unsupported for MoCo).
graph.enabled = False
| 1,652 | 28 | 90 | py |
libai | libai-main/projects/MOCOV3/configs/moco_pretrain.py | from flowvision import transforms
# LazyConfig for MoCo v3 self-supervised pretraining (ViT-S/16 on ImageNet).
from libai.config import get_config, LazyCall
from .models.moco_vit_small_patch16 import model
from transform.pretrain_transform import TwoCropsTransform, augmentation1, augmentation2

dataloader = get_config("common/data/imagenet.py").dataloader
train = get_config("common/train.py").train
graph = get_config("common/models/graph.py").graph
optim = get_config("common/optim.py").optim
# Refine data path to imagenet (must be edited by the user).
dataloader.train.dataset[0].root = "/path/to/imagenet/"
dataloader.test[0].dataset.root = "/path/to/imagenet/"
# Two differently-augmented crops per image, one for each encoder branch.
dataloader.train.dataset[0].transform = LazyCall(TwoCropsTransform)(
    base_transform1=LazyCall(transforms.Compose)(transforms=augmentation1),
    base_transform2=LazyCall(transforms.Compose)(transforms=augmentation2),
)
# the momentum of MOCOV3
model.m = 0.99
# the temperature coefficient of MOCOV3
model.T = 0.2
# Refine train cfg for moco v3 model
train.train_micro_batch_size = 32
train.test_micro_batch_size = 32
train.train_epoch = 300
train.warmup_ratio = 40 / 300
train.eval_period = 5
train.log_period = 1
# Linear lr scaling rule for an assumed 8-GPU run.
base_lr = 1.5e-4
actual_lr = base_lr * (train.train_micro_batch_size * 8 / 256)
optim.lr = actual_lr
optim.weight_decay = 0.1
# Scheduler
train.scheduler.warmup_factor = 0.001
train.scheduler.alpha = 1.5e-4
train.scheduler.warmup_method = "linear"
# Eager mode only (graph mode is unsupported for MoCo).
graph.enabled = False
| 1,426 | 27.54 | 88 | py |
libai | libai-main/projects/MOCOV3/configs/models/moco_vit_small_patch16.py | from libai.config import LazyCall
# MoCo v3 model config: ViT-S/16 base + momentum encoders.
# Note: MoCo v3's "vit_small" uses 12 heads at embed_dim=384 (not the usual 6).
from modeling.moco import MoCo_ViT
from modeling.vit import VisionTransformer

base_encoder = LazyCall(VisionTransformer)(
    img_size=224,
    patch_size=16,
    in_chans=3,
    embed_dim=384,
    depth=12,
    num_heads=12,
    mlp_ratio=4,
    drop_path_rate=0.0,
    global_pool=False,
    stop_grad_conv1=True,
)
# The momentum (key) encoder mirrors the base encoder's architecture.
momentum_encoder = LazyCall(VisionTransformer)(
    img_size=224,
    patch_size=16,
    in_chans=3,
    embed_dim=384,
    depth=12,
    num_heads=12,
    mlp_ratio=4,
    drop_path_rate=0.0,
    global_pool=False,
    stop_grad_conv1=True,
)
# dim/mlp_dim configure the projection/prediction heads; T is the contrastive
# temperature and m the EMA momentum.
model = LazyCall(MoCo_ViT)(
    base_encoder=base_encoder,
    momentum_encoder=momentum_encoder,
    dim=256,
    mlp_dim=4096,
    T=0.2,
    m=0.99,
)
| 755 | 17.439024 | 47 | py |
libai | libai-main/projects/MOCOV3/configs/models/vit_base_patch16.py | import sys
# ViT-B/16 backbone config (used for linear probing / fine-tuning).
sys.path.append("projects/MOCOV3")
from libai.config import LazyCall  # noqa: E402
from modeling.vit import VisionTransformer  # noqa: E402

model = LazyCall(VisionTransformer)(
    img_size=224,
    patch_size=16,
    in_chans=3,
    embed_dim=768,
    depth=12,
    num_heads=12,
    mlp_ratio=4,
    drop_path_rate=0.1,
    global_pool=False,
)
| 362 | 17.15 | 56 | py |
libai | libai-main/projects/MOCOV3/configs/models/moco_vit_base_patch16.py | from libai.config import LazyCall
# MoCo v3 model config: ViT-B/16 base + momentum encoders.
from modeling.moco import MoCo_ViT
from modeling.vit import VisionTransformer

base_encoder = LazyCall(VisionTransformer)(
    img_size=224,
    patch_size=16,
    in_chans=3,
    embed_dim=768,
    depth=12,
    num_heads=12,
    mlp_ratio=4,
    drop_path_rate=0.1,
    global_pool=False,
    stop_grad_conv1=True,
)
# The momentum (key) encoder mirrors the base encoder's architecture.
momentum_encoder = LazyCall(VisionTransformer)(
    img_size=224,
    patch_size=16,
    in_chans=3,
    embed_dim=768,
    depth=12,
    num_heads=12,
    mlp_ratio=4,
    drop_path_rate=0.1,
    global_pool=False,
    stop_grad_conv1=True,
)
# dim/mlp_dim configure the projection/prediction heads; T is the contrastive
# temperature and m the EMA momentum.
model = LazyCall(MoCo_ViT)(
    base_encoder=base_encoder,
    momentum_encoder=momentum_encoder,
    dim=256,
    mlp_dim=4096,
    T=0.2,
    m=0.99,
)
| 755 | 17.439024 | 47 | py |
libai | libai-main/projects/MOCOV3/configs/models/vit_small_patch16.py | from .vit_base_patch16 import model
# Shrink the imported ViT-B config to MoCo v3's "small" variant
# (embed_dim=384 with 12 heads, per the MoCo v3 reference implementation).
model.embed_dim = 384
model.depth = 12
model.num_heads = 12
model.drop_path_rate = 0.0
| 125 | 14.75 | 35 | py |
libai | libai-main/projects/MOCOV3/utils/load_checkpoint.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from utils.weight_convert import load_torch_checkpoint_linear_prob
from libai.utils.checkpoint import (
Checkpointer,
get_missing_parameters_message,
get_unexpected_parameters_message,
)
logger = logging.getLogger("libai." + __name__)
def load_checkpoint(model, path, weight_style, num_heads, embed_dim):
    """Load linear-probing weights into ``model``, freezing all but the head.

    Args:
        model: the ViT to load into; every parameter except ``head.weight`` /
            ``head.bias`` is frozen.
        path: checkpoint path.
        weight_style: "pytorch" (MoCo v3 torch checkpoint, converted on the
            fly) or "oneflow" (LiBai Checkpointer format).
        num_heads / embed_dim: attention layout, needed to re-order fused
            qkv tensors when converting a torch checkpoint.
    """
    linear_keyword = "head"
    # Freeze the backbone: only the classification head stays trainable.
    for name, param in model.named_parameters():
        if name not in ["%s.weight" % linear_keyword, "%s.bias" % linear_keyword]:
            param.requires_grad = False
    assert weight_style in ["pytorch", "oneflow"]
    if weight_style == "pytorch":
        params = load_torch_checkpoint_linear_prob(num_heads, embed_dim, path=path)
    else:
        params = Checkpointer(model).load(path)
    model_state_dict = model.state_dict()
    # check the incorrect shape and unexpected keys
    incorrect_shapes = []
    unexpected_keys = []
    for k in list(params.keys()):
        if k in model_state_dict:
            shape_model = tuple(model_state_dict[k].shape)
            shape_ckp = tuple(params[k].shape)
            if shape_model != shape_ckp:
                incorrect_shapes.append((k, shape_ckp, shape_model))
                params.pop(k)
                model_state_dict.pop(k)
            else:
                # BUGFIX: keys that matched were never removed from
                # model_state_dict, so every successfully-loaded parameter was
                # also reported as "missing" below. Pop matched keys too, so
                # missing_keys really is the set the checkpoint did not cover.
                model_state_dict.pop(k)
        else:
            unexpected_keys.append(k)
    missing_keys = list(model_state_dict.keys())
    for k, shape_checkpoint, shape_model in incorrect_shapes:
        logger.warning(
            "Skip loading parameter '{}' to the model due to incompatible "
            "shapes: {} in the checkpoint but {} in the "
            "model! You might want to double check if this is expected.".format(
                k, shape_checkpoint, shape_model
            )
        )
    if missing_keys:
        logger.info(get_missing_parameters_message(missing_keys))
    if unexpected_keys:
        logger.info(get_unexpected_parameters_message(unexpected_keys))
    model.load_state_dict(params, strict=False)
| 2,591 | 34.506849 | 83 | py |
libai | libai-main/projects/MOCOV3/utils/weight_convert.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import oneflow as flow
import torch
logger = logging.getLogger(__name__)
def convert_qkv_weight(value, num_heads, hidden_size):
    """Re-order a fused qkv.weight to LiBai's transformer-layer layout.

    The source layout is [3 * hidden, hidden] grouped as (q|k|v) blocks; LiBai
    expects the rows interleaved per attention head: (head, q|k|v, head_size).

    Args:
        value: qkv.weight tensor from the loaded checkpoint,
            shape (3 * hidden_size, hidden_size).
        num_heads: number of attention heads.
        hidden_size: model hidden size (must be divisible by num_heads).

    Returns:
        The permuted weight, shape (3 * hidden_size, hidden_size).
    """
    head_size = int(hidden_size / num_heads)
    qkv_weight = (
        value.view(3, num_heads, head_size, hidden_size)
        .permute(1, 0, 2, 3)
        .contiguous()
        .view(hidden_size * 3, hidden_size)
    )
    return qkv_weight
def convert_qkv_bias(value, num_heads, hidden_size):
    """Re-order a fused qkv.bias to LiBai's transformer-layer layout.

    Same permutation as :func:`convert_qkv_weight`, applied to the 1-D bias:
    (q|k|v) blocks become per-head interleaved (head, q|k|v, head_size).

    Args:
        value: qkv.bias tensor from the loaded checkpoint, shape (3 * hidden_size,).
        num_heads: number of attention heads.
        hidden_size: model hidden size (must be divisible by num_heads).

    Returns:
        The permuted bias, shape (3 * hidden_size,).
    """
    head_size = int(hidden_size / num_heads)
    qkv_bias = (
        value.view(3, num_heads, head_size).permute(1, 0, 2).contiguous().view(hidden_size * 3)
    )
    return qkv_bias
def filter_keys(key, value, num_heads, hidden_size):
    """Map a torch MoCo v3 state-dict entry onto LiBai's MOCOV3 naming.

    At most one rename applies per key (the first match below, in order).
    Fused qkv tensors are additionally permuted into LiBai's head-interleaved
    layout via convert_qkv_weight / convert_qkv_bias.
    """
    # (substring in checkpoint key, LiBai replacement), checked in order.
    rename_table = (
        ("norm1", "input_layernorm"),
        ("attn.qkv", "self_attention.query_key_value"),
        ("attn.proj", "self_attention.dense"),
        ("norm2", "post_attention_layernorm"),
        ("mlp.fc1", "mlp.dense_h_to_4h"),
        ("mlp.fc2", "mlp.dense_4h_to_h"),
        ("fc_norm", "norm"),
    )
    for old_part, new_part in rename_table:
        if old_part in key:
            key = key.replace(old_part, new_part)
            if old_part == "attn.qkv":
                # The fused qkv tensor also needs its rows re-ordered.
                if "weight" in key:
                    value = convert_qkv_weight(value, num_heads, hidden_size)
                if "bias" in key:
                    value = convert_qkv_bias(value, num_heads, hidden_size)
            break
    return key, value
def load_torch_checkpoint_linear_prob(
    num_heads, hidden_size, path="projects/MOCOV3/output/vit-b-300ep.pth.tar", linear_keyword="head"
):
    """Load a torch MoCo-v3 checkpoint and convert its base encoder for linear probing.

    Only ``module.base_encoder*`` entries are kept; the classification head
    (``linear_keyword``) and BatchNorm ``num_batches_tracked`` buffers are
    dropped. Each kept tensor is renamed/reshaped via ``filter_keys`` and
    broadcast as a global oneflow tensor placed on GPU 0.

    Args:
        num_heads: number of attention heads of the encoder.
        hidden_size: hidden dimension of the encoder.
        path: torch checkpoint file containing a ``state_dict`` entry.
        linear_keyword: attribute name of the classification head to skip.

    Returns:
        dict mapping LiBai parameter names to global oneflow tensors.
    """
    state_dict = torch.load(path, map_location="cpu")["state_dict"]
    head_prefix = "module.base_encoder.%s" % linear_keyword
    converted = dict()
    for name, tensor in state_dict.items():
        if "num_batches_tracked" in name:
            continue
        if not name.startswith("module.base_encoder") or name.startswith(head_prefix):
            continue
        new_name, new_tensor = filter_keys(name, tensor, num_heads, hidden_size)
        array = new_tensor.detach().cpu().numpy()
        global_tensor = flow.tensor(array).to_global(
            sbp=flow.sbp.broadcast, placement=flow.placement("cuda", {0: range(1)})
        )
        converted[new_name[len("module.base_encoder.") :]] = global_tensor
    return converted
| 3,558 | 31.953704 | 100 | py |
libai | libai-main/projects/MOCOV3/transform/linear_prob_transform.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flowvision import transforms
from flowvision.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from libai.config import LazyCall
# Supervised ImageNet-style training augmentation for the linear-probing stage:
# random resized crop + horizontal flip, tensor conversion, ImageNet normalization.
train_aug = [
    LazyCall(transforms.RandomResizedCrop)(size=224),
    LazyCall(transforms.RandomHorizontalFlip)(),
    LazyCall(transforms.ToTensor)(),
    LazyCall(transforms.Normalize)(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
]
| 1,010 | 35.107143 | 89 | py |
libai | libai-main/projects/MOCOV3/transform/pretrain_transform.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import oneflow as flow
from flowvision import transforms
from flowvision.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from PIL import ImageFilter, ImageOps
from libai.config import LazyCall
class GaussianBlur(object):
    """Gaussian blur augmentation from SimCLR: https://arxiv.org/abs/2002.05709

    A blur radius is drawn uniformly from the ``sigma`` interval on every call,
    so repeated applications of the transform differ.
    """

    def __init__(self, sigma=[0.1, 2.0]):
        # The mutable default is shared across instances but never mutated here.
        self.sigma = sigma

    def __call__(self, x):
        radius = random.uniform(*self.sigma)
        return x.filter(ImageFilter.GaussianBlur(radius=radius))
class Solarize(object):
    """Solarize augmentation from BYOL: https://arxiv.org/abs/2006.07733"""

    def __call__(self, x):
        # Invert all pixel values above PIL's default threshold (128).
        return ImageOps.solarize(x)
# follow BYOL's augmentation recipe: https://arxiv.org/abs/2006.07733
# First view of the asymmetric augmentation pair (BYOL recipe):
# crop + color jitter (p=0.8) + Gaussian blur applied with p=1.0.
augmentation1 = [
    LazyCall(transforms.RandomResizedCrop)(size=224, scale=(0.2, 1.0)),
    LazyCall(transforms.RandomApply)(
        transforms=[
            LazyCall(transforms.ColorJitter)(
                brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1
            )  # not strengthened
        ],
        p=0.8,
    ),
    # TODO: Add RandomGrayscale
    # LazyCall(transforms.RandomGrayscale)(p=0.2),
    LazyCall(transforms.RandomApply)(transforms=[LazyCall(GaussianBlur)(sigma=[0.1, 2.0])], p=1.0),
    LazyCall(transforms.RandomHorizontalFlip)(),
    LazyCall(transforms.ToTensor)(),
    LazyCall(transforms.Normalize)(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
]
# Second view: same as augmentation1 (blur still applied with p=1.0 here)
# plus solarization with p=0.2.
augmentation2 = [
    LazyCall(transforms.RandomResizedCrop)(size=224, scale=(0.2, 1.0)),
    LazyCall(transforms.RandomApply)(
        transforms=[
            LazyCall(transforms.ColorJitter)(
                brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1
            )  # not strengthened
        ],
        p=0.8,
    ),
    # TODO: Add RandomGrayscale
    # LazyCall(transforms.RandomGrayscale)(p=0.2),
    LazyCall(transforms.RandomApply)(transforms=[LazyCall(GaussianBlur)(sigma=[0.1, 2.0])], p=1.0),
    LazyCall(transforms.RandomApply)(transforms=[LazyCall(Solarize)()], p=0.2),
    LazyCall(transforms.RandomHorizontalFlip)(),
    LazyCall(transforms.ToTensor)(),
    LazyCall(transforms.Normalize)(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
]
class TwoCropsTransform:
    """Produce two independently augmented views of one image.

    The two views are concatenated along the channel dimension so a single
    tensor flows through the data pipeline; the model splits them again with
    ``flow.chunk(images, 2, dim=1)``.
    """

    def __init__(self, base_transform1, base_transform2):
        self.base_transform1 = base_transform1
        self.base_transform2 = base_transform2

    def __call__(self, x):
        views = (self.base_transform1(x), self.base_transform2(x))
        return flow.cat(views, dim=0)
| 3,266 | 33.03125 | 99 | py |
libai | libai-main/projects/MOCOV3/modeling/moco.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# MoCo v3 Model
# References:
# moco-v3: https://github.com/facebookresearch/moco-v3/blob/main/moco/builder.py
# --------------------------------------------------------
import math
import oneflow as flow
import oneflow.nn as nn
from libai.layers import Linear
from libai.utils.distributed import get_world_size
class MoCo(nn.Module):
    """
    Build a MoCo model with a base encoder, a momentum encoder, and two MLPs
    https://arxiv.org/abs/1911.05722

    The base encoder is trained by gradient descent; the momentum encoder is
    an exponential moving average (EMA) of the base encoder and provides the
    contrastive targets (MoCo v3, https://arxiv.org/abs/2104.02057).
    """

    def __init__(
        self, base_encoder, momentum_encoder, dim=256, mlp_dim=4096, T=1.0, m=0.99, max_iter=300
    ):
        """
        base_encoder: backbone updated by gradients
        momentum_encoder: EMA copy of the backbone (never updated by gradients)
        dim: feature dimension (default: 256)
        mlp_dim: hidden dimension in MLPs (default: 4096)
        T: softmax temperature (default: 1.0)
        m: base EMA momentum (default: 0.99)
        max_iter: total training iterations, used by the cosine momentum schedule
        """
        super(MoCo, self).__init__()
        self.T = T
        self.m = m
        # build encoders
        self.base_encoder = base_encoder
        self.momentum_encoder = momentum_encoder
        self.base_encoder.num_classes = dim
        self.momentum_encoder.num_classes = dim
        self.max_iter = max_iter
        # Subclasses (e.g. MoCo_ViT) install the projector/predictor MLPs here.
        self._build_projector_and_predictor_mlps(dim, mlp_dim)
        # Start the momentum encoder as an exact copy of the base encoder
        # and exclude it from gradient updates.
        for param_b, param_m in zip(
            self.base_encoder.parameters(), self.momentum_encoder.parameters()
        ):
            param_m.data.copy_(param_b.data)  # initialize
            param_m.requires_grad = False  # not update by gradient

    def _build_mlp(self, num_layers, input_dim, mlp_dim, output_dim, last_bn=True):
        """Build a BatchNorm/ReLU MLP of ``num_layers`` Linear layers (no bias)."""
        mlp = []
        for l in range(num_layers):
            dim1 = input_dim if l == 0 else mlp_dim
            dim2 = output_dim if l == num_layers - 1 else mlp_dim
            mlp.append(Linear(dim1, dim2, bias=False))  # libai
            if l < num_layers - 1:
                mlp.append(nn.BatchNorm1d(dim2))
                mlp.append(nn.ReLU(inplace=True))
            elif last_bn:
                # follow SimCLR's design:
                # https://github.com/google-research/simclr/blob/master/model_util.py#L157
                # for simplicity, we further removed gamma in BN
                # TODO: affine should be False (bug here)
                mlp.append(nn.BatchNorm1d(dim2, affine=True))
        return nn.Sequential(*mlp)

    def _build_projector_and_predictor_mlps(self, dim, mlp_dim):
        # Hook for subclasses; intentionally a no-op in the base class.
        pass

    @flow.no_grad()
    def _update_momentum_encoder(self, m):
        """Momentum update of the momentum encoder"""
        # EMA: param_m <- m * param_m + (1 - m) * param_b
        for param_b, param_m in zip(
            self.base_encoder.parameters(), self.momentum_encoder.parameters()
        ):
            param_m.data = param_m.data * m + param_b.data * (1.0 - m)

    def contrastive_loss(self, q, k):
        """InfoNCE loss between local queries q and (broadcast) keys k."""
        # normalize
        q = nn.functional.normalize(q, dim=1)
        k = nn.functional.normalize(k, dim=1)
        # gather all targets
        # k = concat_all_gather(k).to_global(sbp=q.sbp, placement=q.placement)
        k = k.to_global(sbp=flow.sbp.broadcast)
        # Einstein sum is more intuitive
        # logits[i, j] = similarity(q_i, k_j) / T
        logits = flow.einsum("nc,mc->nm", q, k) / self.T
        # Positive pairs lie on the diagonal of each rank's local shard,
        # so labels are the local row indices offset by rank * N.
        N = logits.shape[0] // get_world_size()
        labels = (flow.arange(N, dtype=flow.long) + N * flow.env.get_rank()).to_global(
            sbp=flow.sbp.split(0), placement=logits.placement
        )
        # The 2 * T factor follows the MoCo v3 reference implementation.
        return nn.CrossEntropyLoss()(logits, labels) * (2 * self.T)

    def adjust_moco_momentum(self, cu_iter, m):
        """Adjust moco momentum based on current epoch"""
        # Cosine schedule from the initial m up to 1.0 over max_iter iterations.
        m = 1.0 - 0.5 * (1.0 + math.cos(math.pi * cu_iter / self.max_iter)) * (1.0 - m)
        return m

    def forward(self, images, labels=None, cu_iter=0, m=0.99):
        # NOTE: ``labels`` is accepted for trainer-API compatibility but unused.
        if self.training:
            # ``images`` holds two augmented views concatenated along dim 1
            # (see TwoCropsTransform); split them back apart here.
            [x1, x2] = flow.chunk(images, 2, dim=1)
            # compute features
            q1 = self.predictor(self.base_encoder(x1)["prediction_scores"])
            q2 = self.predictor(self.base_encoder(x2)["prediction_scores"])
            m = self.adjust_moco_momentum(cu_iter, m)  # update the moco_momentum
            with flow.no_grad():  # no gradient
                self._update_momentum_encoder(m)  # update the momentum encoder
                # compute momentum features as targets
                k1 = self.momentum_encoder(x1)["prediction_scores"]
                k2 = self.momentum_encoder(x2)["prediction_scores"]
            # Symmetrized loss; also return the scheduled momentum for logging.
            return (
                {"losses": self.contrastive_loss(q1, k2) + self.contrastive_loss(q2, k1)},
                {"m": m},
            )
        else:
            return self.base_encoder(images)
class MoCo_ViT(MoCo):
    """MoCo v3 specialization for ViT backbones: replaces both encoder heads
    with 3-layer projection MLPs and adds a 2-layer predictor on the base side."""

    def _build_projector_and_predictor_mlps(self, dim, mlp_dim):
        # Width of the encoder representation, read from the original head.
        feature_dim = self.base_encoder.head.weight.shape[1]
        # Projectors on both encoders, predictor only on the gradient-updated one.
        self.base_encoder.head = self._build_mlp(3, feature_dim, mlp_dim, dim)
        self.momentum_encoder.head = self._build_mlp(3, feature_dim, mlp_dim, dim)
        self.predictor = self._build_mlp(2, dim, mlp_dim, dim)
| 5,568 | 34.929032 | 96 | py |
libai | libai-main/projects/MOCOV3/modeling/vit.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# ViT Model
# References:
# moco-v3: https://github.com/facebookresearch/moco-v3/blob/main/vits.py
# --------------------------------------------------------
import math
from functools import reduce
from operator import mul
import oneflow as flow
import oneflow.nn as nn
from flowvision.layers.weight_init import trunc_normal_
from utils.load_checkpoint import load_checkpoint
from libai.layers import Linear, PatchEmbedding
from libai.models import vision_transformer
class VisionTransformer(vision_transformer.VisionTransformer):
    """Vision Transformer for MOCO

    LiBai impl of: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
    - https://arxiv.org/abs/2010.11929

    MoCo-v3 variant of LiBai's ViT: fixed (non-learnable) 2D sin-cos position
    embeddings, MoCo-style weight init, an optionally frozen patch-projection
    conv (``stop_grad_conv1``), and a linear-probing mode (``linear_prob``)
    that loads pretrained weights and re-initializes only the head.
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        global_pool=False,
        num_classes=1000,
        loss_func=None,
        linear_prob=None,
        weight_style="pytorch",
        stop_grad_conv1=False,
    ):
        super(VisionTransformer, self).__init__(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            depth=depth,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            drop_rate=drop_rate,
            attn_drop_rate=attn_drop_rate,
            drop_path_rate=drop_path_rate,
            num_classes=num_classes,
            loss_func=loss_func,
        )
        self.global_pool = global_pool
        # weight init
        if linear_prob:
            # Linear probing: load pretrained backbone weights, then re-init
            # only the classification head.
            load_checkpoint(self, linear_prob, weight_style, num_heads, embed_dim)
            self.head.weight.data.normal_(mean=0.0, std=0.01)
            # BUGFIX: the in-place zeroing op is Tensor.zero_(); the former
            # .zeros_() call raised AttributeError whenever linear_prob was set.
            self.head.bias.data.zero_()
        else:
            trunc_normal_(self.pos_embed, std=0.02)
            trunc_normal_(self.cls_token, std=0.02)
            self.apply(self._init_weights)
        self.stop_grad_conv1 = stop_grad_conv1
        self.embed_dim = embed_dim
        # NOTE(review): initialization() re-initializes linear layers and the
        # position embedding AFTER load_checkpoint above — verify this does not
        # clobber the loaded weights in linear-prob mode.
        self.initialization()

    def initialization(self):
        """MoCo-v3 style (re-)initialization of embeddings and linear layers."""
        # Use fixed 2D sin-cos position embedding
        self.build_2d_sincos_position_embedding()
        # weight initialization
        for name, m in self.named_modules():
            if isinstance(m, Linear):
                if "query_key_value" in name:
                    # Fused qkv weight: fan-in per projection is rows // 3.
                    val = math.sqrt(6.0 / float(m.weight.shape[0] // 3 + m.weight.shape[1]))
                    nn.init.uniform_(m.weight, -val, val)
                else:
                    nn.init.xavier_uniform_(m.weight)
                nn.init.zeros_(m.bias)
        nn.init.normal_(self.cls_token, std=1e-6)
        if isinstance(self.patch_embed, PatchEmbedding):
            # xavier_uniform initialization for the patch projection conv
            val = math.sqrt(
                6.0 / float(3 * reduce(mul, self.patch_embed.patch_size, 1) + self.embed_dim)
            )
            nn.init.uniform_(self.patch_embed.proj.weight, -val, val)
            nn.init.zeros_(self.patch_embed.proj.bias)
            if self.stop_grad_conv1:
                # Freeze the patch projection (MoCo v3 stability trick).
                self.patch_embed.proj.weight.requires_grad = False
                self.patch_embed.proj.bias.requires_grad = False

    def build_2d_sincos_position_embedding(self, temperature=10000.0):
        """Replace pos_embed with a fixed 2D sin-cos embedding (cls slot zeroed)."""
        sbp = self.pos_embed.sbp
        placement = self.pos_embed.placement
        h, w = self.patch_embed.grid_size
        grid_w = flow.arange(w, dtype=flow.float32).to_global(sbp=sbp, placement=placement)
        grid_h = flow.arange(h, dtype=flow.float32).to_global(sbp=sbp, placement=placement)
        grid_w, grid_h = flow.meshgrid(grid_w, grid_h)
        assert (
            self.embed_dim % 4 == 0
        ), "Embed dimension must be divisible by 4 for 2D sin-cos position embedding"
        pos_dim = self.embed_dim // 4
        omega = (flow.arange(pos_dim, dtype=flow.float32) / pos_dim).to_global(
            sbp=sbp, placement=placement
        )
        omega = 1.0 / flow.tensor(temperature).to_global(sbp=sbp, placement=placement) ** omega
        out_w = flow.einsum("m,d->md", grid_w.flatten(), omega)
        out_h = flow.einsum("m,d->md", grid_h.flatten(), omega)
        pos_emb = flow.cat(
            [flow.sin(out_w), flow.cos(out_w), flow.sin(out_h), flow.cos(out_h)], dim=1
        )[None, :, :]
        # Zero embedding for the class token slot, prepended to the patch grid.
        pe_token = flow.zeros([1, 1, self.embed_dim], dtype=flow.float32).to_global(
            sbp=sbp, placement=placement
        )
        self.pos_embed = nn.Parameter(flow.cat([pe_token, pos_emb], dim=1))
        self.pos_embed.requires_grad = False

    def forward_head(self, x):
        """Pool the token sequence and apply the classification head.

        With ``global_pool`` the patch tokens (cls excluded) are averaged before
        the norm; otherwise the normalized cls token is used.
        """
        if self.global_pool:
            x = x[:, 1:, :].mean(dim=1)  # global pool without cls token
            outcome = self.norm(x)
            outcome = self.head(outcome)
        else:
            x = self.norm(x)
            outcome = x[:, 0]
            outcome = self.head(outcome)
        return outcome
| 5,715 | 35.177215 | 95 | py |
libai | libai-main/projects/ConvNeXT/configs/convnext_imagenet.py | from libai.config import LazyCall
from projects.ConvNeXT.configs.convnext import model
from configs.common.models.graph import graph
from configs.common.train import train
from configs.common.optim import optim
from configs.common.data.imagenet import dataloader
from flowvision.data import Mixup
# Refine data path to imagenet
dataloader.train.dataset[0].root = "/data/dataset/ImageNet/extract"
dataloader.test[0].dataset.root = "/data/dataset/ImageNet/extract"

# Refine model cfg for ConvNeXt training on ImageNet-1k
# (fixes the former `num_lables` typo, which only created a dead attribute).
model.cfg.num_labels = 1000

# Add Mixup/CutMix batch augmentation; num_classes must match the model head.
# (The original file assigned this identical Mixup block twice.)
dataloader.train.mixup_func = LazyCall(Mixup)(
    mixup_alpha=0.8,
    cutmix_alpha=1.0,
    prob=1.0,
    switch_prob=0.5,
    mode="batch",
    num_classes=model.cfg.num_labels,
)

# Refine optimizer cfg for ConvNeXt
optim.lr = 1e-3  # 5e-4 * 1024 (batchsize) / 512
optim.eps = 1e-8
optim.weight_decay = 0.05
optim.params.clip_grad_max_norm = None
optim.params.clip_grad_norm_type = None
# No weight decay on the position embedding and class token.
optim.params.overrides = {"pos_embed": {"weight_decay": 0.0}, "cls_token": {"weight_decay": 0.0}}

# Refine train cfg for ConvNeXt
train.train_micro_batch_size = 128
train.test_micro_batch_size = 128
train.train_epoch = 300
train.warmup_ratio = 5 / 300  # 5 warmup epochs out of 300
train.evaluation.eval_period = 1000
train.log_period = 1

# Scheduler
train.scheduler.warmup_factor = 0.001
train.scheduler.alpha = 0.01
train.scheduler.warmup_method = "linear"

# Set fp16 ON
train.amp.enabled = True

# Distributed Settings
# train.dist.pipeline_num_layers = 4
train.dist.data_parallel_size = 1
train.dist.tensor_parallel_size = 1
train.dist.pipeline_parallel_size = 1
libai | libai-main/projects/ConvNeXT/configs/convnext.py | from omegaconf import DictConfig
from libai.config import LazyCall
from projects.ConvNeXT.modeling.convnext_model import ConvNextForImageClassification
# Default ConvNeXt-Tiny hyper-parameters (4 stages, depths 3/3/9/3, widths 96..768).
cfg = dict(
    num_channels=3,  # RGB input
    patch_size=4,  # stem patchify kernel/stride
    num_stages=4,
    hidden_sizes=[96, 192, 384, 768],  # channel width per stage
    depths=[3, 3, 9, 3],  # ConvNext blocks per stage
    layer_norm_eps=1e-12,
    drop_path_rate=0.0,  # stochastic depth disabled by default
    image_size=224,
    num_labels=1000,  # ImageNet-1k classification head
    initializer_range=0.02,  # std of the normal weight init
    problem_type=None,  # loss type; inferred from labels on first training step
)
cfg = DictConfig(cfg)
# Lazily-constructed model entry point consumed by LiBai's config system.
model = LazyCall(ConvNextForImageClassification)(cfg=cfg)
| 510 | 22.227273 | 84 | py |
libai | libai-main/projects/ConvNeXT/modeling/embedding.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.utils import distributed as dist
from projects.ConvNeXT.modeling.layer_norm import ConvNextLayerNorm
class ConvNextEmbeddings(nn.Module):
    """ConvNeXt stem: a patchify convolution (kernel == stride == patch_size)
    followed by a channels-first LayerNorm."""

    def __init__(self, num_channels, hidden_sizes, patch_size, layer_idx=0):
        super().__init__()
        # Non-overlapping patch projection into the first stage's width,
        # replicated (broadcast) across all devices.
        self.patch_embeddings = nn.Conv2d(
            num_channels, hidden_sizes[0], kernel_size=patch_size, stride=patch_size
        ).to_global(
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(layer_idx),
        )
        self.layernorm = ConvNextLayerNorm(
            hidden_sizes[0], eps=1e-6, data_format="channels_first", layer_idx=layer_idx
        )
        self.num_channels = num_channels
        self.layer_idx = layer_idx

    def forward(self, x):
        # Guard against inputs whose channel count disagrees with the config.
        if x.shape[1] != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set "
                "in the configuration."
            )
        return self.layernorm(self.patch_embeddings(x))
| 1,845 | 37.458333 | 98 | py |
libai | libai-main/projects/ConvNeXT/modeling/convnext_layers.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.layers import DropPath, Linear, build_activation
from libai.utils import distributed as dist
from projects.ConvNeXT.modeling.layer_norm import ConvNextLayerNorm
class ConvNextLayer(nn.Module):
    """One ConvNeXt block (https://arxiv.org/abs/2201.03545).

    Structure: 7x7 depthwise conv -> LayerNorm (channels-last) -> pointwise
    expansion to 4*dim (as a Linear) -> GELU -> pointwise projection back to
    dim -> per-channel layer scale -> drop path -> residual add.
    """

    def __init__(
        self, dim, eps=1e-6, drop_path=0, layer_scale_init_value=1e-6, layer_idx=0
    ) -> None:
        super().__init__()
        # Depthwise 7x7 conv (groups=dim), replicated on all devices.
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim).to_global(
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(layer_idx),
        )
        self.layernorm = ConvNextLayerNorm(dim, eps=eps, layer_idx=layer_idx)
        # Pointwise convs implemented as tensor-parallel Linears over channels
        # (column-parallel expansion, row-parallel reduction).
        self.pwconv1 = Linear(dim, 4 * dim, parallel="col", layer_idx=layer_idx)
        self.act = build_activation("gelu")
        self.pwconv2 = Linear(4 * dim, dim, parallel="row", layer_idx=layer_idx)
        # Learnable per-channel residual scaling ("gamma"), init to a small value;
        # disabled entirely when layer_scale_init_value <= 0.
        layer_scale_parameter = (
            flow.ones(
                (dim),
                placement=dist.get_layer_placement(layer_idx),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            )
            * layer_scale_init_value
        )
        self.layer_scale_parameter = (
            nn.Parameter(layer_scale_parameter, requires_grad=True)
            if layer_scale_init_value > 0
            else None
        )
        # Stochastic depth on the residual branch.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.layer_idx = layer_idx

    def forward(self, hidden_states):
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.layer_scale_parameter is not None:
            x = self.layer_scale_parameter * x
        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)
        x = input + self.drop_path(x)
        return x
class ConvNextStage(nn.Module):
    """One ConvNeXt stage: an optional downsampler (channels-first LayerNorm +
    strided conv) followed by ``depth`` residual ConvNext blocks."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=2,
        stride=2,
        depth=2,
        drop_path_rates=None,
        layer_idx=0,
    ):
        super().__init__()
        # Downsample whenever the width changes or a stride is requested.
        if in_channels != out_channels or stride > 1:
            norm = ConvNextLayerNorm(
                in_channels, eps=1e-6, data_format="channels_first", layer_idx=layer_idx
            )
            conv = nn.Conv2d(
                in_channels, out_channels, kernel_size=kernel_size, stride=stride
            ).to_global(
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                placement=dist.get_layer_placement(layer_idx),
            )
            self.downsampling_layer = nn.Sequential(norm, conv)
        else:
            self.downsampling_layer = nn.Identity()
        # One stochastic-depth rate per block; default to no drop path.
        rates = drop_path_rates or [0.0] * depth
        self.layers = nn.Sequential(
            *(
                ConvNextLayer(dim=out_channels, drop_path=rates[i], layer_idx=layer_idx)
                for i in range(depth)
            )
        )
        self.layer_idx = layer_idx

    def forward(self, hidden_states):
        downsampled = self.downsampling_layer(hidden_states)
        return self.layers(downsampled)
class ConvNextEncoder(nn.Module):
    """Stack of ConvNextStage modules with a linearly increasing stochastic-depth
    schedule distributed over all blocks."""

    def __init__(self, hidden_sizes, depths, num_stages, drop_path_rate):
        super().__init__()
        # Per-block drop-path rates from 0 to drop_path_rate, split per stage.
        rate_schedule = [
            chunk.tolist()
            for chunk in flow.linspace(0, drop_path_rate, sum(depths)).split(list(depths))
        ]
        self.stages = nn.ModuleList()
        in_chs = hidden_sizes[0]
        for stage_idx in range(num_stages):
            stage_width = hidden_sizes[stage_idx]
            self.stages.append(
                ConvNextStage(
                    in_channels=in_chs,
                    out_channels=stage_width,
                    stride=2 if stage_idx > 0 else 1,  # stem already downsampled stage 0
                    depth=depths[stage_idx],
                    drop_path_rates=rate_schedule[stage_idx],
                    layer_idx=stage_idx,
                )
            )
            in_chs = stage_width

    def forward(self, hidden_states):
        for stage in self.stages:
            hidden_states = stage(hidden_states)
        return hidden_states
| 4,983 | 34.347518 | 98 | py |
libai | libai-main/projects/ConvNeXT/modeling/convnext_model.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import oneflow as flow
from oneflow import nn
from libai.config import configurable
from libai.layers import LayerNorm, Linear
from libai.utils import distributed as dist
from projects.ConvNeXT.modeling.convnext_layers import ConvNextEncoder, ConvNextStage
from projects.ConvNeXT.modeling.embedding import ConvNextEmbeddings
class ConvNextModel(nn.Module):
    """ConvNeXt backbone without a classification head.

    ``forward`` returns both the final spatial feature map and a pooled,
    layer-normalized feature vector.
    """

    @configurable
    def __init__(
        self,
        num_channels,
        patch_size,
        num_stages,
        hidden_sizes,
        depths,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        cfg=None,
    ):
        super().__init__()
        self.cfg = cfg
        self.embeddings = ConvNextEmbeddings(num_channels, hidden_sizes, patch_size, layer_idx=0)
        self.encoder = ConvNextEncoder(hidden_sizes, depths, num_stages, drop_path_rate)
        # Final norm over the globally pooled features, on the last stage.
        self.layernorm = LayerNorm(hidden_sizes[-1], eps=layer_norm_eps, layer_idx=-1)
        # Weight init (skipped when the env flag says weights will be loaded).
        if os.getenv("ONEFLOW_LINEAR_EMBEDDING_SKIP_INIT", "0") != "1":
            self.apply(self._init_weight)

    def forward(self, x):
        features = self.encoder(self.embeddings(x))
        # Global average pool over the spatial dims, then LayerNorm.
        pooled = self.layernorm(features.mean([-2, -1]))
        return {"last_hidden_state": features, "pooled_output": pooled}

    def _init_weight(self, module):
        # Normal init for conv/linear weights, zero biases.
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.cfg.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    @classmethod
    def from_config(cls, cfg):
        # Maps the flat cfg object onto __init__ keyword arguments
        # (consumed by libai's @configurable decorator).
        keys = (
            "num_channels",
            "patch_size",
            "num_stages",
            "hidden_sizes",
            "depths",
            "layer_norm_eps",
            "drop_path_rate",
        )
        kwargs = {key: getattr(cfg, key) for key in keys}
        kwargs["cfg"] = cfg
        return kwargs
class ConvNextForImageClassification(nn.Module):
    """ConvNeXt backbone plus a linear classification head.

    In training mode ``forward`` returns ``{"losses": ...}``; in eval mode it
    returns ``{"prediction_scores": logits}``. The loss is chosen from
    ``cfg.problem_type`` (regression / single-label / multi-label), which is
    inferred from the labels on first use when unset.
    """

    @configurable
    def __init__(
        self,
        num_channels,
        patch_size,
        num_stages,
        hidden_sizes,
        depths,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        num_labels=None,
        cfg=None,
    ):
        super().__init__()
        self.cfg = cfg
        self.num_labels = num_labels
        self.convnext = ConvNextModel(
            num_channels=num_channels,
            patch_size=patch_size,
            num_stages=num_stages,
            hidden_sizes=hidden_sizes,
            depths=depths,
            layer_norm_eps=layer_norm_eps,
            drop_path_rate=drop_path_rate,
            cfg=self.cfg,
        )
        # Classifier head (placed on the last pipeline stage via layer_idx=-1)
        self.classifier = (
            Linear(hidden_sizes[-1], self.num_labels, layer_idx=-1)
            if num_labels > 0
            else nn.Identity()
        )
        # weight init (skipped when the env flag says weights will be loaded)
        if os.getenv("ONEFLOW_LINEAR_EMBEDDING_SKIP_INIT", "0") != "1":
            self.apply(self._init_weight)

    @classmethod
    def from_config(cls, cfg):
        # Maps the flat cfg object onto the __init__ keyword arguments
        # (consumed by libai's @configurable decorator).
        return {
            "num_channels": cfg.num_channels,
            "patch_size": cfg.patch_size,
            "num_stages": cfg.num_stages,
            "hidden_sizes": cfg.hidden_sizes,
            "depths": cfg.depths,
            "layer_norm_eps": cfg.layer_norm_eps,
            "drop_path_rate": cfg.drop_path_rate,
            "num_labels": cfg.num_labels,
            "cfg": cfg,
        }

    def _init_weight(self, module):
        # Normal init for conv/linear weights, zero biases.
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.cfg.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, images, labels):
        outputs = self.convnext(images)
        pooled_output = outputs["pooled_output"]
        logits = self.classifier(pooled_output)
        if labels is not None:
            # Infer the problem type once from head width and label dtype;
            # NOTE: this mutates self.cfg so later calls reuse the decision.
            if self.cfg.problem_type is None:
                if self.num_labels == 1:
                    self.cfg.problem_type = "regression"
                elif self.num_labels > 1 and (
                    labels.dtype == flow.long or labels.dtype == flow.int
                ):
                    self.cfg.problem_type = "single_label_classification"
                else:
                    self.cfg.problem_type = "multi_label_classification"
            if self.cfg.problem_type == "regression":
                loss_fct = nn.MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.cfg.problem_type == "single_label_classification":
                loss = flow._C.sparse_softmax_cross_entropy(
                    logits.view(-1, self.num_labels), labels.view(-1)
                )
            elif self.cfg.problem_type == "multi_label_classification":
                loss_fct = nn.BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
            return {"losses": loss}
        else:
            return {"prediction_scores": logits}

    @staticmethod
    def set_activation_checkpoint(model):
        # Enable activation checkpointing on every ConvNextStage; handles both
        # the pre-0.9 graph API (module_block.origin/.config) and the new one.
        for module_block in model.convnext.encoder.modules():
            if hasattr(module_block, "origin"):
                # Old API in OneFlow 0.8
                if isinstance(module_block.origin, ConvNextStage):
                    module_block.config.activation_checkpointing = True
            else:
                if isinstance(module_block.to(nn.Module), ConvNextStage):
                    module_block.to(flow.nn.graph.GraphModule).activation_checkpointing = True

    @staticmethod
    def set_pipeline_stage_id(model):
        dist_utils = dist.get_dist_util()
        # Set pipeline parallelism stage_id: embeddings on the first stage, each
        # ConvNextStage on its own layer_idx, norm + classifier on the last.
        if hasattr(model.convnext.embeddings, "config"):
            # Old API in OneFlow 0.8
            model.convnext.embeddings.config.set_stage(
                dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
            )
            for module_block in model.modules():
                if isinstance(module_block.origin, ConvNextStage):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
            model.convnext.layernorm.config.set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
            model.classifier.config.set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
        else:
            model.convnext.embeddings.to(flow.nn.graph.GraphModule).set_stage(
                dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
            )
            for module_block in model.modules():
                if isinstance(module_block.to(nn.Module), ConvNextStage):
                    module_block.to(flow.nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
            model.convnext.layernorm.to(flow.nn.graph.GraphModule).set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
            model.classifier.to(flow.nn.graph.GraphModule).set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
| 8,428 | 37.313636 | 97 | py |
libai | libai-main/projects/ConvNeXT/modeling/layer_norm.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.utils import distributed as dist
class ConvNextLayerNorm(nn.Module):
    """LayerNorm supporting both channel layouts used by ConvNeXt.

    ``data_format="channels_last"`` normalizes the trailing ``normalized_shape``
    dims with OneFlow's fused affine layer-norm kernel; ``"channels_first"``
    normalizes over dim 1 with explicit mean/variance math computed in fp32.

    Notes:
        * ``elementwise_affine`` is stored but not otherwise used — the weight
          and bias parameters are always created and applied.
        * The ``bias`` flag only controls whether the bias parameter receives
          gradients (``requires_grad``), not whether it exists.
    """

    def __init__(
        self,
        normalized_shape,
        eps=1e-5,
        elementwise_affine=True,
        bias=True,
        data_format="channels_last",
        *,
        layer_idx=0
    ):
        super().__init__()
        # BUGFIX: validate data_format up front; previously an unsupported value
        # fell through both forward branches and raised NameError on `y`.
        if data_format not in ("channels_last", "channels_first"):
            raise ValueError(f"Unsupported data format: {data_format}")
        if isinstance(normalized_shape, int):
            normalized_shape = (normalized_shape,)
        self.normalized_shape = tuple(normalized_shape)
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        self.data_format = data_format
        self.layer_idx = layer_idx
        # Affine parameters, replicated (broadcast) on the layer's placement.
        self.weight = nn.Parameter(
            flow.ones(
                normalized_shape,
                dtype=flow.float32,
                placement=dist.get_layer_placement(layer_idx),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            )
        )
        self.bias = nn.Parameter(
            flow.zeros(
                normalized_shape,
                dtype=flow.float32,
                placement=dist.get_layer_placement(layer_idx),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            ),
            requires_grad=bias,
        )

    def forward(self, x):
        # Move the input onto this layer's placement before computing.
        x = x.to_global(placement=self.weight.placement)
        if self.data_format == "channels_last":
            begin_norm_axis = x.ndim - len(self.normalized_shape)
            begin_params_axis = x.ndim - len(self.normalized_shape)
            y = flow._C.layer_norm_affine(
                x,
                self.weight,
                self.bias,
                begin_norm_axis=begin_norm_axis,
                begin_params_axis=begin_params_axis,
                epsilon=self.eps,
            )
        else:  # channels_first (validated in __init__)
            input_dtype = x.dtype
            # Normalize in fp32 for numerical stability, cast back afterwards.
            x = x.float()
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / flow.sqrt(s + self.eps)
            x = x.to(dtype=input_dtype)
            # Broadcast the affine params over the spatial dims (N, C, H, W).
            y = self.weight[:, None, None] * x + self.bias[:, None, None]
        return y

    def extra_repr(self) -> str:
        return "{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}".format(
            **self.__dict__
        )
| 2,993 | 33.413793 | 95 | py |
libai | libai-main/projects/text_classification/dataset/download_clue_data.py | # flake8: noqa
""" Script for downloading all CLUE data.
The original dataset information links
available from: https://www.cluebenchmarks.com/
Example usage:
python download_clue_data.py --data_dir data --tasks all
"""
import argparse
import os
import sys
import urllib.request
import zipfile
# CLUE task identifiers accepted on the command line.
TASKS = [
    "afqmc",
    "cmnli",
    "copa",
    "csl",
    "iflytek",
    "tnews",
    "wsc",
    "cmrc",
    "chid",
    "drcd",
]

# Download URL of the public data archive for each task.
TASK2PATH = {
    "afqmc": "https://storage.googleapis.com/cluebenchmark/tasks/afqmc_public.zip",
    "cmnli": "https://storage.googleapis.com/cluebenchmark/tasks/cmnli_public.zip",
    "copa": "https://storage.googleapis.com/cluebenchmark/tasks/copa_public.zip",
    "csl": "https://storage.googleapis.com/cluebenchmark/tasks/csl_public.zip",
    "iflytek": "https://storage.googleapis.com/cluebenchmark/tasks/iflytek_public.zip",
    "tnews": "https://storage.googleapis.com/cluebenchmark/tasks/tnews_public.zip",
    "wsc": "https://storage.googleapis.com/cluebenchmark/tasks/cluewsc2020_public.zip",
    "cmrc": "https://storage.googleapis.com/cluebenchmark/tasks/cmrc2018_public.zip",
    "chid": "https://storage.googleapis.com/cluebenchmark/tasks/chid_public.zip",
    "drcd": "https://storage.googleapis.com/cluebenchmark/tasks/drcd_public.zip",
}
def download_and_extract(task, data_dir):
    """Download the zip archive for ``task`` and extract it into ``data_dir/task``.

    Args:
        task: CLUE task name; must be a key of ``TASK2PATH``.
        data_dir: root directory for downloaded data; created if missing.

    The temporary zip file is deleted after extraction.
    """
    print("Downloading and extracting %s..." % task)
    # BUGFIX/robustness: os.makedirs(exist_ok=True) handles nested paths
    # and concurrent creation, where the original bare os.mkdir raised
    # FileNotFoundError (missing parent) or FileExistsError (race).
    os.makedirs(data_dir, exist_ok=True)
    data_file = os.path.join(data_dir, "%s_public.zip" % task)
    save_dir = os.path.join(data_dir, task)
    os.makedirs(save_dir, exist_ok=True)
    urllib.request.urlretrieve(TASK2PATH[task], data_file)
    with zipfile.ZipFile(data_file) as zip_ref:
        zip_ref.extractall(save_dir)
    os.remove(data_file)
    print(f"\tCompleted! Downloaded {task} data to directory {save_dir}")
def get_tasks(task_names):
    """Parse a comma-separated task string into a list of valid task names.

    The special value "all" (anywhere in the string) selects every known
    task. An unknown task name raises AssertionError.
    """
    requested = task_names.split(",")
    if "all" in requested:
        return TASKS
    selected = []
    for name in requested:
        assert name in TASKS, "Task %s not found!" % name
        selected.append(name)
    return selected
def main(arguments):
    """CLI entry point: download the requested CLUE tasks into --data_dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-d",
        "--data_dir",
        help="directory to save data to",
        type=str,
        default="./clue_data",
    )
    parser.add_argument(
        "-t",
        "--tasks",
        help="tasks to download data for as a comma separated string",
        type=str,
        default="all",
    )
    args = parser.parse_args(arguments)

    if not os.path.exists(args.data_dir):
        os.mkdir(args.data_dir)
    for task in get_tasks(args.tasks):
        download_and_extract(task, args.data_dir)
# Script entry point: forward the CLI arguments (minus the program name)
# to main() and use its return value as the process exit status.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
| 2,854 | 28.43299 | 87 | py |
libai | libai-main/projects/text_classification/dataset/utils_glue.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from .utils import DataProcessor, EncodePattern, InputExample, InputFeatures
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
def glue_convert_examples_to_features(
    examples,
    tokenizer,
    max_length,
    task=None,
    pattern=EncodePattern.bert_pattern,
    label_list=None,
    output_mode=None,
):
    """Convert ``InputExample`` objects into padded ``InputFeatures``.

    Args:
        examples: iterable of ``InputExample``.
        tokenizer: tokenizer exposing ``tokenize``, ``convert_tokens_to_ids``,
            ``start_token``, ``end_token`` and ``pad_token_id``.
        max_length: maximum total sequence length, special tokens included.
        task: optional GLUE task name; when given, missing ``label_list`` /
            ``output_mode`` are filled from the task registries.
        pattern: special-token layout (BERT or RoBERTa ``EncodePattern``).
        label_list: list of class labels; derived from ``task`` if omitted.
        output_mode: "classification" or "regression".

    Returns:
        list of ``InputFeatures``; ``labels`` is ``None`` for test-set examples.

    Raises:
        KeyError: if ``pattern`` is not a supported ``EncodePattern``.
    """
    if task is not None:
        processor = glue_processors[task]()
        if label_list is None:
            label_list = processor.get_labels()
            logger.info(f"Using label list {label_list} for task {task}")
        if output_mode is None:
            output_mode = glue_output_modes[task]
            logger.info(f"Using output mode {output_mode} for task {task}")

    label_map = {label: i for i, label in enumerate(label_list)}

    start_token = [] if tokenizer.start_token is None else [tokenizer.start_token]
    end_token = [] if tokenizer.end_token is None else [tokenizer.end_token]
    pad_id = tokenizer.pad_token_id
    # Number of special tokens added for [single-sentence, sentence-pair]
    # inputs: BERT: [CLS] a [SEP] b [SEP]; RoBERTa: <s> a </s> </s> b </s>.
    if pattern == EncodePattern.bert_pattern:
        added_special_tokens = [2, 3]
    elif pattern == EncodePattern.roberta_pattern:
        added_special_tokens = [2, 4]
    else:
        raise KeyError("pattern is not a valid EncodePattern")

    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))

        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Reserve room for the special tokens of a sentence pair.
            _truncate_seq_pair(tokens_a, tokens_b, max_length - added_special_tokens[1])
        else:
            if len(tokens_a) > max_length - added_special_tokens[0]:
                tokens_a = tokens_a[: (max_length - added_special_tokens[0])]

        if pattern is EncodePattern.bert_pattern:
            tokens = start_token + tokens_a + end_token
            token_type_ids = [0] * len(tokens)
            if tokens_b:
                tokens += tokens_b + end_token
                token_type_ids += [1] * (len(tokens) - len(token_type_ids))
        elif pattern is EncodePattern.roberta_pattern:
            tokens = start_token + tokens_a + end_token
            token_type_ids = [0] * len(tokens)
            if tokens_b:
                # RoBERTa separates the pair with a doubled end token.
                tokens += end_token + tokens_b + end_token
                token_type_ids += [1] * (len(tokens) - len(token_type_ids))
        else:
            raise KeyError("pattern is not a valid EncodePattern")

        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        attention_mask = [1] * len(input_ids)

        # Right-pad everything to max_length; padded positions are masked out.
        padding_length = max_length - len(input_ids)
        input_ids = input_ids + ([pad_id] * padding_length)
        attention_mask = attention_mask + ([0] * padding_length)
        token_type_ids = token_type_ids + ([0] * padding_length)

        label = None
        if example.label is not None:
            if output_mode == "classification":
                label = label_map[example.label]
            elif output_mode == "regression":
                label = float(example.label)

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
            logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
            # BUGFIX: the original formatted `label` with "%d", which raised
            # TypeError on test-set examples where `label` is None.
            logger.info("label: %s (id = %s)" % (example.label, label))

        features.append(
            InputFeatures(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                labels=label,
            )
        )
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version).

    Sentence-pair classification: decide whether the two sentences
    are paraphrases (carry the same meaning).
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.tsv")
        return self._create_examples(self._read_tsv(path), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into InputExample objects (header row skipped)."""
        is_test = set_type == "test"
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue
            examples.append(
                InputExample(
                    guid=f"{set_type}-{idx}",
                    text_a=row[3],
                    text_b=row[4],
                    label=None if is_test else row[0],
                )
            )
        return examples
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version).

    Sentence-pair classification: given a premise and a hypothesis,
    predict entailment, contradiction, or neutral.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev_matched.tsv")
        return self._create_examples(self._read_tsv(path), "dev_matched")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test_matched.tsv")
        return self._create_examples(self._read_tsv(path), "test_matched")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into InputExample objects (header row skipped)."""
        is_test = set_type.startswith("test")
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue
            examples.append(
                InputExample(
                    guid=f"{set_type}-{row[0]}",
                    text_a=row[8],
                    text_b=row[9],
                    label=None if is_test else row[-1],
                )
            )
        return examples
class MnliMismatchedProcessor(MnliProcessor):
    """Processor for the MultiNLI Mismatched data set (GLUE version).

    Identical to MnliProcessor except that dev/test use the
    out-of-domain "mismatched" splits.
    """

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev_mismatched.tsv")
        return self._create_examples(self._read_tsv(path), "dev_mismatched")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test_mismatched.tsv")
        return self._create_examples(self._read_tsv(path), "test_mismatched")
class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version).

    Single-sentence classification: decide whether a sentence is
    grammatically acceptable English.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.tsv")
        return self._create_examples(self._read_tsv(path), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Build single-sentence InputExamples; only the test TSV has a header."""
        is_test = set_type == "test"
        rows = lines[1:] if is_test else lines  # drop the header of the test split
        col = 1 if is_test else 3
        examples = []
        for idx, row in enumerate(rows):
            examples.append(
                InputExample(
                    guid=f"{set_type}-{idx}",
                    text_a=row[col],
                    text_b=None,
                    label=None if is_test else row[1],
                )
            )
        return examples
class Sst2Processor(DataProcessor):
    """Processor for the SST-2 data set (GLUE version).

    Single-sentence classification: predict the binary
    (positive/negative) sentiment of a sentence.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.tsv")
        return self._create_examples(self._read_tsv(path), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into InputExample objects (header row skipped)."""
        is_test = set_type == "test"
        col = 1 if is_test else 0
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue
            examples.append(
                InputExample(
                    guid=f"{set_type}-{idx}",
                    text_a=row[col],
                    text_b=None,
                    label=None if is_test else row[1],
                )
            )
        return examples
class StsbProcessor(DataProcessor):
    """Processor for the STS-B data set (GLUE version).

    Sentence-pair regression: predict a real-valued similarity score
    for the two sentences.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.tsv")
        return self._create_examples(self._read_tsv(path), "test")

    def get_labels(self):
        """See base class."""
        return [None]  # regression task: no discrete label set

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into InputExample objects (header row skipped)."""
        is_test = set_type == "test"
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue
            examples.append(
                InputExample(
                    guid=f"{set_type}-{row[0]}",
                    text_a=row[7],
                    text_b=row[8],
                    label=None if is_test else row[-1],
                )
            )
        return examples
class QqpProcessor(DataProcessor):
    """Processor for the QQP data set (GLUE version).

    Sentence-pair classification: decide whether two questions are
    semantically equivalent.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.tsv")
        return self._create_examples(self._read_tsv(path), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Build question-pair InputExamples; rows missing columns are dropped."""
        is_test = set_type == "test"
        q1_col, q2_col = (1, 2) if is_test else (3, 4)
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue
            guid = f"{set_type}-{row[0]}"
            try:
                text_a = row[q1_col]
                text_b = row[q2_col]
                label = None if is_test else row[5]
            except IndexError:
                continue  # malformed row: too few columns
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class QnliProcessor(DataProcessor):
    """Processor for the QNLI data set (GLUE version).

    Sentence-pair classification: decide whether the context sentence
    contains the answer to the question.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.tsv")
        return self._create_examples(self._read_tsv(path), "test")

    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into InputExample objects (header row skipped)."""
        is_test = set_type == "test"
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue
            examples.append(
                InputExample(
                    guid=f"{set_type}-{row[0]}",
                    text_a=row[1],
                    text_b=row[2],
                    label=None if is_test else row[-1],
                )
            )
        return examples
class RteProcessor(DataProcessor):
    """Processor for the RTE data set (GLUE version).

    Recognizing Textual Entailment: predict whether the pair is
    entailment or not_entailment (neutral/contradiction collapsed).
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.tsv")
        return self._create_examples(self._read_tsv(path), "test")

    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into InputExample objects (header row skipped)."""
        is_test = set_type == "test"
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue
            examples.append(
                InputExample(
                    guid=f"{set_type}-{row[0]}",
                    text_a=row[1],
                    text_b=row[2],
                    label=None if is_test else row[-1],
                )
            )
        return examples
class WnliProcessor(DataProcessor):
    """Processor for the WNLI data set (GLUE version).

    Sentence-pair classification: decide whether the sentence with the
    pronoun substituted is entailed by the original sentence.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.tsv")
        return self._create_examples(self._read_tsv(path), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into InputExample objects (header row skipped)."""
        is_test = set_type == "test"
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue
            examples.append(
                InputExample(
                    guid=f"{set_type}-{row[0]}",
                    text_a=row[1],
                    text_b=row[2],
                    label=None if is_test else row[-1],
                )
            )
        return examples
# Number of target classes per GLUE task ("sts-b" is a regression task,
# hence a single real-valued output).
glue_tasks_num_labels = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
# Maps each task name to the DataProcessor subclass that reads its raw files.
glue_processors = {
    "cola": ColaProcessor,
    "mnli": MnliProcessor,
    "mnli-mm": MnliMismatchedProcessor,
    "mrpc": MrpcProcessor,
    "sst-2": Sst2Processor,
    "sts-b": StsbProcessor,
    "qqp": QqpProcessor,
    "qnli": QnliProcessor,
    "rte": RteProcessor,
    "wnli": WnliProcessor,
}
# Whether each task is treated as classification or regression.
glue_output_modes = {
    "cola": "classification",
    "mnli": "classification",
    "mnli-mm": "classification",
    "mrpc": "classification",
    "sst-2": "classification",
    "sts-b": "regression",
    "qqp": "classification",
    "qnli": "classification",
    "rte": "classification",
    "wnli": "classification",
}
| 18,832 | 34.804183 | 100 | py |
libai | libai-main/projects/text_classification/dataset/utils_clue.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from .utils import DataProcessor, EncodePattern, InputExample, InputFeatures
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
def clue_convert_examples_to_features(
    examples,
    tokenizer,
    max_length,
    task=None,
    pattern=EncodePattern.bert_pattern,
    label_list=None,
    output_mode=None,
):
    """Convert ``InputExample`` objects into padded ``InputFeatures``.

    Args:
        examples: iterable of ``InputExample``.
        tokenizer: tokenizer exposing ``tokenize``, ``convert_tokens_to_ids``,
            ``start_token``, ``end_token`` and ``pad_token_id``.
        max_length: maximum total sequence length, special tokens included.
        task: optional CLUE task name; when given, missing ``label_list`` /
            ``output_mode`` are filled from the task registries.
        pattern: special-token layout (BERT or RoBERTa ``EncodePattern``).
        label_list: list of class labels; derived from ``task`` if omitted.
        output_mode: "classification" or "regression".

    Returns:
        list of ``InputFeatures``; ``labels`` is ``None`` for test-set examples.

    Raises:
        KeyError: if ``pattern`` is not a supported ``EncodePattern``.
    """
    if task is not None:
        processor = clue_processors[task]()
        if label_list is None:
            label_list = processor.get_labels()
            logger.info(f"Using label list {label_list} for task {task}")
        if output_mode is None:
            output_mode = clue_output_modes[task]
            logger.info(f"Using output mode {output_mode} for task {task}")

    label_map = {label: i for i, label in enumerate(label_list)}

    start_token = [] if tokenizer.start_token is None else [tokenizer.start_token]
    end_token = [] if tokenizer.end_token is None else [tokenizer.end_token]
    pad_id = tokenizer.pad_token_id
    # Number of special tokens added for [single-sentence, sentence-pair]
    # inputs: BERT: [CLS] a [SEP] b [SEP]; RoBERTa: <s> a </s> </s> b </s>.
    if pattern == EncodePattern.bert_pattern:
        added_special_tokens = [2, 3]
    elif pattern == EncodePattern.roberta_pattern:
        added_special_tokens = [2, 4]
    else:
        raise KeyError("pattern is not a valid EncodePattern")

    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))

        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Reserve room for the special tokens of a sentence pair.
            _truncate_seq_pair(tokens_a, tokens_b, max_length - added_special_tokens[1])
        else:
            if len(tokens_a) > max_length - added_special_tokens[0]:
                tokens_a = tokens_a[: (max_length - added_special_tokens[0])]

        if pattern is EncodePattern.bert_pattern:
            tokens = start_token + tokens_a + end_token
            token_type_ids = [0] * len(tokens)
            if tokens_b:
                tokens += tokens_b + end_token
                token_type_ids += [1] * (len(tokens) - len(token_type_ids))
        elif pattern is EncodePattern.roberta_pattern:
            tokens = start_token + tokens_a + end_token
            token_type_ids = [0] * len(tokens)
            if tokens_b:
                # RoBERTa separates the pair with a doubled end token.
                tokens += end_token + tokens_b + end_token
                token_type_ids += [1] * (len(tokens) - len(token_type_ids))
        else:
            raise KeyError("pattern is not a valid EncodePattern")

        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        attention_mask = [1] * len(input_ids)

        # Right-pad everything to max_length; padded positions are masked out.
        padding_length = max_length - len(input_ids)
        input_ids = input_ids + ([pad_id] * padding_length)
        attention_mask = attention_mask + ([0] * padding_length)
        token_type_ids = token_type_ids + ([0] * padding_length)

        label = None
        if example.label is not None:
            if output_mode == "classification":
                label = label_map[example.label]
            elif output_mode == "regression":
                label = float(example.label)

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
            logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
            # BUGFIX: the original formatted `label` with "%d", which raised
            # TypeError on test-set examples where `label` is None.
            logger.info("label: %s (id = %s)" % (example.label, label))

        features.append(
            InputFeatures(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                labels=label,
            )
        )
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class TnewsProcessor(DataProcessor):
    """Processor for the TNEWS data set (CLUE version).

    Single-sentence classification: predict which news category a
    headline belongs to.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.json")
        return self._create_examples(self._read_json(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.json")
        return self._create_examples(self._read_json(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.json")
        return self._create_examples(self._read_json(path), "test")

    def get_labels(self):
        """See base class."""
        # 17 raw category ids (100-116) minus the two unused ones (105, 111).
        return [str(100 + i) for i in range(17) if i not in (5, 11)]

    def _create_examples(self, lines, set_type):
        """Turn parsed JSON records into InputExample objects."""
        examples = []
        for idx, item in enumerate(lines):
            examples.append(
                InputExample(
                    guid=f"{set_type}-{idx}",
                    text_a=item["sentence"],
                    text_b=None,
                    label=None if set_type == "test" else str(item["label"]),
                )
            )
        return examples
class IflytekProcessor(DataProcessor):
    """Processor for the IFLYTEK data set (CLUE version).

    Single-sentence classification: predict the app category from its
    long description.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.json")
        return self._create_examples(self._read_json(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.json")
        return self._create_examples(self._read_json(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.json")
        return self._create_examples(self._read_json(path), "test")

    def get_labels(self):
        """See base class."""
        return [str(i) for i in range(119)]

    def _create_examples(self, lines, set_type):
        """Turn parsed JSON records into InputExample objects."""
        examples = []
        for idx, item in enumerate(lines):
            examples.append(
                InputExample(
                    guid=f"{set_type}-{idx}",
                    text_a=item["sentence"],
                    text_b=None,
                    label=None if set_type == "test" else str(item["label"]),
                )
            )
        return examples
class AfqmcProcessor(DataProcessor):
    """Processor for the AFQMC data set (CLUE version).

    Sentence-pair classification: decide whether two sentences are
    semantically similar.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.json")
        return self._create_examples(self._read_json(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.json")
        return self._create_examples(self._read_json(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.json")
        return self._create_examples(self._read_json(path), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Turn parsed JSON records into InputExample objects."""
        examples = []
        for idx, item in enumerate(lines):
            examples.append(
                InputExample(
                    guid=f"{set_type}-{idx}",
                    text_a=item["sentence1"],
                    text_b=item["sentence2"],
                    label=None if set_type == "test" else str(item["label"]),
                )
            )
        return examples
class OcnliProcessor(DataProcessor):
    """Processor for the OCNLI data set (CLUE version).

    Sentence-pair classification: given a premise and a hypothesis,
    predict entailment, contradiction, or neutral.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, "train.json")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, "dev.json")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, "test.json")), "test")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training, dev and test sets.

        Rows whose gold label is "-" (no annotator consensus) are skipped.
        """
        examples = []
        for (i, line) in enumerate(lines):
            guid = f"{set_type}-{i}"
            text_a = line["sentence1"]
            text_b = line["sentence2"]
            label = None if set_type == "test" else str(line["label"])
            # BUGFIX: on the test split `label` is None, and the original
            # unconditional label.strip() raised AttributeError.
            if label is not None and label.strip() == "-":
                continue
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class CmnliProcessor(DataProcessor):
    """Processor for the CMNLI data set (CLUE version).

    Sentence-pair classification: given a premise and a hypothesis,
    predict entailment, contradiction, or neutral.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, "train.json")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, "dev.json")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, "test.json")), "test")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training, dev and test sets.

        Rows whose gold label is "-" (no annotator consensus) are skipped.
        """
        examples = []
        for (i, line) in enumerate(lines):
            guid = f"{set_type}-{i}"
            text_a = line["sentence1"]
            text_b = line["sentence2"]
            label = None if set_type == "test" else str(line["label"])
            # BUGFIX: on the test split `label` is None, and the original
            # unconditional label.strip() raised AttributeError.
            if label is not None and label.strip() == "-":
                continue
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class CslProcessor(DataProcessor):
    """Processor for the CSL data set (CLUE version).

    Pair classification: decide whether the given keywords all match
    the paper abstract.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.json")
        return self._create_examples(self._read_json(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.json")
        return self._create_examples(self._read_json(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.json")
        return self._create_examples(self._read_json(path), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Turn parsed JSON records into InputExample objects."""
        examples = []
        for idx, item in enumerate(lines):
            examples.append(
                InputExample(
                    guid=f"{set_type}-{idx}",
                    text_a=" ".join(item["keyword"]),
                    text_b=item["abst"],
                    label=None if set_type == "test" else str(item["label"]),
                )
            )
        return examples
class WscProcessor(DataProcessor):
    """Processor for the WSC data set (CLUE version).

    Coreference task: the text is rewritten so that the candidate noun
    phrase (query) is wrapped in underscores and the pronoun in square
    brackets; the model predicts whether the pronoun refers to the query.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, "train.json")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, "dev.json")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, "test.json")), "test")

    def get_labels(self):
        """See base class."""
        return ["true", "false"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = f"{set_type}-{i}"
            text_a = line["text"]
            text_a_list = list(text_a)
            target = line["target"]
            # span1 is the candidate noun phrase (query); span2 is the pronoun.
            query = target["span1_text"]
            query_idx = target["span1_index"]
            pronoun = target["span2_text"]
            pronoun_idx = target["span2_index"]
            # Sanity-check the annotated character offsets against the text.
            assert (
                text_a[pronoun_idx : (pronoun_idx + len(pronoun))] == pronoun
            ), "pronoun: {}".format(pronoun)
            assert text_a[query_idx : (query_idx + len(query))] == query, "query: {}".format(query)
            # Insert the earlier span's markers first; the later span's indices
            # must then be shifted by the 2 characters already inserted.
            if pronoun_idx > query_idx:
                text_a_list.insert(query_idx, "_")
                text_a_list.insert(query_idx + len(query) + 1, "_")
                text_a_list.insert(pronoun_idx + 2, "[")
                text_a_list.insert(pronoun_idx + len(pronoun) + 2 + 1, "]")
            else:
                text_a_list.insert(pronoun_idx, "[")
                text_a_list.insert(pronoun_idx + len(pronoun) + 1, "]")
                text_a_list.insert(query_idx + 2, "_")
                text_a_list.insert(query_idx + len(query) + 2 + 1, "_")
            text_a = "".join(text_a_list)
            label = None if set_type == "test" else str(line["label"])
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples
class CopaProcessor(DataProcessor):
    """Processor for the COPA data set (CLUE version).

    Each raw item (premise, choice0, choice1, question type) is expanded
    into two sentence-pair examples, one per candidate choice; the
    correct choice's pair is labelled "1" and the other "0".
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, "train.json")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, "dev.json")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, "test.json")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Expand each raw COPA item into two labelled pair examples."""
        examples = []
        for (i, line) in enumerate(lines):
            i = 2 * i
            guid1 = f"{set_type}-{i}"
            guid2 = "%s-%s" % (set_type, i + 1)
            premise = line["premise"]
            choice0 = line["choice0"]
            # The choice0 pair is positive iff choice0 is the answer.
            label = None if set_type == "test" else str(1 if line["label"] == 0 else 0)
            choice1 = line["choice1"]
            # BUGFIX: this previously duplicated `label`, giving both choices
            # of a pair the same tag. The choice1 pair is positive iff
            # choice1 is the answer — the complement of `label`. The test
            # split has no labels at all.
            label2 = None if set_type == "test" else str(0 if line["label"] == 0 else 1)
            if line["question"] == "effect":
                # "effect": the premise causes the choice, so premise first.
                text_a = premise
                text_b = choice0
                text_a2 = premise
                text_b2 = choice1
            elif line["question"] == "cause":
                # "cause": the choice causes the premise, so choice first.
                text_a = choice0
                text_b = premise
                text_a2 = choice1
                text_b2 = premise
            else:
                raise ValueError(f'unknowed {line["question"]} type')
            examples.append(InputExample(guid=guid1, text_a=text_a, text_b=text_b, label=label))
            examples.append(InputExample(guid=guid2, text_a=text_a2, text_b=text_b2, label=label2))
        return examples

    def _create_examples_version2(self, lines, set_type):
        """Alternative single-example formulation (unused): append a
        question prompt to the premise and compare the two choices."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = f"{set_type}-{i}"
            if line["question"] == "cause":
                text_a = line["premise"] + "这是什么原因造成的?" + line["choice0"]
                text_b = line["premise"] + "这是什么原因造成的?" + line["choice1"]
            else:
                text_a = line["premise"] + "这造成了什么影响?" + line["choice0"]
                text_b = line["premise"] + "这造成了什么影响?" + line["choice1"]
            label = None if set_type == "test" else str(1 if line["label"] == 0 else 0)
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
# Number of target classes for each supported CLUE task.
clue_tasks_num_labels = dict(
    iflytek=119,
    cmnli=3,
    ocnli=3,
    afqmc=2,
    csl=2,
    wsc=2,
    copa=2,
    tnews=15,
)
# Maps each CLUE task name to its DataProcessor implementation.
clue_processors = dict(
    tnews=TnewsProcessor,
    iflytek=IflytekProcessor,
    cmnli=CmnliProcessor,
    ocnli=OcnliProcessor,
    afqmc=AfqmcProcessor,
    csl=CslProcessor,
    wsc=WscProcessor,
    copa=CopaProcessor,
)
# Every supported CLUE task is treated as a classification problem.
clue_output_modes = {
    task: "classification"
    for task in ("tnews", "iflytek", "cmnli", "ocnli", "afqmc", "csl", "wsc", "copa")
}
| 18,613 | 36.910387 | 100 | py |
libai | libai-main/projects/text_classification/dataset/utils.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
class EncodePattern(Enum):
    """Special-token layout used when encoding one or two sentences.

    In the pattern values, ``S`` is the start token, ``E`` a
    separator/end token, and ``*`` a text segment.

    bert pattern:
        single sentence: [CLS] A [SEP]
        pair of sentences: [CLS] A [SEP] B [SEP]
    roberta/bart pattern:
        single sentence: <s> A </s>
        pair of sentences: <s> A </s> </s> B </s>
    """

    bert_pattern = "S*E*E"
    roberta_pattern = "S*EE*E"
@dataclass
class InputExample:
    """A single training/test example for simple sequence classification.

    Attributes:
        guid: Unique id for the example.
        text_a: Untokenized text of the first sequence; the only required
            sequence for single-sentence tasks.
        text_b: Untokenized text of the second sequence; only needed for
            sequence-pair tasks.
        label: Label of the example, set for train/dev examples but left
            as ``None`` for test examples.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """
    A single set of features of data.
    Property names are the same names as the corresponding inputs to a model.
    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
            Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED,
            `0` for MASKED (padded) tokens.
        token_type_ids: (Optional) Segment token indices to indicate first and second
            portions of the inputs. Only some models use them.
        labels: (Optional) Label corresponding to the input. Int for classification problems,
            float for regression problems.
    """
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    labels: Optional[Union[int, float]] = None
class DataProcessor:
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of [`InputExample`] for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of [`InputExample`] for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of [`InputExample`] for the test set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file and returns its rows as lists."""
        # utf-8-sig transparently drops a leading BOM if present
        with open(input_file, "r", encoding="utf-8-sig") as handle:
            return list(csv.reader(handle, delimiter="\t", quotechar=quotechar))

    @classmethod
    def _read_json(cls, input_file):
        """Reads a newline-delimited JSON file into a list of objects."""
        with open(input_file, "r") as handle:
            return [json.loads(raw_line.strip()) for raw_line in handle]
| 4,059 | 33.117647 | 92 | py |
libai | libai-main/projects/text_classification/dataset/glue_dataset.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
from enum import Enum
from typing import Optional, Union
import oneflow as flow
from filelock import FileLock
from oneflow.utils.data import Dataset
from libai.data.structures import DistTensorData, Instance
from .utils import EncodePattern
from .utils_glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
logger = logging.getLogger(__name__)
class Split(Enum):
    """Dataset split names; ``value`` is embedded in the cache-file name."""

    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    """Map-style dataset for GLUE-format sequence classification tasks.

    Converts the raw task files into ``InputFeatures`` with the given
    tokenizer and caches the result on disk so repeated runs skip the
    (expensive) preprocessing step.

    Args:
        task_name: Key into ``glue_processors`` / ``glue_output_modes``.
        data_dir: Directory containing the raw train/dev/test files.
        tokenizer: Tokenizer used to encode the examples.
        max_seq_length: Pad/truncate every example to this length.
        mode: Split to load — "train", "dev", "test" or a ``Split`` member.
        pattern: Special-token layout name or ``EncodePattern`` member.
        cache_dir: Where to store cached features (defaults to ``data_dir``).
        overwrite_cache: If True, rebuild features even if a cache exists.
    """

    def __init__(
        self,
        task_name,
        data_dir,
        tokenizer,
        max_seq_length: int = 128,
        mode: Union[str, Split] = Split.train,
        pattern: Union[str, EncodePattern] = EncodePattern.bert_pattern,
        cache_dir: Optional[str] = None,
        overwrite_cache: bool = False,
    ):
        self.processor = glue_processors[task_name]()
        self.output_mode = glue_output_modes[task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        if isinstance(pattern, str):
            try:
                pattern = EncodePattern[pattern]
            except KeyError:
                raise KeyError("pattern is not a valid pattern method")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{max_seq_length}_{task_name}",
        )
        label_list = self.processor.get_labels()
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                start = time.time()
                self.features = flow.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]",
                    time.time() - start,
                )
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(data_dir)
                else:
                    examples = self.processor.get_train_examples(data_dir)
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=max_seq_length,
                    pattern=pattern,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                flow.save(self.features, cached_features_file)
                logger.info(
                    f"Saving features into cached file {cached_features_file} "
                    f"[took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        """Number of featurized examples in this split."""
        return len(self.features)

    def __getitem__(self, i):
        """Return example ``i`` as an ``Instance`` of ``DistTensorData``."""
        feature = self.features[i]
        tensors = {}
        for k, v in feature.__dict__.items():
            if v is not None:
                # BUGFIX: ``InputFeatures`` names its label field ``labels``,
                # so the original ``k == "label"`` check never matched — float
                # (regression) labels were silently truncated to flow.long and
                # labels never received the last-stage placement. Accept both
                # spellings to stay compatible with any older cached features.
                if k in ("label", "labels"):
                    dtype = flow.long if isinstance(v, int) else flow.float
                    t = flow.tensor(v, dtype=dtype)
                    # labels live on the last pipeline stage, next to the loss
                    tensors[k] = DistTensorData(t, placement_idx=-1)
                elif k == "attention_mask":
                    t = flow.tensor(v, dtype=flow.bool)
                    tensors[k] = DistTensorData(t)
                else:
                    t = flow.tensor(v, dtype=flow.long)
                    tensors[k] = DistTensorData(t)
        sample = Instance(**tensors)
        return sample

    def get_labels(self):
        """Return the list of label names for this task."""
        return self.label_list
| 4,780 | 35.219697 | 95 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.