"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
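
# Usage sketch: with a fairseq WavLM checkpoint from the unilm repository on disk,
# the conversion can also be driven from Python. "WavLM-Base.pt" below is a
# placeholder file name for whichever checkpoint was downloaded in Steps 1-4, not
# a file shipped with this script:
#
#   convert_wavlm_checkpoint("WavLM-Base.pt", "./wavlm-base-hf")
#   model = WavLMModel.from_pretrained("./wavlm-base-hf")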
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
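
# Usage sketch for the pipeline above; the checkpoint name "Intel/dpt-large" is an
# assumption -- any model registered in MODEL_FOR_DEPTH_ESTIMATION_MAPPING works
# the same way:
#
#   from transformers import pipeline
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")        # PIL image rescaled to 0-255
#   print(result["predicted_depth"].shape)   # raw depth tensor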
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。')
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type='mecab')
        self.assertIsNotNone(tokenizer)

        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
        with open(filename, 'wb') as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, 'rb') as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer(self):
        tokenizer = MecabTokenizer(mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic='unidic_lite')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic='unidic')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option='-d /usr/local/lib/mecab/dic/jumandic')
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type='sudachi')
        self.assertIsNotNone(tokenizer)

        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
        with open(filename, 'wb') as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, 'rb') as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='A')
        self.assertListEqual(tokenizer.tokenize('外国人参政権'), ['外国', '人', '参政', '権'])
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='B')
        self.assertListEqual(tokenizer.tokenize('外国人参政権'), ['外国人', '参政権'])
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='C')
        self.assertListEqual(tokenizer.tokenize('外国人参政権'), ['外国人参政権'])
@require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type='jumanpp')
        self.assertIsNotNone(tokenizer)

        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
        with open(filename, 'wb') as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, 'rb') as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp')
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。')
        self.assertListEqual(tokens, ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'])

        tokens = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは')
        self.assertListEqual(tokens, ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese')

        text = tokenizer.encode('ありがとう。', add_special_tokens=False)
        text_2 = tokenizer.encode('どういたしまして。', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type='character', **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
        return input_text, output_text
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type='character')

        tokens = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。')
        self.assertListEqual(
            tokens, ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
    def test_character_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char')

        text = tokenizer.encode('ありがとう。', add_special_tokens=False)
        text_2 = tokenizer.encode('どういたしまして。', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        with self.assertLogs('transformers', level='WARNING') as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    'The tokenizer class you load from this checkpoint is not the same type as the class this function'
                    ' is called from.'))

        EXAMPLE_BERT_ID = 'bert-base-cased'
        with self.assertLogs('transformers', level='WARNING') as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    'The tokenizer class you load from this checkpoint is not the same type as the class this function'
                    ' is called from.'))
' is called from.' ) ) | 364 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new('RGB', (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
lowerCAmelCase__ :Tuple = generate_images(GLIDER, 1_6)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
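
# A quick sanity check of new_generation, following the rules above: the blinker
# oscillates with period two, so two updates return it to its starting state.
_first = new_generation(BLINKER)
assert _first == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert new_generation(_first) == BLINKER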
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'''
_DESCRIPTION = '''\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'''
_KWARGS_DESCRIPTION = '''\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len=1, max_len=4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
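
# A hand-rolled sketch of the per-sentence quantity this metric wraps, assuming the
# definition quoted in _DESCRIPTION (GLEU = min(n-gram precision, n-gram recall) over
# all n-grams of order min_len..max_len); the metric itself delegates the corpus-level
# computation to nltk's gleu_score.corpus_gleu above.
def _sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    from collections import Counter

    def ngrams(tokens):
        # multiset of all n-grams of order min_len..max_len
        return Counter(
            tuple(tokens[i : i + n])
            for n in range(min_len, max_len + 1)
            for i in range(len(tokens) - n + 1)
        )

    hyp, ref = ngrams(hypothesis), ngrams(reference)
    matches = sum((hyp & ref).values())  # n-grams shared by hypothesis and reference
    precision = matches / max(sum(hyp.values()), 1)
    recall = matches / max(sum(ref.values()), 1)
    return min(precision, recall)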
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"
    def __init__(
        self,
        vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        position_embedding_type="absolute", use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
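
# A minimal usage sketch (the overridden values below are illustrative, not the
# sizes of any released Megatron-BERT checkpoint):
#
#   config = MegatronBertConfig(num_hidden_layers=12, hidden_size=768)
#   assert config.model_type == "megatron-bert"
#   assert config.vocab_size == 29056  # default from __init__ retained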
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
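
# A minimal sketch of the processor these tests exercise: a single object that
# routes text to the tokenizer and audio to the feature extractor. `raw_audio` is
# a placeholder for a waveform array at the feature extractor's sampling rate:
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=[raw_audio], return_tensors="pt")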
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoFeatureExtractorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )
        new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
        for k, v in processor.feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_processor.feature_extractor, k))
        self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        processor = CustomProcessor(feature_extractor, tokenizer)
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)
            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )
            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )
            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))
            repo.push_to_hub()
        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
| 159 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    # keep widening the query until enough candidates come back
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f_caption, open(
        f"{class_data_dir}/urls.txt", "w"
    ) as f_urls, open(f"{class_data_dir}/images.txt", "w") as f_images:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate the payload is a decodable image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f_caption.write(images["caption"] + "\n")
                    f_urls.write(images["url"] + "\n")
                    f_images.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 87 |
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 87 | 1 |
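Strand sort, implemented above, repeatedly peels an increasing "strand" off the front of the input and merges it into the running solution, recursing on the leftovers. A minimal usage sketch, assuming the `strand_sort` above is in scope:

# the first strand pulled from [10, 5, 30, 40, 2] is [10, 30]; the rest is
# sorted by the recursive calls and merged in order
assert strand_sort([10, 5, 30, 40, 2]) == [2, 5, 10, 30, 40]
assert strand_sort([10, 5, 30, 40, 2], reverse=True) == [40, 30, 10, 5, 2]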
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
PRETRAINED_INIT_CONFIGURATION = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
| 356 |
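The offset-mapping logic above aligns SentencePiece tokens back to character positions by stripping the "▁" word-boundary marker and scanning the normalized text left to right. A self-contained sketch of that idea (`align_tokens` is a hypothetical helper, not part of the tokenizer above):

def align_tokens(text: str, tokens: list[str]) -> list[tuple[int, int]]:
    # Mirror of the loop in get_offset_mapping: find each piece in order,
    # resuming the scan from the end of the previous match.
    offsets, cursor = [], 0
    for token in tokens:
        piece = token[1:] if token.startswith("▁") else token
        start = text.index(piece, cursor)
        offsets.append((start, start + len(piece)))
        cursor = start + len(piece)
    return offsets

print(align_tokens("unrelated", ["▁un", "related"]))  # [(0, 2), (2, 9)]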
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number of the pair a,x with the largest a**x."""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
print(solution())
| 29 | 0 |
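The Project Euler solution above never evaluates a**x directly; because log10 is strictly increasing, comparing x * log10(a) orders the huge powers without computing them. A quick sanity check of the idea:

from math import log10

# 2**10 = 1024 beats 10**3 = 1000, and the logarithm comparison agrees.
assert 10 * log10(2) > 3 * log10(10)
assert 1024 > 1000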
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak a DINO checkpoint's weights into the HF ViT structure."""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 23 |
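The q/k/v handling in `read_in_q_k_v` above relies on timm storing one fused (3*hidden, hidden) projection that splits into equal thirds. A tiny illustration of that slicing (the sizes here are made up for the demo):

import torch

hidden = 4
qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q = qkv[:hidden, :]
k = qkv[hidden : 2 * hidden, :]
v = qkv[-hidden:, :]
# the three slices tile the fused matrix exactly
assert torch.equal(torch.cat([q, k, v]), qkv)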
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 127 | 0 |
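Like any PretrainedConfig subclass, the MobileNetV2Config above can be built directly, with keyword arguments overriding the defaults; a minimal sketch, assuming the class definition above is importable:

config = MobileNetV2Config(depth_multiplier=0.75, image_size=160)
print(config.depth_multiplier, config.image_size)  # 0.75 160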
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
@require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 361 |
'''simple docstring'''
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 214 | 0 |
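The Graph class above stores adjacency as a plain dict of lists, so the recursive DFS visits each vertex and edge once, i.e. O(V + E). A quick look at the structure it builds for the sample edges, assuming the `Graph` class above is in scope:

g = Graph()
for u, v in [(0, 1), (0, 2), (1, 2), (2, 0), (2, 3), (3, 3)]:
    g.add_edge(u, v)
print(g.vertex)  # {0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}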
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    # answer when the current item is skipped
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    ans2 = 0
    # answer when the current item is taken, if it still fits
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 246 |
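The plain recursion above re-solves the same (max_weight, index) subproblems, giving exponential time. A sketch of the usual memoized variant (`knapsack_memo` is a hypothetical name, not from the row above):

from functools import lru_cache


def knapsack_memo(weights: tuple, values: tuple, max_weight: int) -> int:
    @lru_cache(maxsize=None)
    def best(remaining: int, index: int) -> int:
        if index == len(weights):
            return 0
        skip = best(remaining, index + 1)  # leave the current item
        take = 0
        if weights[index] <= remaining:  # take it, if it still fits
            take = values[index] + best(remaining - weights[index], index + 1)
        return max(skip, take)

    return best(max_weight, 0)


assert knapsack_memo((1, 3, 4), (6, 10, 12), 5) == 18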
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 246 | 1 |
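Each ternary-search step shrinks the interval to roughly two thirds, so the search makes O(log n) probes and falls back to the linear scan once fewer than `precision` elements remain. A small usage check, assuming the functions above are in scope:

collection = list(range(30))
assert ite_ternary_search(collection, 25) == 25
assert rec_ternary_search(0, len(collection) - 1, collection, 25) == 25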
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 235 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
| 235 | 1 |
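A concrete instance of the "digit cancelling" curiosity the row above searches for: 49/98 really does equal 4/8 after striking the shared 9. Assuming the repaired functions above are in scope:

assert 49 / 98 == 4 / 8
assert is_digit_cancelling(49, 98)
# the four such fractions multiply to 1/100, so the known answer is 100
assert solution() == 100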
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )
        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 76 |
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if stage == "test":
__UpperCamelCase = len(self.test_dataloader().dataset )
else:
__UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase )
__UpperCamelCase = len(self.train_dataloader().dataset )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ):
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.train_loader
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
__UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.output_dir.joinpath('best_tfmr' )
__UpperCamelCase = self.step_count
self.model.save_pretrained(__UpperCAmelCase )
self.tokenizer.save_pretrained(__UpperCAmelCase )
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=__UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(__UpperCAmelCase ).parent / 'test_run' / 'cache' ) , type=__UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=__UpperCAmelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=__UpperCAmelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=__UpperCAmelCase , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=__UpperCAmelCase , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=__UpperCAmelCase , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=__UpperCAmelCase , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=__UpperCAmelCase , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=__UpperCAmelCase , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=__UpperCAmelCase , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__UpperCAmelCase )
parser.add_argument('--train_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--eval_batch_size' , default=32 , type=__UpperCAmelCase )
parser.add_argument('--adafactor' , action='store_true' )
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY; in newer pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__UpperCAmelCase )
class __lowerCAmelCase ( pl.Callback ):
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = trainer.lr_schedulers[0]['scheduler']
__UpperCamelCase = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Validation results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log results
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
rank_zero_info('***** Test results *****' )
__UpperCamelCase = trainer.callback_metrics
# Log and save results to file
__UpperCamelCase = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(__UpperCAmelCase , 'w' ) as writer:
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) )
def A ( snake_case :Any , snake_case :int ) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'--output_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'model_checkpoints' ) , type=snake_case , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=snake_case , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=snake_case )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=snake_case , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=snake_case , default=1 , help='Number of update steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=snake_case , default=4_2 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'dummy-train-data' ) , type=snake_case , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def A ( snake_case :BaseTransformer , snake_case :argparse.Namespace , snake_case :Union[str, Any]=None , snake_case :Union[str, Any]=True , snake_case :Any=[] , snake_case :Tuple=None , snake_case :List[str]=None , **snake_case :Union[str, Any] , ) -> Optional[int]:
pl.seed_everything(args.seed )
# init model
__UpperCamelCase = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=snake_case )
# add custom checkpoints
if checkpoint_callback is None:
__UpperCamelCase = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(snake_case )
if logging_callback is None:
__UpperCamelCase = LoggingCallback()
__UpperCamelCase = {}
if args.fp16:
__UpperCamelCase = 1_6
if args.gpus > 1:
__UpperCamelCase = 'auto'
__UpperCamelCase = 'ddp'
__UpperCamelCase = args.accumulate_grad_batches
__UpperCamelCase = None
__UpperCamelCase = 'auto'
__UpperCamelCase = pl.Trainer.from_argparse_args(
snake_case , weights_summary=snake_case , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=snake_case , val_check_interval=1 , num_sanity_val_steps=2 , **snake_case , )
if args.do_train:
trainer.fit(snake_case )
else:
print('RAG modeling tests with new set functions successfully executed!' )
return trainer
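# Illustrative sketch (not part of the original example): a self-contained
# PyTorch Lightning run wired the same way as the trainer builder above: seed,
# a checkpoint callback monitoring "val_loss", a Trainer, then fit. The toy
# module and data are assumptions; callback and Trainer argument names vary
# across pytorch-lightning versions.
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader, TensorDataset

class _ToyModule(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 1)
    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = torch.nn.functional.mse_loss(self.layer(x), y)
        # log at epoch granularity so the checkpoint callback can monitor it
        self.log("val_loss", loss, on_step=False, on_epoch=True)
        return loss
    def configure_optimizers(self):
        return torch.optim.AdamW(self.parameters(), lr=1e-3)

def _toy_train():
    pl.seed_everything(42)
    data = DataLoader(TensorDataset(torch.randn(64, 4), torch.randn(64, 1)), batch_size=8)
    ckpt = pl.callbacks.ModelCheckpoint(monitor="val_loss", mode="min", save_top_k=1)
    trainer = pl.Trainer(max_epochs=1, callbacks=[ckpt], logger=False, enable_progress_bar=False)
    trainer.fit(_ToyModule(), data)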
| 316 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
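# Illustrative sketch of the shim pattern above using only the standard
# library: warn at import time while the old import path keeps working. The
# helper name below is an assumption; the real diffusers `deprecate` helper
# additionally compares the installed version against the removal version.
import warnings

def _warn_deprecated_import(old_path: str, removed_in: str, hint: str) -> None:
    warnings.warn(
        f"Importing from {old_path} is deprecated and will be removed in {removed_in}. {hint}",
        FutureWarning,
        stacklevel=3,
    )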
| 323 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__A : str = logging.getLogger(__name__)
class __A ( lowerCAmelCase ):
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None ):
lowerCAmelCase : List[Any] = self.layer[current_layer](UpperCAmelCase_ , UpperCAmelCase_ , head_mask[current_layer] )
lowerCAmelCase : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = BertEncoderWithPabee(UpperCAmelCase_ )
self.init_weights()
lowerCAmelCase : str = 0
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Dict = 0
def lowercase__ ( self : int , UpperCAmelCase_ : Any ):
lowerCAmelCase : int = threshold
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Dict ):
lowerCAmelCase : Optional[Any] = patience
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Tuple = 0
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[int] = self.inference_layers_num / self.inference_instances_num
lowerCAmelCase : List[Any] = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(UpperCAmelCase_ )
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
lowerCAmelCase : Optional[int] = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
lowerCAmelCase : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
if token_type_ids is None:
lowerCAmelCase : Union[str, Any] = torch.zeros(UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# If a 2D or 3D attention mask is provided for the cross-attention,
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = encoder_hidden_states.size()
lowerCAmelCase : Optional[int] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
lowerCAmelCase : Tuple = self.invert_attention_mask(UpperCAmelCase_ )
else:
lowerCAmelCase : List[Any] = None
# Prepare head mask if needed
# 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase : Optional[Any] = self.get_head_mask(UpperCAmelCase_ , self.config.num_hidden_layers )
lowerCAmelCase : int = self.embeddings(
input_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ )
lowerCAmelCase : List[str] = embedding_output
if self.training:
lowerCAmelCase : Tuple = []
for i in range(self.config.num_hidden_layers ):
lowerCAmelCase : Dict = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : List[str] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = output_layers[i](output_dropout(UpperCAmelCase_ ) )
res.append(UpperCAmelCase_ )
elif self.patience == 0: # Use all layers for inference
lowerCAmelCase : Union[str, Any] = self.encoder(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , )
lowerCAmelCase : Optional[Any] = self.pooler(encoder_outputs[0] )
lowerCAmelCase : List[Any] = [output_layers[self.config.num_hidden_layers - 1](UpperCAmelCase_ )]
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[str] = None
lowerCAmelCase : Optional[Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowerCAmelCase : Union[str, Any] = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = output_layers[i](UpperCAmelCase_ )
if regression:
lowerCAmelCase : List[str] = logits.detach()
if patient_result is not None:
lowerCAmelCase : List[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowerCAmelCase : Any = 0
else:
lowerCAmelCase : Union[str, Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
lowerCAmelCase : Optional[Any] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCAmelCase_ ) ):
patient_counter += 1
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[Any] = logits
if patient_counter == self.patience:
break
lowerCAmelCase : Dict = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Tuple , UpperCAmelCase_ : Tuple ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Tuple = config.num_labels
lowerCAmelCase : int = BertModelWithPabee(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase : List[Any] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Any=None , ):
lowerCAmelCase : int = self.bert(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowerCAmelCase : Any = (logits[-1],)
if labels is not None:
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[int] = 0
for ix, logits_item in enumerate(UpperCAmelCase_ ):
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase : Tuple = MSELoss()
lowerCAmelCase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase : Tuple = CrossEntropyLoss()
lowerCAmelCase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
lowerCAmelCase : Any = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowerCAmelCase : str = (total_loss / total_weights,) + outputs
return outputs
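# Standalone illustration (helper name and inputs are assumptions) of the
# patience rule in the inference branch above: exit once `patience`
# consecutive layers agree with the previous layer's prediction.
def _early_exit_layer(per_layer_preds, patience):
    counter, prev = 0, None
    for i, pred in enumerate(per_layer_preds, start=1):
        if prev is not None and pred == prev:
            counter += 1
        else:
            counter = 0
        prev = pred
        if patience > 0 and counter == patience:
            return i  # exit early at this layer
    return len(per_layer_preds)  # fell through: all layers were used

assert _early_exit_layer(["cat", "dog", "dog", "dog"], patience=2) == 4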
| 323 | 1 |
from timeit import timeit
__snake_case = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def lowerCAmelCase_ ( __lowerCAmelCase )-> bool:
'''simple docstring'''
UpperCAmelCase : List[str] =0
UpperCAmelCase : Optional[int] =len(_lowerCamelCase ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def lowerCAmelCase_ ( __lowerCAmelCase )-> bool:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =len(_lowerCamelCase ) // 2
UpperCAmelCase : int =len(_lowerCamelCase )
# We only need to traverse up to half the length of the string,
# since the i-th character from the start is compared against
# the i-th character from the end (s[i] == s[n - i - 1]).
# e.g. for [0,1,2,3,4,5], index 4 is checked via index 1
# (i == n - i - 1), where n is the length of the string
return all(s[i] == s[n - i - 1] for i in range(_lowerCamelCase ) )
def lowerCAmelCase_ ( __lowerCAmelCase )-> bool:
'''simple docstring'''
if len(_lowerCamelCase ) <= 2:
return True
if s[0] == s[len(_lowerCamelCase ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def lowerCAmelCase_ ( __lowerCAmelCase )-> bool:
'''simple docstring'''
return s == s[::-1]
def lowerCAmelCase_ ( __lowerCAmelCase )-> None:
'''simple docstring'''
UpperCAmelCase : Any =f'''all({name}(key) is value for key, value in test_data.items())'''
UpperCAmelCase : Optional[int] =f'''from __main__ import test_data, {name}'''
UpperCAmelCase : int =50_00_00
UpperCAmelCase : Dict =timeit(stmt=_lowerCamelCase , setup=_lowerCamelCase , number=_lowerCamelCase )
print(f'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'{key:21} {value}')
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
| 348 |
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : int = 1_00 ) -> int:
_lowerCAmelCase : Optional[Any] = (n * (n + 1) // 2) ** 2
_lowerCAmelCase : str = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'{solution() = }')
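# Illustrative cross-check (an assumed helper, not part of the file):
# brute-force the same "square of sum minus sum of squares" quantity and
# compare it with the closed forms used above for small n.
def _brute_force(n: int) -> int:
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))

assert all(
    _brute_force(n) == (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6
    for n in range(1, 200)
)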
| 309 | 0 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
A: Optional[int] = logging.get_logger(__name__)
def _snake_case ( UpperCamelCase : List[Any] ):
UpperCAmelCase : Optional[int] = R"""\w+[.]\d+"""
UpperCAmelCase : str = re.findall(UpperCamelCase , UpperCamelCase )
for pat in pats:
UpperCAmelCase : List[str] = key.replace(UpperCamelCase , """_""".join(pat.split(""".""" ) ) )
return key
def _snake_case ( UpperCamelCase : Any , UpperCamelCase : Dict , UpperCamelCase : Dict ):
UpperCAmelCase : Any = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
UpperCAmelCase : List[str] = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
UpperCAmelCase : Dict = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
UpperCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase : List[str] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
UpperCAmelCase : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase : List[Any] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
UpperCAmelCase : List[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase : int = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase : Optional[Any] = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _snake_case ( UpperCamelCase : str , UpperCamelCase : List[str] , UpperCamelCase : int=42 ):
# Step 1: Convert pytorch tensor to numpy
UpperCAmelCase : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
UpperCAmelCase : Any = flax_model.init_weights(PRNGKey(UpperCamelCase ) )
UpperCAmelCase : Any = flatten_dict(UpperCamelCase )
UpperCAmelCase : Optional[Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase : Optional[Any] = rename_key(UpperCamelCase )
UpperCAmelCase : Tuple = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
UpperCAmelCase : Any = rename_key_and_reshape_tensor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
UpperCAmelCase : List[Any] = jnp.asarray(UpperCamelCase )
return unflatten_dict(UpperCamelCase )
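# Illustrative demonstration (toy tensors, assumed shapes) of the layout
# conventions handled above: PyTorch stores conv kernels as (out, in, kh, kw)
# and linear weights as (out, in), while Flax expects (kh, kw, in, out) and
# (in, out), hence the transpose(2, 3, 1, 0) and .T in the converter.
import numpy as np

pt_conv = np.zeros((8, 3, 4, 4))            # OIHW, as saved by PyTorch
flax_conv = pt_conv.transpose(2, 3, 1, 0)   # HWIO, as expected by Flax
assert flax_conv.shape == (4, 4, 3, 8)
pt_linear = np.zeros((16, 32))              # (out_features, in_features)
assert pt_linear.T.shape == (32, 16)        # (in_features, out_features)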
| 351 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
A: str = None
A: List[Any] = logging.get_logger(__name__)
A: Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
A: Union[str, Any] = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
A: Tuple = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
A: Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : Tuple = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Tuple = ['input_ids', 'attention_mask']
__lowerCAmelCase : str = MBartTokenizer
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Any:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : int = vocab_file
UpperCAmelCase : Optional[int] = False if not self.vocab_file else True
UpperCAmelCase : List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
UpperCAmelCase : List[Any] = {
lang_code: self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase : int = src_lang if src_lang is not None else """en_XX"""
UpperCAmelCase : List[Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCAmelCase : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : str = [self.sep_token_id]
UpperCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
UpperCAmelCase : List[str] = src_lang
UpperCAmelCase : Union[str, Any] = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "en_XX" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "ro_RO" , **_SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
'''simple docstring'''
UpperCAmelCase : int = src_lang
UpperCAmelCase : Dict = tgt_lang
return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Any = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = []
UpperCAmelCase : Tuple = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Tuple = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = []
UpperCAmelCase : Optional[int] = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase : str = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase : int = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
return
UpperCAmelCase : Any = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
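# Illustrative usage (an assumed sketch; downloads the checkpoint over the
# network): the source side above is encoded as `tokens ... </s> <src_lang_code>`,
# so the last two ids are the eos id followed by the "en_XX" language id.
from transformers import MBartTokenizerFast

def _mbart_demo():
    tok = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    ids = tok("UN Chief Says There Is No Military Solution in Syria").input_ids
    return ids[-2:]  # [tok.eos_token_id, id of "en_XX"]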
| 76 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
}
__UpperCamelCase = Dataset.from_dict(__A )
return dataset
class UpperCAmelCase__ ( UpperCAmelCase_):
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = get_dataset()
__UpperCamelCase = make_duplicate_clusters(lowercase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = get_dataset()
__UpperCamelCase , __UpperCamelCase = deduplicate_dataset(lowercase )
self.assertEqual(len(lowercase ) , 2 )
print(lowercase )
self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , lowercase )
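# Illustrative background (an assumed helper, independent of the local
# `minhash_deduplication` module): MinHash deduplication approximates the
# Jaccard similarity between token sets; the 0.85 above is a threshold on
# that similarity. Exact Jaccard for two tiny documents:
def _jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)

assert _jaccard("a a a", "a a") == 1.0  # duplicates by content
assert _jaccard("a b", "c d") == 0.0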
| 349 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _lowercase ( __A ,__A ):
'''simple docstring'''
return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__A ,__A ) ) )
def _lowercase ( __A ,__A ):
'''simple docstring'''
if dataset.ndim != value_array.ndim:
__UpperCamelCase = (
"""Wrong input data's dimensions... """
f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
)
raise ValueError(__A )
try:
if dataset.shape[1] != value_array.shape[1]:
__UpperCamelCase = (
"""Wrong input data's shape... """
f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
)
raise ValueError(__A )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("""Wrong shape""" )
if dataset.dtype != value_array.dtype:
__UpperCamelCase = (
"""Input data have different datatype... """
f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
)
raise TypeError(__A )
__UpperCamelCase = []
for value in value_array:
__UpperCamelCase = euclidean(__A ,dataset[0] )
__UpperCamelCase = dataset[0].tolist()
for dataset_value in dataset[1:]:
__UpperCamelCase = euclidean(__A ,__A )
if dist > temp_dist:
__UpperCamelCase = temp_dist
__UpperCamelCase = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _lowercase ( __A ,__A ):
'''simple docstring'''
return np.dot(__A ,__A ) / (norm(__A ) * norm(__A ))
if __name__ == "__main__":
import doctest
doctest.testmod()
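# Self-contained restatement of the two metrics above (the original function
# names are obfuscated, so these are illustrative reimplementations):
import numpy as np

def _euclidean(a, b):
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    return float(np.sqrt(np.sum((a - b) ** 2)))

def _cosine_similarity(a, b):
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

assert abs(_euclidean([0, 0], [3, 4]) - 5.0) < 1e-12
assert abs(_cosine_similarity([1, 0], [1, 0]) - 1.0) < 1e-12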
| 349 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __UpperCAmelCase ( a_: Tuple, a_: Union[str, Any], a_: List[str] ):
# Construct model
if openai_config_file == "":
_UpperCAmelCase : Optional[int] = OpenAIGPTConfig()
else:
_UpperCAmelCase : List[Any] = OpenAIGPTConfig.from_json_file(a_ )
_UpperCAmelCase : Dict = OpenAIGPTModel(a_ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(a_, a_, a_ )
# Save pytorch-model
_UpperCAmelCase : Any = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
_UpperCAmelCase : Optional[Any] = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict(), a_ )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(a_, "w", encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__a = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 17 |
'''simple docstring'''
def __UpperCAmelCase ( a_: int, a_: int ):
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
_UpperCAmelCase : List[str] = str(bin(a_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase : Any = str(bin(a_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase : Dict = max(len(a_ ), len(a_ ) )
return "0b" + "".join(
str(int(char_a == "1" and char_b == "1" ) )
for char_a, char_b in zip(a_binary.zfill(a_ ), b_binary.zfill(a_ ) ) )
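# Readable restatement of the helper above (the original name is obfuscated):
# it zero-pads to the wider operand and prefixes "0b", so the output matches
# the string-based version rather than Python's bare bin().
def _binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    width = max(len(bin(a)) - 2, len(bin(b)) - 2)  # digits in the wider operand
    return "0b" + format(a & b, f"0{width}b")

assert _binary_and(25, 32) == "0b000000"  # 11001 & 100000, padded to 6 digits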
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = StableDiffusionInpaintPipeline
UpperCAmelCase__: Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase__: int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__: Optional[int] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase__: Tuple = frozenset([] )
def __A ( self ):
torch.manual_seed(0 )
A__ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A__ , )
A__ : Any = PNDMScheduler(skip_prk_steps=A__ )
torch.manual_seed(0 )
A__ : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A__ : Dict = CLIPTextModel(A__ )
A__ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __A ( self , A__ , A__=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
A__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ )
A__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ : Union[str, Any] = Image.fromarray(np.uinta(A__ ) ).convert("""RGB""" ).resize((64, 64) )
A__ : Any = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(A__ ).startswith("""mps""" ):
A__ : List[str] = torch.manual_seed(A__ )
else:
A__ : int = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
A__ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Optional[Any] = self.get_dummy_components()
A__ : List[Any] = StableDiffusionInpaintPipeline(**A__ )
A__ : Optional[Any] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A__ : Optional[Any] = self.get_dummy_inputs(A__ )
A__ : List[Any] = sd_pipe(**A__ ).images
A__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ : Optional[Any] = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
A__ : int = """stabilityai/stable-diffusion-2-inpainting"""
A__ : List[str] = StableDiffusionInpaintPipeline.from_pretrained(A__ , safety_checker=A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
A__ : int = """Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Tuple = torch.manual_seed(0 )
A__ : Tuple = pipe(
prompt=A__ , image=A__ , mask_image=A__ , generator=A__ , output_type="""np""" , )
A__ : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def __A ( self ):
A__ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
A__ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting"""
A__ : Any = StableDiffusionInpaintPipeline.from_pretrained(
A__ , torch_dtype=torch.floataa , safety_checker=A__ , )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
A__ : List[str] = """Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : List[str] = torch.manual_seed(0 )
A__ : List[Any] = pipe(
prompt=A__ , image=A__ , mask_image=A__ , generator=A__ , output_type="""np""" , )
A__ : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def __A ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Union[str, Any] = """stabilityai/stable-diffusion-2-inpainting"""
A__ : List[str] = PNDMScheduler.from_pretrained(A__ , subfolder="""scheduler""" )
A__ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(
A__ , safety_checker=A__ , scheduler=A__ , torch_dtype=torch.floataa , )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A__ : Union[str, Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Any = torch.manual_seed(0 )
A__ : str = pipe(
prompt=A__ , image=A__ , mask_image=A__ , generator=A__ , num_inference_steps=2 , output_type="""np""" , )
A__ : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
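# Illustrative end-to-end usage of the pipeline exercised above (an assumed
# sketch: downloads the checkpoint and needs a CUDA device; the blank image
# and mask are placeholders for real inputs, white mask pixels get repainted).
import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline

def _run_inpaint_demo():
    init_image = Image.new("RGB", (512, 512), "gray")
    mask_image = Image.new("RGB", (512, 512), "white")
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
    ).to("cuda")
    return pipe(
        prompt="Face of a yellow cat, high resolution, sitting on a park bench",
        image=init_image,
        mask_image=mask_image,
    ).images[0]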
| 192 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : Optional[Any] = '▁'
A_ : int = {'vocab_file': 'sentencepiece.bpe.model'}
A_ : int = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
A_ : Optional[int] = {
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
A_ : Tuple = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__: str = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__: List[int] = []
UpperCAmelCase__: List[int] = []
def __init__( self , A__ , A__="<s>" , A__="</s>" , A__="</s>" , A__="<s>" , A__="<unk>" , A__="<pad>" , A__="<mask>" , A__=None , A__=None , A__=None , A__ = None , A__=None , A__=False , **A__ , ):
# Mask token behave like a normal word, i.e. include the space before it
A__ : Any = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else mask_token
A__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
A__ : List[str] = legacy_behaviour
super().__init__(
bos_token=A__ , eos_token=A__ , unk_token=A__ , sep_token=A__ , cls_token=A__ , pad_token=A__ , mask_token=A__ , tokenizer_file=A__ , src_lang=A__ , tgt_lang=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=A__ , **A__ , )
A__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A__ ) )
A__ : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 tokens
A__ : str = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A__ : str = 1
A__ : Optional[int] = len(self.sp_model )
A__ : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(A__ )
}
A__ : Tuple = {v: k for k, v in self.lang_code_to_id.items()}
A__ : Dict = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
A__ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
A__ : int = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
A__ : int = src_lang if src_lang is not None else """eng_Latn"""
A__ : str = self.lang_code_to_id[self._src_lang]
A__ : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
A__ : Tuple = self.__dict__.copy()
A__ : List[Any] = None
A__ : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A__ ):
A__ : str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A__ : Any = {}
A__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __A ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __A ( self ):
return self._src_lang
@src_lang.setter
def __A ( self , A__ ):
A__ : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __A ( self , A__ , A__ = None , A__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ )
A__ : Dict = [1] * len(self.prefix_tokens )
A__ : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A__ )) + suffix_ones
return prefix_ones + ([0] * len(A__ )) + ([0] * len(A__ )) + suffix_ones
def __A ( self , A__ , A__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __A ( self , A__ , A__ = None ):
A__ : Dict = [self.sep_token_id]
A__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self , A__ , A__ , A__ , A__ , **A__ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
A__ : Optional[int] = src_lang
A__ : List[Any] = self(A__ , add_special_tokens=A__ , return_tensors=A__ , **A__ )
A__ : Optional[int] = self.convert_tokens_to_ids(A__ )
A__ : Optional[int] = tgt_lang_id
return inputs
def __A ( self ):
A__ : List[str] = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self , A__ ):
return self.sp_model.encode(A__ , out_type=A__ )
def __A ( self , A__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A__ : List[str] = self.sp_model.PieceToId(A__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __A ( self , A__ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __A ( self , A__ ):
A__ : Optional[Any] = """""".join(A__ ).replace(A__ , """ """ ).strip()
return out_string
def __A ( self , A__ , A__ = None ):
if not os.path.isdir(A__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A__ : Any = os.path.join(
A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A__ )
elif not os.path.isfile(self.vocab_file ):
with open(A__ , """wb""" ) as fi:
A__ : str = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (out_vocab_file,)
def __A ( self , A__ , A__ = "eng_Latn" , A__ = None , A__ = "fra_Latn" , **A__ , ):
A__ : Any = src_lang
A__ : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(A__ , A__ , **A__ )
def __A ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __A ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __A ( self , A__ ):
A__ : List[str] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
A__ : Dict = []
A__ : str = [self.eos_token_id, self.cur_lang_code]
else:
A__ : List[str] = [self.cur_lang_code]
A__ : Optional[Any] = [self.eos_token_id]
def __A ( self , A__ ):
A__ : Union[str, Any] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
A__ : Union[str, Any] = []
A__ : int = [self.eos_token_id, self.cur_lang_code]
else:
A__ : Dict = [self.cur_lang_code]
A__ : str = [self.eos_token_id]
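# Illustrative usage (an assumed sketch; downloads the checkpoint): with the
# non-legacy default implemented above, encoded inputs are prefixed with the
# source language code and suffixed with the eos token.
from transformers import NllbTokenizer

def _nllb_demo():
    tok = NllbTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    ids = tok("Machine translation is fun.").input_ids
    # expected layout: [eng_Latn, ...subword ids..., </s>]
    return tok.convert_ids_to_tokens(ids)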
| 192 | 1 |
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase__ ( lowercase , unittest.TestCase ):
lowercase__ = TransfoXLTokenizer
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
_UpperCamelCase : Dict = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
_UpperCamelCase : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer( self ,**kwargs ):
        '''simple docstring'''
        kwargs['lower_case'] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ):
        '''simple docstring'''
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file ,lower_case=True )
        tokens = tokenizer.tokenize('<unk> UNwanted , running' )
        self.assertListEqual(tokens ,['<unk>', 'unwanted', ',', 'running'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[0, 4, 8, 7] )
    def test_full_tokenizer_lower( self ):
        '''simple docstring'''
        tokenizer = TransfoXLTokenizer(lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) ,['hello', '!', 'how', 'are', 'you', '?'] )
    def test_full_tokenizer_no_lower( self ):
        '''simple docstring'''
        tokenizer = TransfoXLTokenizer(lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_full_tokenizer_moses_numbers( self ):
        '''simple docstring'''
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in ) ,tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) ,text_in )
    def test_move_added_token( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
        tokenizer.add_tokens(['new1', 'new2'] )
        tokenizer.move_added_token('new1' ,1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) ,original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('new1' ) ,[1] )
        self.assertEqual(tokenizer.decode([1] ) ,'new1' )
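# Added illustration (hedged, not part of the original test file): TransfoXL's
# Moses-based pre-tokenization protects numbers with @,@ / @.@ markers so that
# detokenization restores "5,000" and "3.34" exactly. Requires the `sacremoses` package.
from transformers import TransfoXLTokenizer

moses_tok = TransfoXLTokenizer(lower_case=False)
number_tokens = moses_tok.tokenize("$5,000 with 3.34 m.")
print(number_tokens)                                      # ',' becomes '@,@', '.' becomes '@.@'
print(moses_tok.convert_tokens_to_string(number_tokens))  # round-trips the original text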
| 352 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 1024,
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,mask_token="<mask>" ,src_lang=None ,tgt_lang=None ,additional_special_tokens=None ,legacy_behaviour=False ,**kwargs ,):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file ,tokenizer_file=tokenizer_file ,bos_token=bos_token ,eos_token=eos_token ,sep_token=sep_token ,cls_token=cls_token ,unk_token=unk_token ,pad_token=pad_token ,mask_token=mask_token ,src_lang=src_lang ,tgt_lang=tgt_lang ,additional_special_tokens=additional_special_tokens ,legacy_behaviour=legacy_behaviour ,**kwargs ,)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang( self ,new_src_lang: str ) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self ,raw_inputs ,return_tensors: str ,src_lang: Optional[str] ,tgt_lang: Optional[str] ,**extra_kwargs ):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs ,add_special_tokens=True ,return_tensors=return_tensors ,**extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self ,src_texts: List[str] ,src_lang: str = "eng_Latn" ,tgt_texts: Optional[List[str]] = None ,tgt_lang: str = "fra_Latn" ,**kwargs ,) -> BatchEncoding:
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts ,tgt_texts ,**kwargs )
    def _switch_to_input_mode( self ):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self ,src_lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
    def set_tgt_lang_special_tokens( self ,lang: str ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        return (out_vocab_file,)
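# Added usage sketch (hedged): end-to-end translation with the fast tokenizer above.
# _build_translation_inputs stores the target language id under "forced_bos_token_id",
# which generate() uses to force the first decoded token. Checkpoint download assumed.
from transformers import AutoModelForSeq2SeqLM, NllbTokenizerFast

fast_tokenizer = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
nllb_model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
batch = fast_tokenizer("The weather is nice today.", return_tensors="pt")
generated = nllb_model.generate(**batch, forced_bos_token_id=fast_tokenizer.convert_tokens_to_ids("fra_Latn"))
print(fast_tokenizer.batch_decode(generated, skip_special_tokens=True))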
| 236 | 0 |
def floyd( n ):
    '''simple docstring'''
    for i in range(0 ,n ):
        for _ in range(0 ,n - i - 1 ): # printing spaces
            print(' ' ,end='' )
        for _ in range(0 ,i + 1 ): # printing stars
            print('* ' ,end='' )
        print()
def reverse_floyd( n ):
    '''simple docstring'''
    for i in range(n ,0 ,-1 ):
        for _ in range(i ,0 ,-1 ): # printing stars
            print('* ' ,end='' )
        print()
        for _ in range(n - i + 1 ,0 ,-1 ): # printing spaces
            print(' ' ,end='' )
def pretty_print( n ):
    '''simple docstring'''
    if n <= 0:
        print(' ... .... nothing printing :(' )
        return
    floyd(n ) # upper half
    reverse_floyd(n ) # lower half
if __name__ == "__main__":
    print(r"""| /\ | |- | |- |--| |\ /| |-""")
    print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
    print("""Good Bye...""")
| 140 |
"""simple docstring"""
def solution( n = 1_000 ):
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
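    # Added spot-check: with n = 3 the only term is a = 3, giving 2 * 3 * ((3 - 1) // 2) = 6.
    assert solution(3) == 6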
| 153 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class LiltConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''lilt'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=1024 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
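# Added usage sketch (hedged): the standard config -> model pattern for the class
# above, using the released transformers model class (assumed installed).
from transformers import LiltModel

lilt_config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
lilt_model = LiltModel(lilt_config)  # randomly initialized, not pretrained
print(lilt_config.hidden_size)  # 768 by default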
| 141 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=A__ , speech_processor=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , unet=A__ , scheduler=A__ , feature_extractor=A__ , )
def __A ( self , A__ = "auto" ):
if slice_size == "auto":
A__ : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A__ )
def __A ( self ):
self.enable_attention_slicing(A__ )
@torch.no_grad()
    def __call__( self , audio , sampling_rate=16_000 , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors="""pt""" , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=480_000 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A__ , A__ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(A__ )}.""" )
# get prompt text embeddings
A__ : Optional[int] = self.tokenizer(
A__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
A__ : Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A__ : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
A__ : Dict = text_input_ids[:, : self.tokenizer.model_max_length]
A__ : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A__ , A__ , A__ : List[str] = text_embeddings.shape
A__ : Dict = text_embeddings.repeat(1 , A__ , 1 )
A__ : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A__ : int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A__ : List[str]
if negative_prompt is None:
A__ : Union[str, Any] = [""""""] * batch_size
elif type(A__ ) is not type(A__ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(A__ )} !="""
F""" {type(A__ )}.""" )
elif isinstance(A__ , A__ ):
A__ : Union[str, Any] = [negative_prompt]
elif batch_size != len(A__ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(A__ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
A__ : int = negative_prompt
A__ : Union[str, Any] = text_input_ids.shape[-1]
A__ : int = self.tokenizer(
A__ , padding="""max_length""" , max_length=A__ , truncation=A__ , return_tensors="""pt""" , )
A__ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A__ : List[Any] = uncond_embeddings.shape[1]
A__ : Any = uncond_embeddings.repeat(1 , A__ , 1 )
A__ : Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , A__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A__ : Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A__ : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A__ : Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A__ : Optional[Any] = torch.randn(A__ , generator=A__ , device="""cpu""" , dtype=A__ ).to(
self.device )
else:
A__ : str = torch.randn(A__ , generator=A__ , device=self.device , dtype=A__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
A__ : Any = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A__ : List[str] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A__ : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A__ : Any = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A__ : Tuple = {}
if accepts_eta:
A__ : str = eta
for i, t in enumerate(self.progress_bar(A__ ) ):
# expand the latents if we are doing classifier free guidance
A__ : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A__ : Tuple = self.scheduler.scale_model_input(A__ , A__ )
# predict the noise residual
A__ : Union[str, Any] = self.unet(A__ , A__ , encoder_hidden_states=A__ ).sample
# perform guidance
if do_classifier_free_guidance:
A__ , A__ : List[Any] = noise_pred.chunk(2 )
A__ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A__ : Tuple = self.scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A__ , A__ , A__ )
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
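# Added usage sketch (hedged; checkpoint and pipeline names are assumptions based on
# the diffusers community examples): drive Stable Diffusion with a Whisper transcript.
import torch
from datasets import load_dataset
from diffusers import DiffusionPipeline
from transformers import WhisperForConditionalGeneration, WhisperProcessor

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_sample = ds[3]
speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to("cuda")
speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",  # community pipeline; availability assumed
    speech_model=speech_model,
    speech_processor=speech_processor,
    torch_dtype=torch.float16,
).to("cuda")
image = pipe(audio_sample["audio"]["array"], sampling_rate=audio_sample["audio"]["sampling_rate"]).images[0]
image.save("spoken_prompt.png")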
| 141 | 1 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester :
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any]=9_9 , lowerCAmelCase_ : str=1_3 , lowerCAmelCase_ : int=7 , lowerCAmelCase_ : Union[str, Any]=9 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : List[str]=3_2 , lowerCAmelCase_ : Union[str, Any]=5 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : int=3_7 , lowerCAmelCase_ : str=8 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : str=0.0_02 , lowerCAmelCase_ : Union[str, Any]=1 , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : List[str]=0 , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[str]=None , ) -> List[str]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = encoder_seq_length
__lowerCAmelCase = decoder_seq_length
# For common tests
__lowerCAmelCase = self.decoder_seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_attention_mask
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = d_ff
__lowerCAmelCase = relative_attention_num_buckets
__lowerCAmelCase = dropout_rate
__lowerCAmelCase = initializer_factor
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = decoder_start_token_id
__lowerCAmelCase = None
__lowerCAmelCase = decoder_layers
def lowercase ( self : List[str] ) -> Tuple:
return TaConfig.from_pretrained('google/umt5-base' )
def lowercase ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Tuple=None , ) -> Union[str, Any]:
if attention_mask is None:
__lowerCAmelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__lowerCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__lowerCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE_ )
if decoder_head_mask is None:
__lowerCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE_ )
if cross_attn_head_mask is None:
__lowerCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowercase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
__lowerCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
__lowerCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = config.num_attention_heads
__lowerCAmelCase = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return config, input_dict
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase ( self : Optional[Any] ) -> List[Any]:
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase ( self : Tuple ) -> int:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , ) -> Tuple:
__lowerCAmelCase = UMTaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCAmelCase = model(
input_ids=SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , )
__lowerCAmelCase = model(input_ids=SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = result.last_hidden_state
__lowerCAmelCase = result.past_key_values
__lowerCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowercase ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : str , ) -> Optional[int]:
__lowerCAmelCase = UMTaModel(config=SCREAMING_SNAKE_CASE_ ).get_decoder().to(SCREAMING_SNAKE_CASE_ ).eval()
# first forward pass
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) + 1 )
__lowerCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ )['last_hidden_state']
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )['last_hidden_state']
# select random slice
__lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
__lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def lowercase ( self : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , ) -> List[Any]:
__lowerCAmelCase = UMTaModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).half().eval()
__lowerCAmelCase = model(**SCREAMING_SNAKE_CASE_ )['last_hidden_state']
self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE_ ).any().item() )
@require_torch
class _UpperCAmelCase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a_ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a_ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a_ = True
a_ = False
a_ = False
a_ = True
a_ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a_ = [0.8, 0.9]
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
        self.model_tester = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def lowercase ( self : Any ) -> int:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
__lowerCAmelCase = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
SCREAMING_SNAKE_CASE_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=SCREAMING_SNAKE_CASE_ , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def lowercase ( self : int ) -> Optional[int]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE_ )
def lowercase ( self : List[Any] ) -> int:
__lowerCAmelCase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
__lowerCAmelCase = config_and_inputs[0]
__lowerCAmelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval()
model.to(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE_ ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE_ ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE_ ),
}
for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE_ , head_masking.items() ):
__lowerCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__lowerCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE_ , return_dict_in_generate=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__lowerCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def lowercase ( self : List[Any] ) -> Union[str, Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def lowercase ( self : Dict ) -> int:
__lowerCAmelCase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=SCREAMING_SNAKE_CASE_ , legacy=SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
__lowerCAmelCase = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='pt' , padding=SCREAMING_SNAKE_CASE_ ).input_ids
# fmt: off
__lowerCAmelCase = torch.tensor(
[
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
] )
# fmt: on
torch.testing.assert_allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = model.generate(input_ids.to(SCREAMING_SNAKE_CASE_ ) )
__lowerCAmelCase = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
__lowerCAmelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
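# Added standalone sketch (hedged) of the cached-decoding invariant the tests above
# rely on: with use_cache, feeding only the newest token plus past_key_values must
# reproduce the full-sequence forward pass. A tiny random T5 config keeps it fast.
import torch
from transformers import T5Config, T5ForConditionalGeneration

tiny_config = T5Config(vocab_size=100, d_model=32, d_ff=37, d_kv=8, num_layers=2, num_heads=4, decoder_start_token_id=0)
decoder = T5ForConditionalGeneration(tiny_config).get_decoder().eval()
token_ids = torch.randint(1, 100, (2, 5))
with torch.no_grad():
    full = decoder(token_ids).last_hidden_state
    past = decoder(token_ids[:, :-1], use_cache=True).past_key_values
    step = decoder(token_ids[:, -1:], past_key_values=past).last_hidden_state
print(torch.allclose(full[:, -1], step[:, 0], atol=1e-3))  # expected: True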
| 284 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
A__ : List[str] = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ["""BeitFeatureExtractor"""]
A__ : List[str] = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_beit"""] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_beit"""] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
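# Added illustration: with the _LazyModule registration above, `import transformers`
# stays cheap and each submodule is imported only on first attribute access.
from transformers.models.beit import BeitConfig  # resolved lazily via _LazyModule

beit_config = BeitConfig()
print(type(beit_config).__name__)  # BeitConfig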
| 185 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( PipelineTesterMixin , unittest.TestCase):
A_ : Dict = DanceDiffusionPipeline
A_ : Optional[int] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
A_ : Any = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
A_ : Union[str, Any] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
A_ : Tuple = False
A_ : Union[str, Any] = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=True , use_timestep_embedding=False , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
        scheduler = IPNDMScheduler()
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 4,
        }
        return inputs
    def test_dance_diffusion( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components['unet'].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_dance_diffusion( self ):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=1_00 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
    def test_dance_diffusion_fp16( self ):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.float16 )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=1_00 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 182 |
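# Added usage sketch (hedged) for the pipeline exercised above: unconditional audio
# generation with the public 'harmonai/maestro-150k' checkpoint (GPU assumed; the
# sample_rate attribute on the UNet config is an assumption from the dummy components).
import scipy.io.wavfile
import torch
from diffusers import DanceDiffusionPipeline

dance_pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
waveform = dance_pipe(generator=torch.manual_seed(0), audio_length_in_s=4.096).audios[0]
scipy.io.wavfile.write("maestro_sample.wav", dance_pipe.unet.config.sample_rate, waveform.T)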
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( PipelineTesterMixin , unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
A_ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
A_ : Any = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
A_ : Any = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
A_ : Any = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 1_00
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
        model_kwargs = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNet2DConditionModel(**model_kwargs )
        return model
@property
def __lowerCamelCase ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((2_56, 2_56) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_inpaint( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}" )
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def __lowerCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        mask = np.ones((7_68, 7_68) , dtype=np.float32 )
        mask[:2_50, 2_50:-2_50] = 0
        prompt = 'a hat'
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        output = pipeline(
            image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
| 182 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _snake_case ( AbstractFileSystem ):
    root_marker = ''
    protocol = 'hf-legacy'  # "hf://" is reserved for hffs
    def __init__( self , repo_info = None , token = None , **kwargs , ):
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self ):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    'name': hf_file.rfilename,
                    'size': None,
                    'type': 'file',
                }
                self.dir_cache.update(
                    {
                        str(d ): {'name': str(d ), 'size': None, 'type': 'directory'}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = "rb" , **_lowerCamelCase , ):
if not isinstance(self.repo_info , _lowerCamelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
a :Optional[int] = hf_hub_url(self.repo_info.id , _lowerCamelCase , revision=self.repo_info.sha )
return fsspec.open(
_lowerCamelCase , mode=_lowerCamelCase , headers=get_authentication_headers_for_url(_lowerCamelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , **_lowerCamelCase ):
self._get_dirs()
a :Union[str, Any] = self._strip_protocol(_lowerCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=False , **_lowerCamelCase ):
self._get_dirs()
a :str = PurePosixPath(path.strip('''/''' ) )
a :Tuple = {}
for p, f in self.dir_cache.items():
a :Optional[int] = PurePosixPath(p.strip('''/''' ) )
a :str = p.parent
if root == path:
a :List[str] = f
a :Any = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
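# A quick self-contained sketch of the directory cache built by `_get_dirs` above,
# using a hypothetical file listing (no hub access needed): every parent directory of
# a file path gets its own "directory" entry next to the "file" entries.
if __name__ == "__main__":
    _siblings = ["data/train.csv", "data/test.csv", "README.md"]  # hypothetical repo files
    _cache = {name: {"name": name, "size": None, "type": "file"} for name in _siblings}
    for _name in _siblings:
        for _d in list(PurePosixPath(_name).parents)[:-1]:  # [:-1] drops the root entry "."
            _cache[str(_d)] = {"name": str(_d), "size": None, "type": "directory"}
    print(sorted(_cache))  # ['README.md', 'data', 'data/test.csv', 'data/train.csv']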
| 94 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE :Union[str, Any] = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE :Any = {
'''gpt2''': 10_24,
'''gpt2-medium''': 10_24,
'''gpt2-large''': 10_24,
'''gpt2-xl''': 10_24,
'''distilgpt2''': 10_24,
}
class __lowerCAmelCase ( a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
_SCREAMING_SNAKE_CASE = GPTaTokenizer
def __init__( self : Any , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Union[str, Any]="<|endoftext|>" , _lowerCAmelCase : Union[str, Any]="<|endoftext|>" , _lowerCAmelCase : Union[str, Any]="<|endoftext|>" , _lowerCAmelCase : Any=False , **_lowerCAmelCase : Any , ) -> List[Any]:
"""simple docstring"""
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
snake_case_ = kwargs.pop("add_bos_token" , _lowerCAmelCase )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , _lowerCAmelCase ) != add_prefix_space:
snake_case_ = getattr(_lowerCAmelCase , pre_tok_state.pop("type" ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**_lowerCAmelCase )
snake_case_ = add_prefix_space
def lowerCAmelCase__ ( self : List[Any] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : List[str] ) -> BatchEncoding:
"""simple docstring"""
snake_case_ = kwargs.get("is_split_into_words" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : Dict , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : List[str] ) -> BatchEncoding:
"""simple docstring"""
snake_case_ = kwargs.get("is_split_into_words" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
snake_case_ = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def lowerCAmelCase__ ( self : int , _lowerCAmelCase : "Conversation" ) -> List[int]:
"""simple docstring"""
snake_case_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) + [self.eos_token_id] )
if len(_lowerCAmelCase ) > self.model_max_length:
snake_case_ = input_ids[-self.model_max_length :]
return input_ids
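# Hedged usage sketch of the behaviour enforced above (assumes hub access and a standard
# `transformers` install): `add_prefix_space=True` is what makes pretokenized input legal
# for the fast GPT-2 tokenizer.
if __name__ == "__main__":
    from transformers import GPT2TokenizerFast

    tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
    print(tok(["Hello", "world"], is_split_into_words=True).input_ids)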
| 159 | 0 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
__SCREAMING_SNAKE_CASE : List[str] = 100
__SCREAMING_SNAKE_CASE : Union[str, Any] = set(range(3, NUM_PRIMES, 2))
primes.add(2)
__SCREAMING_SNAKE_CASE : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def _a ( _SCREAMING_SNAKE_CASE ) -> set[int]:
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
snake_case_ = set()
snake_case_ = 42
snake_case_ = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def _a ( _SCREAMING_SNAKE_CASE = 5_000 ) -> int | None:
for number_to_partition in range(1 , _SCREAMING_SNAKE_CASE ):
if len(partition(_SCREAMING_SNAKE_CASE ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
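# Worked check (hand-computable): 7 has exactly three prime partitions,
# 7 = 7 = 5 + 2 = 3 + 2 + 2, which the product encoding represents as {7, 10, 12}.
assert partition(7) == {7, 10, 12}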
| 233 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> list:
if len(_SCREAMING_SNAKE_CASE ) <= 1:
return [tuple(_SCREAMING_SNAKE_CASE )]
snake_case_ = []
def generate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , _SCREAMING_SNAKE_CASE )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
snake_case_ , snake_case_ = arr[k - 1], arr[i]
else: # k is odd
snake_case_ , snake_case_ = arr[k - 1], arr[0]
generate(k - 1 , _SCREAMING_SNAKE_CASE )
generate(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
return res
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by a comma:\n').strip()
__SCREAMING_SNAKE_CASE : str = [int(item) for item in user_input.split(',')]
print(heaps(arr))
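# Deterministic example: for three elements the algorithm emits the 3! = 6
# permutations in this swap order:
# heaps([1, 2, 3]) ==
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
assert len(heaps([1, 2, 3])) == 6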
| 233 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def snake_case_ ( A_ : float, A_ : int ):
'''simple docstring'''
_lowerCamelCase : Tuple = u
for i in range(1, A_ ):
_lowerCamelCase : int = temp * (u - i)
return temp
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Tuple = int(input('''enter the numbers of values: ''' ) )
_lowerCamelCase : list[list[float]] = []
for _ in range(A_ ):
y.append([] )
for i in range(A_ ):
for j in range(A_ ):
y[i].append(A_ )
_lowerCamelCase : Optional[int] = 0
print('''enter the values of parameters in a list: ''' )
_lowerCamelCase : Optional[int] = list(map(A_, input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(A_ ):
_lowerCamelCase : Dict = float(input() )
_lowerCamelCase : Tuple = int(input('''enter the value to interpolate: ''' ) )
_lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1, A_ ):
for j in range(n - i ):
_lowerCamelCase : Optional[Any] = y[j + 1][i - 1] - y[j][i - 1]
_lowerCamelCase : Any = y[0][0]
for i in range(1, A_ ):
summ += (ucal(A_, A_ ) * y[0][i]) / math.factorial(A_ )
print(F'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
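# A non-interactive sketch of the same scheme with hypothetical data: sampling
# f(x) = x^2 + x + 1 at x = 0, 1, 2 and interpolating at 1.5 should print 4.75.
# Call `_demo()` to reproduce.
def _demo() -> None:
    x = [0.0, 1.0, 2.0]
    y = [[1.0, 0.0, 0.0], [3.0, 0.0, 0.0], [7.0, 0.0, 0.0]]  # column 0 holds f(x)
    n = len(x)
    for i in range(1, n):  # build the forward-difference table
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    u = (1.5 - x[0]) / (x[1] - x[0])
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(summ)  # 4.75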
| 72 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__UpperCAmelCase )
class A ( __UpperCAmelCase ):
__snake_case = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
__snake_case = Features({'audio': Audio()} )
__snake_case = Features({'transcription': Value('string' )} )
__snake_case = "audio"
__snake_case = "transcription"
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(f"Column {self.audio_column} is not present in features." )
if not isinstance(features[self.audio_column], Audio ):
raise ValueError(f"Column {self.audio_column} is not an Audio type." )
task_template = copy.deepcopy(self )
input_schema = self.input_schema.copy()
input_schema["audio"] = features[self.audio_column]
task_template.__dict__["input_schema"] = input_schema
return task_template
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 167 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A = logging.get_logger(__name__)
class A ( __UpperCAmelCase ):
__snake_case = ['pixel_values']
def __init__( self, UpperCamelCase__ = True, UpperCamelCase__ = 32, UpperCamelCase__=PILImageResampling.BILINEAR, UpperCamelCase__ = True, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = size_divisor
lowerCAmelCase_ = resample
super().__init__(**UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = None, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(UpperCamelCase__ )
# Rounds the height and width down to the closest multiple of size_divisor
lowerCAmelCase_ = height // size_divisor * size_divisor
lowerCAmelCase_ = width // size_divisor * size_divisor
lowerCAmelCase_ = resize(UpperCamelCase__, (new_h, new_w), resample=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__ )
return image
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = None, **UpperCamelCase__ ):
"""simple docstring"""
return rescale(image=UpperCamelCase__, scale=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=None, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = ChannelDimension.FIRST, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ = size_divisor if size_divisor is not None else self.size_divisor
lowerCAmelCase_ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
lowerCAmelCase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
lowerCAmelCase_ = [to_numpy_array(UpperCamelCase__ ) for img in images]
if do_resize:
lowerCAmelCase_ = [self.resize(UpperCamelCase__, size_divisor=UpperCamelCase__, resample=UpperCamelCase__ ) for image in images]
if do_rescale:
lowerCAmelCase_ = [self.rescale(UpperCamelCase__, scale=1 / 255 ) for image in images]
lowerCAmelCase_ = [to_channel_dimension_format(UpperCamelCase__, UpperCamelCase__ ) for image in images]
lowerCAmelCase_ = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__, tensor_type=UpperCamelCase__ )
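# A quick check of the size_divisor rounding rule used in `resize` above
# (hypothetical sizes): both sides are floored to the nearest multiple of 32.
if __name__ == "__main__":
    size_divisor, height, width = 32, 518, 642
    print(height // size_divisor * size_divisor, width // size_divisor * size_divisor)  # 512 640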
| 167 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__snake_case = 250004
__snake_case = 250020
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = MBartaaTokenizer
A_ : Optional[Any] = MBartaaTokenizerFast
A_ : List[Any] = True
A_ : Union[str, Any] = True
def _UpperCAmelCase ( self ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
_a = MBartaaTokenizer(__UpperCAmelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = '<s>'
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__UpperCAmelCase ) , 1054 )
def _UpperCAmelCase ( self ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def _UpperCAmelCase ( self ) -> int:
_a = MBartaaTokenizer(__UpperCAmelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__UpperCAmelCase )
_a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
_a = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_a = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
# fmt: off
_a = {'input_ids': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_a = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
_a = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
_a = tempfile.mkdtemp()
_a = tokenizer_r.save_pretrained(__UpperCAmelCase )
_a = tokenizer_p.save_pretrained(__UpperCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_a = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase )
# Checks everything loads correctly in the same way
_a = tokenizer_r.from_pretrained(__UpperCAmelCase )
_a = tokenizer_p.from_pretrained(__UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__UpperCAmelCase )
# Save tokenizer rust, legacy_format=True
_a = tempfile.mkdtemp()
_a = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase )
_a = tokenizer_p.save_pretrained(__UpperCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase )
# Checks everything loads correctly in the same way
_a = tokenizer_r.from_pretrained(__UpperCAmelCase )
_a = tokenizer_p.from_pretrained(__UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) )
shutil.rmtree(__UpperCAmelCase )
# Save tokenizer rust, legacy_format=False
_a = tempfile.mkdtemp()
_a = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase )
_a = tokenizer_p.save_pretrained(__UpperCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_a = tokenizer_r.from_pretrained(__UpperCAmelCase )
_a = tokenizer_p.from_pretrained(__UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) )
shutil.rmtree(__UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ : int = """facebook/mbart-large-50-one-to-many-mmt"""
A_ : Optional[Any] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
A_ : Union[str, Any] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
A_ : Union[str, Any] = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
def _UpperCAmelCase ( cls ) -> Tuple:
_a = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
_a = 1
return cls
def _UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 250038 )
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
self.assertIn(__UpperCAmelCase , self.tokenizer.all_special_ids )
_a = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
_a = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
_a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
_a = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , __UpperCAmelCase )
_a = 10
_a = self.tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[0] , __UpperCAmelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250053, 250001] )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = tempfile.mkdtemp()
_a = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__UpperCAmelCase )
_a = MBartaaTokenizer.from_pretrained(__UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCAmelCase )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
_a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , return_tensors='''pt''' )
_a = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _UpperCAmelCase ( self ) -> List[str]:
_a = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_a = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _UpperCAmelCase ( self ) -> List[Any]:
_a = self.tokenizer(self.src_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=3 , return_tensors='''pt''' )
_a = self.tokenizer(
text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=10 , return_tensors='''pt''' )
_a = targets['input_ids']
_a = shift_tokens_right(__UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , {
# en_XX, A, test, EOS
'''input_ids''': [[250004, 62, 3034, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , ) | 320 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 214 | 0 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase : List[Any] = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase : List[Any] = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase : Any = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase : List[str] = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
UpperCAmelCase : str = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
UpperCAmelCase : Optional[Any] = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
UpperCAmelCase : Optional[int] = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase : List[str] = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase : Optional[Any] = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = VOCAB_FILES_NAMES
__a = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__a = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = VOCAB_FILES_NAMES
__a = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__a = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase : List[Any] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
UpperCAmelCase : str = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
UpperCAmelCase : List[str] = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(A )
class lowerCamelCase__ :
"""simple docstring"""
def __call__( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] = None , UpperCamelCase : List[Any] = None , UpperCamelCase : int = False , UpperCamelCase : Optional[Any] = False , UpperCamelCase : Dict = None , UpperCamelCase : Tuple = None , UpperCamelCase : Tuple = None , **UpperCamelCase : Any , ):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
elif titles is None or texts is None:
__UpperCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
__UpperCAmelCase : Dict = titles if not isinstance(_a , _a ) else [titles]
__UpperCAmelCase : Any = texts if not isinstance(_a , _a ) else [texts]
__UpperCAmelCase : Dict = len(_a )
__UpperCAmelCase : Tuple = questions if not isinstance(_a , _a ) else [questions] * n_passages
if len(_a ) != len(_a ):
raise ValueError(
f'''There should be as many titles than texts but got {len(_a )} titles and {len(_a )} texts.''' )
__UpperCAmelCase : int = super().__call__(_a , _a , padding=_a , truncation=_a )["input_ids"]
__UpperCAmelCase : Dict = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )["input_ids"]
__UpperCAmelCase : Any = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a , _a )
]
}
if return_attention_mask is not False:
__UpperCAmelCase : Dict = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__UpperCAmelCase : Tuple = attention_mask
return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a )
def lowerCamelCase__ ( self : int , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : int = 16 , UpperCamelCase : Any = 64 , UpperCamelCase : List[Any] = 4 , ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = reader_input["input_ids"]
__UpperCAmelCase : Union[str, Any] = reader_output[:3]
__UpperCAmelCase : List[str] = len(_a )
__UpperCAmelCase : Tuple = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ )
__UpperCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
__UpperCAmelCase : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__UpperCAmelCase : List[Any] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__UpperCAmelCase : Union[str, Any] = sequence_ids.index(self.pad_token_id )
else:
__UpperCAmelCase : str = len(_a )
__UpperCAmelCase : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = []
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__UpperCAmelCase : Union[str, Any] = sorted(_a , key=lambda UpperCamelCase : x[1] , reverse=_a )
__UpperCAmelCase : List[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
__UpperCAmelCase : str = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(A )
class lowerCamelCase__ ( A , A ):
"""simple docstring"""
__a = VOCAB_FILES_NAMES
__a = READER_PRETRAINED_VOCAB_FILES_MAP
__a = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = READER_PRETRAINED_INIT_CONFIGURATION
__a = ["""input_ids""", """attention_mask"""]
| 369 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = LEDTokenizer
__a = LEDTokenizerFast
__a = True
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
super().setUp()
__UpperCAmelCase : Tuple = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
__UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase ) )
def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCamelCase__ ( self : str , UpperCamelCase : Any ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@require_torch
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors="""pt""" )
self.assertIn("""input_ids""" , UpperCamelCase )
self.assertIn("""attention_mask""" , UpperCamelCase )
self.assertNotIn("""labels""" , UpperCamelCase )
self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase )
@require_torch
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : str = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_122) )
@require_torch
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""]
__UpperCAmelCase : int = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : Optional[Any] = inputs["""input_ids"""]
__UpperCAmelCase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""]
__UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase )
__UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]]
__UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Any = """A, <mask> AllenNLP sentence."""
__UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
__UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 320 | 0 |
def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits."""
    fib_a, fib_b = 1, 1
    index = 2
    while True:
        i = 0
        fib = fib_a + fib_b
        fib_a, fib_b = fib_b, fib
        index += 1
        for _ in str(fib):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
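# Known result for the default input (Project Euler 25): the first Fibonacci
# number with 1000 digits is the 4782nd term.
assert solution() == 4_782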
| 235 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a__ = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
a__ = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
a__ = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def _compute( self , predictions , references , max_order=4 , smooth=False ):
score = compute_bleu(
reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
(bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
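# A toy sketch of the two ingredients described above (uniform weights, hypothetical
# numbers): corpus BLEU is the geometric mean of the n-gram precisions, scaled by the
# brevity penalty min(1, e^(1 - r/c)) for translation length c and reference length r.
if __name__ == "__main__":
    import math

    precisions = [0.75, 0.5, 0.4, 0.25]  # hypothetical 1- to 4-gram precisions
    geo_mean = math.exp(sum(math.log(p) for p in precisions) / len(precisions))
    c, r = 10, 12  # translation length vs. reference length
    bp = 1.0 if c > r else math.exp(1 - r / c)
    print(bp * geo_mean)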
| 235 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 354 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_A )
class a ( _A ):
'''simple docstring'''
    task: str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'audio': Audio()} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features( self , features ):
        if self.label_column not in features:
            raise ValueError(F'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template
@property
    def column_mapping( self ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
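

if __name__ == "__main__":
    # A usage sketch; the concrete Features below are made up for illustration.
    # Aligning swaps the placeholder ClassLabel class in label_schema for the
    # dataset's real ClassLabel instance.
    features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
    task_template = a()  # the frozen dataclass above; columns default to "audio"/"labels"
    aligned = task_template.align_with_features(features)
    assert aligned.label_schema["labels"] == features["labels"]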
| 177 | 0 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 323 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( Scene ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i, rect in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 )
target.move_to(lowerCamelCase_ )
model_arr.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase_ )
self.add(*lowerCamelCase_ , *lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 )
input.set_fill(lowerCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 )
self.play(Write(lowerCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase_ ) )
self.play(FadeOut(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE : Any = AnimationGroup(
FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = a_c
SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) )
self.wait()
| 323 | 1 |
'''simple docstring'''
import requests
giphy_api_key = '''YOUR API KEY'''
def get_gifs( query: str , api_key: str = giphy_api_key ) -> list:
    '''Return the GIF URLs found by the Giphy search API for the given query.'''
    formatted_query = '''+'''.join(query.split() )
    url = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
    gifs = requests.get(url ).json()['''data''']
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 334 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError ( Exception ):
    '''Raised when iterating a linked list revisits a node.'''
    pass
class Node :
    '''A singly linked list node.'''
    def __init__( self , data: Any ) -> None:
        self.data: Any = data
        self.next_node: Node | None = None
    def __iter__( self ) -> Any:
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node )
            yield node.data
            node = node.next_node
    @property
    def has_loop( self ) -> bool:
        '''True if the list contains a cycle (detected via a visited list).'''
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node.next_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node.next_node = Node(6)
    root_node.next_node.next_node.next_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
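
    # A sketch of an alternative with O(1) extra memory: Floyd's tortoise-and-hare
    # cycle detection (not part of the class above; shown for comparison).
    def has_loop_floyd(head: Node) -> bool:
        slow = fast = head
        while fast and fast.next_node:
            slow = slow.next_node
            fast = fast.next_node.next_node
            if slow is fast:
                return True
        return False

    print(has_loop_floyd(root_node))  # False (root_node was just reset above)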
| 334 | 1 |
import torch
from diffusers import DiffusionPipeline
class _a ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ) -> None:
        """A minimal pipeline running a single UNet + scheduler step."""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    def __call__( self ):
        """Run one denoising step on random noise and return a tensor of ones."""
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(sample , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , sample ).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
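

# A hedged usage sketch; the tiny UNet2DModel/DDPMScheduler configuration below is
# an assumption for illustration, not part of the pipeline definition above.
if __name__ == "__main__":
    from diffusers import DDPMScheduler, UNet2DModel

    unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
    scheduler = DDPMScheduler()
    pipe = _a(unet, scheduler)
    print(pipe().shape)  # ones of shape (1, 3, 8, 8)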
| 110 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"] , truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''') | 76 | 0 |
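

# A hedged follow-up sketch (the threshold is an assumption, not part of the
# original script): the chars-per-token ratio computed in tokenize() above is
# often used to drop samples that tokenize poorly.
def filter_low_ratio(dataset, min_ratio=1.5):
    """Keep only examples whose characters-per-token ratio exceeds min_ratio."""
    return dataset.filter(lambda x: x["ratio_char_token"] > min_ratio)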
def lowerCAmelCase_ (arr ):
    """Return the minimum difference between the sums of two partitions of arr."""
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True  # a sum of 0 is always achievable
    for i in range(1 , s + 1 ):
        dp[0][i] = False  # no positive sum is achievable with zero elements
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i - 1][j]  # skip arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # or take it
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
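

# A quick usage sketch: for [1, 6, 11, 5] the best split is {1, 5, 6} vs {11},
# so the minimum achievable difference is 1.
if __name__ == "__main__":
    print(lowerCAmelCase_([1, 6, 11, 5]))  # 1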
| 82 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor ) -> Dict:
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["""polics""", """health"""] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _ ) -> Dict:
        outputs = classifier("""Who are you voting for in 2020?""", candidate_labels="""politics""" )
        self.assertEqual(outputs, {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        # No kwarg
        outputs = classifier("""Who are you voting for in 2020?""", ["""politics"""] )
        self.assertEqual(outputs, {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        outputs = classifier("""Who are you voting for in 2020?""", candidate_labels=["""politics"""] )
        self.assertEqual(outputs, {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        outputs = classifier("""Who are you voting for in 2020?""", candidate_labels="""politics, public health""" )
        self.assertEqual(
            outputs, {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ), 1.0 )
        outputs = classifier("""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health"""] )
        self.assertEqual(
            outputs, {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ), 1.0 )
        outputs = classifier(
            """Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template="""This text is about {}""" )
        self.assertEqual(outputs, {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["""I am happy"""], ["""positive""", """negative"""] )
        self.assertEqual(
            outputs, [
                {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]}
                for i in range(1 )
            ], )
        outputs = classifier(["""I am happy""", """I am sad"""], ["""positive""", """negative"""] )
        self.assertEqual(
            outputs, [
                {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]}
                for i in range(2 )
            ], )
        with self.assertRaises(ValueError ):
            classifier("""""", candidate_labels="""politics""" )
        with self.assertRaises(TypeError ):
            classifier(None, candidate_labels="""politics""" )
        with self.assertRaises(ValueError ):
            classifier("""Who are you voting for in 2020?""", candidate_labels="""""" )
        with self.assertRaises(TypeError ):
            classifier("""Who are you voting for in 2020?""", candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                """Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template="""Not formatting template""", )
        with self.assertRaises(AttributeError ):
            classifier(
                """Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template=None, )
        self.run_entailment_id(classifier )
    def run_entailment_id(self, zero_shot_classifier: Pipeline ) -> Union[str, Any]:
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1 )
        config.label2id = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0 )
        config.label2id = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0 )
        config.label2id = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id )
@require_torch
    def test_truncation(self ):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""pt""", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100, candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
    def test_small_model_pt(self ):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""pt""", )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
}, )
@require_tf
    def test_small_model_tf(self ):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""tf""", )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
}, )
@slow
@require_torch
    def test_large_model_pt(self ):
        zero_shot_classifier = pipeline("""zero-shot-classification""", model="""roberta-large-mnli""", framework="""pt""" )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
}, )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""", candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""], multi_label=True, )
        self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
}, )
@slow
@require_tf
    def test_large_model_tf(self ):
        zero_shot_classifier = pipeline("""zero-shot-classification""", model="""roberta-large-mnli""", framework="""tf""" )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
}, )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""", candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""], multi_label=True, )
        self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
}, )
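

# A hedged usage sketch of the pipeline under test (model choice is illustrative):
# candidate labels are scored against the sequence via NLI entailment.
if __name__ == "__main__":
    classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    result = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "science"])
    print(result["labels"][0], result["scores"][0])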
| 82 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path) -> None:
    '''Convert an OpenAI GPT TensorFlow checkpoint to a PyTorch model and save it.'''
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 17 |
"""simple docstring"""
def merge_sort(collection) -> list:
    '''Sort by repeatedly extracting the minimum and maximum of the remaining items.'''
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
_a = input('Enter numbers separated by a comma:\n').strip()
_a = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
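    # A quick sanity check of the min/max extraction idea (illustrative):
    # each pass peels off the current min and max, so the result is fully sorted.
    assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]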
| 17 | 1 |
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums ) -> int:
    """Return the maximum sum of non-adjacent elements of nums.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod() | 163 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(__lowercase , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs , expected ):
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs , max_num_jobs , expected ):
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs , expected ):
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
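

# A reference sketch (an assumption for illustration -- the real implementation is
# datasets.utils.sharding._distribute_shards) of the contract the parametrize tables
# above encode: shards are split into at most max_num_jobs contiguous ranges, with
# earlier groups taking one extra shard when the division is uneven.
def _distribute_shards_sketch(num_shards, max_num_jobs):
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shards_indices_per_group.append(range(start, start + num_shards_to_add))
    return shards_indices_per_group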
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester :
    """simple docstring"""
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , d_model=32 , num_hidden_layers=2 , num_attention_heads=4 , ffn_dim=37 , activation_function="gelu" , activation_dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.02 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config( self ):
        return XGLMConfig.from_pretrained('facebook/xglm-564M' )
    def prepare_config_and_inputs( self ):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config( self ):
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=True , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=True , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp( self ):
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
    def test_resize_token_embeddings( self ):
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_lm_generate_xglm( self , verify_outputs=True ):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        input_ids = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
@slow
    def test_xglm_sample( self ):
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        tf.random.set_seed(0 )
        tokenized = tokenizer('Today is a nice day and' , return_tensors='tf' )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0' ):
            output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
@slow
    def test_batch_generation( self ):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        tokenizer.padding_side = 'left'
        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        inputs = tokenizer(sentences , return_tensors='tf' , padding=True )
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
        inputs_padded = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
| 62 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number of the base,exponent pair with the largest value."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file ) ) ):
        a, x = list(map(int, line.split("," ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
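    # A worked illustration of the comparison trick above: x * log10(a) == log10(a ** x),
    # so base/exponent pairs can be ranked without evaluating the huge powers.
    assert (11 * log10(2) < 7 * log10(3)) == (2**11 < 3**7)  # 2048 < 2187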
| 236 | 0 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) -> None:
    '''Collect (context, information gain) pairs and save them to igf_data_file.'''
    set_seed(3 )
    # generate train_data and objective_set
    train_data , objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=1026 , trim=trim )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
    # load pretrained model
    model = load_gpta("gpt2" ).to(device )
    print("computing perplexity on objective set" )
    orig_perp = compute_perplexity(model , objective_set , context_len ).item()
    print("perplexity on objective set:" , orig_perp )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner( secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ):
    '''Train the secondary learner that predicts the information gain of a context.'''
    set_seed(42 )
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("gpt2" )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=100 , igf_model_path=igf_model_path , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune( model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ):
    '''Fine-tune the language model, filtering batches with the secondary learner.'''
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler )
    num_train_epochs = max_steps // (len(train_dataloader )) + 1
    global_step = 0
    context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
    model , lm_optimizer , lm_scheduler = recopy_model(model , device , max_steps )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device )
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len )
    test_perps.append(real_perp )
    print("Test perplexity, step" , global_step , ":" , real_perp )
    for epoch in range(int(num_train_epochs ) ):
        for step, example in enumerate(train_dataloader ):
            torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2 ) - context_len - 1 )
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context , labels=context )
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model , test_dataset , context_len )
                    test_perps.append(real_perp )
                    print("Test perplexity, step" , global_step , ":" , real_perp )
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , finetuned_model_name )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    '''Wire everything together: collect IGF pairs, train the secondary learner, then fine-tune GPT-2.'''
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
    # Required parameters
    parser.add_argument(
        "--data_dir" , default=None , type=str , required=True , help="The input data dir. Should contain data files for WikiText." , )
    parser.add_argument(
        "--model_name_or_path" , default=None , type=str , required=True , help="Path to pretrained model or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--data_file" , type=str , default=None , help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ) , )
    parser.add_argument(
        "--igf_data_file" , type=str , default=None , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
    parser.add_argument(
        "--output_dir" , default=None , type=str , required=True , help="The output directory where the final fine-tuned model is stored." , )
    parser.add_argument(
        "--tokenizer_name" , default=None , type=str , help="Pretrained tokenizer name or path if not the same as model_name" , )
    parser.add_argument("--seed" , type=int , default=None , help="A seed for reproducible training." )
    parser.add_argument(
        "--context_len" , default=32 , type=int , help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ) , )
    parser.add_argument(
        "--size_objective_set" , default=100 , type=int , help="number of articles that are long enough to be used as our objective set" , )
    parser.add_argument(
        "--eval_freq" , default=100 , type=int , help="secondary model evaluation is triggered at eval_freq" )
    parser.add_argument("--max_steps" , default=1000 , type=int , help="To calculate training epochs" )
    parser.add_argument(
        "--secondary_learner_batch_size" , default=128 , type=int , help="batch size of training data for secondary learner" , )
    parser.add_argument(
        "--batch_size" , default=16 , type=int , help="batch size of training data of language model(gpt2) " )
    parser.add_argument(
        "--eval_interval" , default=10 , type=int , help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ) , )
    parser.add_argument(
        "--number" , default=100 , type=int , help="The number of examples split to be used as objective_set/test_data" )
    parser.add_argument(
        "--min_len" , default=1026 , type=int , help="The minimum length of the article to be used as objective set" )
    parser.add_argument(
        "--secondary_learner_max_epochs" , default=15 , type=int , help="number of epochs to train secondary learner" )
    parser.add_argument("--trim" , default=True , type=bool , help="truncate the example if it exceeds context length" )
    parser.add_argument(
        "--threshold" , default=1.0 , type=float , help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ) , )
    parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=str , help="finetuned_model_name" )
    parser.add_argument(
        "--recopy_model" , default=recopy_gpta , type=str , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl" )
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("gpt2" )
    set_seed(42 )
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset , test_dataset = generate_datasets(
        context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=True )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 41 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
snake_case : Any = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 41 | 1 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property ( property ):
    def __get__( self , obj , objtype=None ):
        """simple docstring"""
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('unreadable attribute' )
        attr = '__cached_' + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def strtobool( val ):
    '''Convert a truthy/falsy string to 1 or 0 (mirrors distutils.util.strtobool).'''
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F'''invalid truth value {val!r}''' )
def is_tensor( lowercase__ ):
'''simple docstring'''
if is_torch_fx_proxy(lowercase__ ):
return True
if is_torch_available():
import torch
if isinstance(lowercase__, torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowercase__, tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowercase__, (jnp.ndarray, Tracer) ):
return True
return isinstance(lowercase__, np.ndarray )
def _is_numpy( lowercase__ ):
'''simple docstring'''
return isinstance(lowercase__, np.ndarray )
def is_numpy( lowercase__ ):
'''simple docstring'''
return _is_numpy(lowercase__ )
def _is_torch( lowercase__ ):
'''simple docstring'''
import torch
return isinstance(lowercase__, torch.Tensor )
def is_torch_tensor( lowercase__ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch(lowercase__ )
def _is_torch_device( lowercase__ ):
'''simple docstring'''
import torch
return isinstance(lowercase__, torch.device )
def is_torch_device( lowercase__ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(lowercase__ )
def _is_torch_dtype( lowercase__ ):
    '''simple docstring'''
    import torch
    if isinstance(lowercase__ , str ):
        if hasattr(torch , lowercase__ ):
            lowercase__ = getattr(torch , lowercase__ )
        else:
            return False
    return isinstance(lowercase__ , torch.dtype )
def is_torch_dtype( lowercase__ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(lowercase__ )
def _is_tensorflow( lowercase__ ):
'''simple docstring'''
import tensorflow as tf
return isinstance(lowercase__, tf.Tensor )
def is_tf_tensor( lowercase__ ):
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(lowercase__ )
def _is_tf_symbolic_tensor( lowercase__ ):
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowercase__, 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(lowercase__ )
return type(lowercase__ ) == tf.Tensor
def is_tf_symbolic_tensor( lowercase__ ):
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowercase__ )
def _is_jax( lowercase__ ):
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(lowercase__, jnp.ndarray )
def is_jax_tensor( lowercase__ ):
'''simple docstring'''
return False if not is_flax_available() else _is_jax(lowercase__ )
def to_py_obj( obj ):
    '''Recursively convert tensors/arrays in nested containers to plain Python objects.'''
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def __UpperCamelCase ( lowercase__ : int ):
'''simple docstring'''
if isinstance(lowercase__, (dict, UserDict) ):
return {k: to_numpy(lowercase__ ) for k, v in obj.items()}
elif isinstance(lowercase__, (list, tuple) ):
return np.array(lowercase__ )
elif is_tf_tensor(lowercase__ ):
return obj.numpy()
elif is_torch_tensor(lowercase__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowercase__ ):
return np.asarray(lowercase__ )
else:
return obj
class ModelOutput(OrderedDict):
    """Base class for all model outputs as dataclass, with dict-like and tuple-like access."""

    def __post_init__(self):
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f'''{self.__class__.__name__} has no fields.''')
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''')

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''')
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''')

    def setdefault(self, *args, **kwargs):
        raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''')

    def pop(self, *args, **kwargs):
        raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''')

    def update(self, *args, **kwargs):
        raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''')

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}''')


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper around `contextlib.ExitStack` that enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a given model can return a loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the label arguments used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict, joining keys with `delimiter`."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for numpy, torch, TensorFlow and jax tensors."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(F'''Type not supported for transpose: {type(array)}.''')


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(F'''Type not supported for reshape: {type(array)}.''')


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(F'''Type not supported for squeeze: {type(array)}.''')


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(F'''Type not supported for expand_dims: {type(array)}.''')


def tensor_size(array):
    """Framework-agnostic version of `numpy.size`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(F'''Type not supported for tensor_size: {type(array)}.''')
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [F'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F'''{repo_id}--{value}'''

    return auto_map


def infer_framework(model_class):
    """Infers the framework of a given model class by walking its MRO, without using isinstance()."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('tensorflow') or module.startswith('keras') or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('torch') or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('flax') or module.startswith('jax') or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F'''Could not infer framework from class {model_class}.''')
| 141 |
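The helpers in the row above pick a backend by inspecting the value at runtime. A minimal, numpy-only sketch of the `to_py_obj` dispatch (self-contained; `to_py_obj_demo` is a hypothetical stand-in, not the library function):

import numpy as np


def to_py_obj_demo(obj):
    # Recurse through containers, convert arrays/scalars via .tolist(), and pass
    # everything else through unchanged -- the same shape of dispatch the
    # utilities above perform across torch/TF/JAX/numpy.
    if isinstance(obj, dict):
        return {k: to_py_obj_demo(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [to_py_obj_demo(o) for o in obj]
    if isinstance(obj, (np.ndarray, np.number)):
        return obj.tolist()
    return obj


print(to_py_obj_demo({"logits": np.eye(2), "ids": (np.int64(3), 4)}))
# {'logits': [[1.0, 0.0], [0.0, 1.0]], 'ids': [3, 4]}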
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'
        ):
            _ = FlaxAutoModel.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_model_file_missing(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack',
        ):
            _ = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model')

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, 'Use `from_pt=True` to load this model'):
            _ = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
| 141 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
    'tokenization_perceiver': ['PerceiverTokenizer'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_perceiver'] = [
        'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PerceiverForImageClassificationConvProcessing',
        'PerceiverForImageClassificationFourier',
        'PerceiverForImageClassificationLearned',
        'PerceiverForMaskedLM',
        'PerceiverForMultimodalAutoencoding',
        'PerceiverForOpticalFlow',
        'PerceiverForSequenceClassification',
        'PerceiverLayer',
        'PerceiverModel',
        'PerceiverPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 350 |
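The `__init__` above only fills `_import_structure` and defers the real imports to a lazy module. A stripped-down sketch of that pattern (`DemoLazyModule` is a hypothetical stand-in, not transformers' actual `_LazyModule`):

import importlib
import types


class DemoLazyModule(types.ModuleType):
    # Submodules are imported only when one of their attributes is first
    # accessed; the resolved attribute is then cached on the module object.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(F'''module {self.__name__!r} has no attribute {attr!r}''')
        submodule = importlib.import_module('.' + self._attr_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value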
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" BERT tokenizer (backed by HuggingFace's `tokenizers` library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        # Align the backend normalizer with the requested options if they differ.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs from a sequence or a pair of sequences by adding [CLS] and [SEP] tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Create token type IDs for a sequence pair: 0s for the first sentence, 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 40 | 0 |
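To make the segment-id logic of `create_token_type_ids_from_sequences` concrete, a small worked example in plain Python (the token ids are hypothetical placeholders; only the lengths matter):

# Layout for a BERT sentence pair: [CLS] A A [SEP] B B B [SEP]
cls, sep = [101], [102]              # standard BERT special-token ids
token_ids_0 = [7592, 2088]           # hypothetical ids for sentence A
token_ids_1 = [2129, 2024, 2017]     # hypothetical ids for sentence B

segment_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
print(segment_ids)  # [0, 0, 0, 0, 1, 1, 1, 1]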
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {'comet'}
_has_fairseq = importlib.util.find_spec('fairseq') is not None

UNSUPPORTED_ON_WINDOWS = {'code_eval'}
_on_windows = os.name == 'nt'

REQUIRE_TRANSFORMERS = {'bertscore', 'frugalscore', 'perplexity'}
_has_transformers = importlib.util.find_spec('transformers') is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob('./metrics/*/')]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
    @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning')
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = '[...]'
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path)
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = '[...]'
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path)
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join('metrics', metric_name), *args, **kwargs)

        with patch('datasets.load_metric') as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt')
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string('sv', '', '')  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict['input_ids']) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('bleurt.score._create_predictor') as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher('bertscore')
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('bert_score.scorer.get_model'), patch(
        'bert_score.scorer.bert_cos_score_idf') as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher('comet')
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which are supposed to download a model
    with patch('comet.download_model') as mock_download_model:
        mock_download_model.return_value = None
        with patch('comet.load_from_checkpoint') as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join('metrics', 'seqeval'))
    wrong_scheme = 'ERROR'
    error_message = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 182 | def molarity_to_normality(nfactor, moles, volume):
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume, moles, temperature):
    """Ideal gas law: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure, moles, temperature):
    """Ideal gas law: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure, moles, volume):
    """Ideal gas law: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 182 | 1 |
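A quick numeric check of the ideal-gas helpers above (R = 0.0821 L·atm/(mol·K)); the names and argument order follow the reconstructed signatures, so treat this as a sketch:

# P = nRT / V = (0.82 * 0.0821 * 300) / 3 ≈ 6.73 atm, rounded to 7 by the helper
print(moles_to_pressure(volume=3, moles=0.82, temperature=300))  # 7
# T = PV / (nR) = (3 * 0.82) / (0.0821 * 2) ≈ 15 K
print(pressure_and_volume_to_temperature(pressure=3, moles=2, volume=0.82))  # 15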
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Splits a raw CNN/DM story into article lines and summary lines."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad(sequence, block_size, pad_token_id):
    """Adapt the sequence length to the block size: truncate if longer, pad if shorter."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode story and summary lines and flatten them into single token-id lists."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Segment embeddings alternating 0/1 per sentence, switching at each separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
| 368 |
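A minimal sketch of what `process_story` above produces for a CNN/DailyMail-style file, where `@highlight` separates the article from the summary (the input string is made up):

raw = "First sentence\nSecond sentence without period\n@highlight\nthe summary"
story_lines, summary_lines = process_story(raw)
print(story_lines)    # ['First sentence.', 'Second sentence without period.']
print(summary_lines)  # ['the summary.']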
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Returns a triplet in the array with sum equal to target, else (0, 0, 0). Brute force over permutations."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Returns a triplet in the array with sum equal to target, else (0, 0, 0). Sort + two pointers."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f'''The time for naive implementation is {times[0]}.''')
    print(f'''The time for optimized implementation is {times[1]}.''')
| 285 | 0 |
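The two implementations above trade time for simplicity: `triplet_sum1` enumerates all 3-permutations (O(n^3)), while `triplet_sum2` sorts once and walks two pointers (O(n^2)). A quick agreement check on a fixed array:

arr = [13, 29, 7, 23, 5]
print(triplet_sum1(arr, target=35))  # (5, 7, 23)
print(triplet_sum2(arr, target=35))  # (5, 7, 23)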
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel | 233 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # classic O(n^3) all-pairs shortest path relaxation
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3) | 233 | 1 |
import unittest
from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
    import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator('Something there')
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['generated_text'].startswith('Something there'))

        outputs = generator(['This is great !', 'Something else'], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
            ],
        )

        outputs = generator(
            ['This is great !', 'Something else'], num_return_sequences=2, batch_size=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='pt')
        # do_sample=False necessary for reproducibility
        outputs = generator('Something there', do_sample=False)
        self.assertEqual(outputs, [{'generated_text': ''}])

        num_return_sequences = 3
        outputs = generator(
            'Something there', num_return_sequences=num_return_sequences, num_beams=num_return_sequences)
        target_outputs = [
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': ''},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator('This is a test', do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {'generated_token_ids': ANY(torch.Tensor)},
                {'generated_token_ids': ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = '<pad>'
        outputs = generator(
            ['This is a test', 'This is a second test'],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {'generated_token_ids': ANY(torch.Tensor)},
                    {'generated_token_ids': ANY(torch.Tensor)},
                ],
                [
                    {'generated_token_ids': ANY(torch.Tensor)},
                    {'generated_token_ids': ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='tf')
        # do_sample=False necessary for reproducibility
        outputs = generator('Something there', do_sample=False)
        self.assertEqual(outputs, [{'generated_text': ''}])
| 360 |
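For reference, typical interactive usage of the pipeline under test; the tiny random checkpoint is the one the tests above rely on, so the generated text is meaningless and this only demonstrates the call shape:

from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
result = generator("translate English to German: Hello", do_sample=False)
print(result)  # [{'generated_text': ...}] -- a list with one dict per input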
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time for this deprecated location
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 194 | 0 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), \'per_category_accuracy\': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Calculate intersection and union for a single (prediction, ground truth) pair."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Calculate total intersection and union over a list of samples."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Calculate Mean Intersection-over-Union (mIoU) and related accuracy metrics."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics['mean_iou'] = np.nanmean(iou)
    metrics['mean_accuracy'] = np.nanmean(acc)
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
                    'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
                }),
            reference_urls=[
                'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
            ],
        )

    def _compute(self, predictions, references, num_labels: int, ignore_index: bool, nan_to_num=None, label_map=None, reduce_labels: bool = False):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 167 |
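A worked single-class example of the intersection/union bookkeeping that `mean_iou` aggregates per category:

import numpy as np

# 2x2 prediction vs ground truth, scoring class 1 only
pred = np.array([[1, 1], [0, 1]])
gt = np.array([[1, 0], [0, 1]])

intersect = np.logical_and(pred == 1, gt == 1).sum()  # 2 pixels agree
union = np.logical_or(pred == 1, gt == 1).sum()       # 3 pixels in either mask
print(intersect / union)  # 0.666... -> per-category IoU for class 1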
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase ( unittest.TestCase):
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : Tuple = tempfile.mkdtemp()
# fmt: off
A_ : List[Any] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
A_ : Tuple = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
A_ : Optional[int] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
A_ : Tuple = {'''unk_token''': '''<unk>'''}
A_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
A_ : str = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
A_ : str = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def a_ ( self : Any , **_lowerCamelCase : Dict ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def a_ ( self : Dict , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def a_ ( self : List[str] , **_lowerCamelCase : List[Any] ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def a_ ( self : int ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : Dict = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A_ : Dict = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : int = self.get_tokenizer()
A_ : int = self.get_rust_tokenizer()
A_ : Optional[Any] = self.get_image_processor()
A_ : Union[str, Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : List[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Optional[Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def a_ ( self : str ):
"""simple docstring"""
A_ : Tuple = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Dict = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
    def test_image_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_visual_prompt(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
| 167 | 1 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class ConditionalDetrConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        """simple docstring"""
        return self.d_model
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])
    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-5
    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 12
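# Usage sketch (illustrative, not part of the original module):
#   onnx_config = ConditionalDetrOnnxConfig(ConditionalDetrConfig())
#   list(onnx_config.inputs.keys())  # -> ["pixel_values", "pixel_mask"]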
| 351 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config(self):
        """simple docstring"""
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features)
    def get_config(self):
        """simple docstring"""
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels)
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        """simple docstring"""
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return
    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass
    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        """simple docstring"""
        pass
    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        """simple docstring"""
        pass
    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        """simple docstring"""
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        """simple docstring"""
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        """simple docstring"""
        pass
    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4])
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized")
    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        """simple docstring"""
        pass
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg")
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    def test_inference_swin_backbone(self):
        """simple docstring"""
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
    def test_inference_convnext_backbone(self):
        """simple docstring"""
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 247 | 0 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
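# Loading the artifact back (illustrative sketch, not part of the original script):
# tiny = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
# tok = FSMTTokenizer.from_pretrained(mname_tiny)
# print(tok.batch_decode(tiny.generate(**tok(["Test"], return_tensors="pt"))))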
| 41 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    """simple docstring"""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image) -> Dict:
    """simple docstring"""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    '''simple docstring'''
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
] , ) | 320 | 0 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=3_0522, type=int)
    args = parser.parse_args()
logger.info(f'Loading data from {args.data_file}')
with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(f'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
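    # Downstream usage sketch (illustrative assumption, mirroring the XLM-style smoothing
    # named in the argparse description; not part of this script): the dumped counts are
    # typically smoothed and normalized into per-token masking probabilities, e.g.
    #   token_probs = np.maximum(counts, 1) ** -0.7  # 0.7 is a hypothetical smoothing factor
    #   token_probs = token_probs / token_probs.sum()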
| 273 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    '''simple docstring'''
    def setUp(self):
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"])
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
    @require_cpu
    def test_metric_cpu_noop(self):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main, num_processes=1)
    @require_cpu
    def test_metric_cpu_multi(self):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main)
    @require_single_gpu
    def test_metric_gpu(self):
        '''simple docstring'''
        self.test_metrics.main()
    @require_multi_gpu
    def test_metric_gpu_multi(self):
        '''simple docstring'''
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 273 | 1 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
def solution(limit: int = 10_000) -> int:
    '''simple docstring'''
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
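# Worked example (illustrative, not part of the original file): 220 and 284 form the
# smallest amicable pair, so both are counted by solution():
#   sum_of_divisors(220) = 1+2+4+5+10+11+20+22+44+55+110 = 284
#   sum_of_divisors(284) = 1+2+4+71+142 = 220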
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 1 | """simple docstring"""
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 1_000) -> int:
    return fibonacci_digits_index(n)
print(solution(int(str(input()).strip())))
| 177 | 0 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name
    if "patch_embed" in old_name:
        layer = old_name.split(".")[1]
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")
if "network" in old_name and re.search(R"\d\.\d" , lowercase_ ):
A__ = R"""\b\d{2}\b"""
if bool(re.search(lowercase_ , lowercase_ ) ):
A__ = re.search(R"\d\.\d\d." , lowercase_ ).group()
else:
A__ = re.search(R"\d\.\d." , lowercase_ ).group()
if int(match[0] ) < 6:
A__ = old_name.replace(lowercase_ , "" )
A__ = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
A__ = """intermediate_stages.""" + trimmed_name
else:
A__ = old_name.replace(lowercase_ , "" )
if int(match[2] ) < num_meta4D_last_stage:
A__ = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
A__ = str(int(match[2] ) - num_meta4D_last_stage )
A__ = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
A__ = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
A__ = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
A__ = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
A__ = trimmed_name.replace("fc2" , "linear_out" )
A__ = """last_stage.""" + trimmed_name
elif "network" in old_name and re.search(R".\d." , lowercase_ ):
A__ = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
A__ = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
A__ = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
A__ = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
A__ = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
A__ = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
A__ = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
A__ = """efficientformer.""" + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
A__ = new_name.replace("norm" , "layernorm" )
A__ = """efficientformer.""" + new_name
else:
A__ = """efficientformer.encoder.""" + new_name
return new_name
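# Example of the mapping above (illustrative): "patch_embed.0.weight" first becomes
# "patch_embed.convolution1.weight" and then, via the "patch_embed" branch,
# "efficientformer.patch_embed.convolution1.weight".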
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"])
    original_pixel_values = processor(images=image, return_tensors="pt").pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ])
    pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878])
        assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")
    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True)
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 360 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["MobileViTFeatureExtractor"]
SCREAMING_SNAKE_CASE = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
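# Note on the lazy pattern above (illustrative): _LazyModule is installed in sys.modules
# in place of this package, so heavy submodules (e.g. modeling_mobilevit) are imported
# only when an attribute such as MobileViTModel is first accessed.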
| 230 | 0 |
import requests
_lowerCamelCase ="YOUR API KEY"
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ = giphy_api_key ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ='+'.join(query.split() )
SCREAMING_SNAKE_CASE =F'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
SCREAMING_SNAKE_CASE =requests.get(lowerCAmelCase_ ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 334 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 334 | 1 |
'''simple docstring'''
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None
    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list: list):
    """simple docstring"""
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head
def print_reverse(head_node: Node) -> None:
    """simple docstring"""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
def main():
    """simple docstring"""
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
    main()
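# Expected behaviour (illustrative): the list built in main() prints as
# "14->52->14->12->43", while print_reverse recurses to the tail first and then
# prints 43, 12, 14, 52, 14 on separate lines.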
| 371 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
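    # Worked example (illustrative, not part of the original config): with the default
    # conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the product is
    # 5 * 2**6 = 320, i.e. one encoder frame per 320 raw waveform samples
    # (20 ms of audio at a 16 kHz sampling rate).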
| 142 | 0 |
import os
from math import log10
def _UpperCAmelCase ( snake_case = "base_exp.txt" ):
"""simple docstring"""
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(snake_case ) , snake_case ) ) ):
_lowerCAmelCase , _lowerCAmelCase = list(map(snake_case , line.split(""",""" ) ) )
if x * logaa(snake_case ) > largest:
_lowerCAmelCase = x * logaa(snake_case )
_lowerCAmelCase = i + 1
return result
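# Why log10 works here (illustrative note, not part of the original file): comparing
# a**x directly would build huge integers, but log10(a**x) == x * log10(a) preserves
# the ordering. E.g. for 2**10 vs 3**7: 10*log10(2) ~ 3.01 < 7*log10(3) ~ 3.34,
# and indeed 1024 < 2187.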
if __name__ == "__main__":
print(solution())
| 82 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """simple docstring"""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length)
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length)
    return result
def solution(max_power: int = 9) -> int:
    """simple docstring"""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(f"{solution() = }")
| 82 | 1 |
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__SCREAMING_SNAKE_CASE ) ) ** 2)
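# Worked example (illustrative, not part of the original file): for an initial
# intensity of 100, malus_law(100, 45) ~ 50.0 and malus_law(100, 60) ~ 25.0,
# matching Malus's law I = I0 * cos^2(theta).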
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 360 | """simple docstring"""
import os
import sys
import unittest
__SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py")
__SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        '''simple docstring'''
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test_file)
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test_file)
        EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
        EXPECTED_BLIP_MAPPING = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        '''simple docstring'''
        bert_model_test_mapping = get_model_to_test_mapping(bert_test_file)
        blip_model_test_mapping = get_model_to_test_mapping(blip_test_file)
        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        '''simple docstring'''
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test_file)
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test_file)
        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 321 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        input_ids = tokenizer.encode(input_text)
        output_text = tokenizer.decode(input_ids)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        x_token_1 = tokenizer.encode(prefix_text + input_text)
        x_token_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        x_token_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        decoded_1 = tokenizer.decode(x_token_1)
        decoded_2 = tokenizer.decode(x_token_2)
        decoded_3 = tokenizer.decode(x_token_3)
        self.assertEqual(decoded_1, expected_text)
        self.assertEqual(decoded_2, expected_text)
        self.assertEqual(decoded_3, expected_text)
@slow
def snake_case__ ( self):
tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""")
# Testing tokenization
prefix_text = """こんにちは、世界。"""
input_text = """こんばんは、㔺界。😀"""
len_prefix = len(tokenizer.encode(prefix_text)) - 2
len_text = len(tokenizer.encode(input_text)) - 2
expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
x_token_1 = tokenizer(prefix_text + input_text).token_type_ids
x_token_2 = tokenizer("""""" , prefix_text=prefix_text + input_text).token_type_ids
x_token_3 = tokenizer(input_text , prefix_text=prefix_text).token_type_ids
self.assertListEqual(x_token_1 , expected_mask_1)
self.assertListEqual(x_token_2 , expected_mask_2)
self.assertListEqual(x_token_3 , expected_mask_3)
@slow
def snake_case__ ( self):
tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""")
x_token_1 = tokenizer.encode("""あンいワ""")
x_token_2 = tokenizer.encode("""""" , prefix_text="""あンいワ""")
x_token_3 = tokenizer.encode("""いワ""" , prefix_text="""あン""")
self.assertEqual(tokenizer.decode(x_token_1) , tokenizer.decode(x_token_2))
self.assertEqual(tokenizer.decode(x_token_1) , tokenizer.decode(x_token_3))
self.assertNotEqual(x_token_1 , x_token_2)
self.assertNotEqual(x_token_1 , x_token_3)
self.assertEqual(x_token_2[1] , x_token_3[-1]) # SEG token
self.assertEqual(x_token_2[1] , x_token_3[3]) # SEG token
@slow
def snake_case__ ( self):
tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""")
input_pairs = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
x_token = tokenizer(input_pairs , padding=True)
x_token_2 = tokenizer.batch_encode_plus(input_pairs , padding=True)
# fmt: off
expected_input_ids = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
expected_token_type_ids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
expected_attention_mask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , expected_input_ids)
self.assertListEqual(x_token.token_type_ids , expected_token_type_ids)
self.assertListEqual(x_token.attention_mask , expected_attention_mask)
self.assertListEqual(x_token_2.input_ids , expected_input_ids)
self.assertListEqual(x_token_2.token_type_ids , expected_token_type_ids)
self.assertListEqual(x_token_2.attention_mask , expected_attention_mask)
def snake_case__ ( self):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def snake_case__ ( self):
# tokenizer has no padding token
pass | 163 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""
    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
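# A minimal usage sketch (hypothetical local path, not part of the original
# file): this builder is normally reached through `load_dataset` with the
# "audiofolder" loader, e.g.
#
#     from datasets import load_dataset
#     ds = load_dataset("audiofolder", data_dir="path/to/audio_folder")
#     ds["train"][0]["audio"]  # decoded audio: {"array": ..., "sampling_rate": ...}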
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS | 163 | 1 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 173 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest ( TestCase ):
def __lowerCamelCase ( self ):
self.tmpdirname = tempfile.mkdtemp()
self.retrieval_vector_size = 8
# DPR tok
vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
dpr_tokenizer_path = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(dpr_tokenizer_path , exist_ok=True )
self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
self.special_tokens_map = {'''unk_token''': '''<unk>'''}
bart_tokenizer_path = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(bart_tokenizer_path , exist_ok=True )
self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(vocab_tokens ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(merges ) )
def __lowerCamelCase ( self ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __lowerCamelCase ( self ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __lowerCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def __lowerCamelCase ( self ):
save_dir = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(save_dir )
rag_tokenizer.save_pretrained(save_dir )
new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def __lowerCamelCase ( self ):
tokenizer = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
input_strings = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
input_dict = tokenizer(input_strings )
self.assertIsNotNone(input_dict )
@slow
def __lowerCamelCase ( self ):
tokenizer = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
input_strings = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
input_dict = tokenizer(input_strings )
self.assertIsNotNone(input_dict )
| 173 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_A : Optional[Any] =logging.get_logger(__name__)
_A : int ={
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = '''deberta-v2'''

    def __init__(self, vocab_size=128_100, hidden_size=1_536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6_144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("""|""")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("""pooler_hidden_size""" , hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
        else:
            return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, tokenizer: "PreTrainedTokenizerBase" = None, ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
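# A short usage sketch for the config class above (illustrative values and a
# hypothetical output directory, not part of the original module):
#
#     config = DebertaV2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
#     config.save_pretrained("my-deberta-v2-config")
#     reloaded = DebertaV2Config.from_pretrained("my-deberta-v2-config")
#     assert reloaded.hidden_size == 768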
| 41 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
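# A minimal usage sketch for `dijkstra` above (illustrative grid, not part of
# the original module): cells holding 1 are walkable and 0 are blocked.
#
#     grid = np.array([[1, 1, 1],
#                      [0, 0, 1],
#                      [1, 1, 1]])
#     dist, path = dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
#     # dist == 6.0 and path walks around the blocked middle row:
#     # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]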
| 41 | 1 |
"""simple docstring"""
import numpy as np
SQUARE = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class BifidCipher:
    def __init__(self):
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the one-based (row, column) indexes of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the one-based (row, column) position in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode `message` with the Bifid cipher ("j" is folded into "i")."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        """Decode a message produced by `encode`."""
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
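# A quick round-trip sketch for the class above (illustrative message, not
# part of the original file):
#
#     cipher = BifidCipher()
#     secret = cipher.encode("testmessage")
#     assert cipher.decode(secret) == "testmessage"
#
# Note that the square merges "j" into "i", so inputs containing "j" decode
# with "i" in its place.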
| 358 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Tuple = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
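# Rough illustration of the helper above with assumed toy values: positions
# equal to `config.pad_token_id` are zeroed out in the returned attention mask.
#
#     config.pad_token_id == 1
#     input_ids           == np.array([[5, 7, 2], [9, 2, 1]])
#     attention_mask      == np.array([[1, 1, 1], [1, 1, 0]])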
class FlaxBlenderbotModelTester:
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=1_3 , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Dict=9_9 , lowerCAmelCase_ : List[Any]=1_6 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : str=3_2 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : List[Any]=0.02 , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = eos_token_id
lowercase_ = pad_token_id
lowercase_ = bos_token_id
lowercase_ = initializer_range
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
lowercase_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
lowercase_ = shift_tokens_right(lowerCAmelCase_ , 1 , 2)
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCAmelCase_ , )
lowercase_ = prepare_blenderbot_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return config, inputs_dict
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = 99
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
lowercase_ = input_ids.shape[0]
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._get_config_and_data()
lowercase_ = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase_)
lowercase_ = lm_model(input_ids=lowerCAmelCase_)
lowercase_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
lowercase_ = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase_)
lowercase_ = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa)
lowercase_ = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa)
lowercase_ = lm_model(input_ids=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_)
lowercase_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa)
lowercase_ = shift_tokens_right(lowerCAmelCase_ , 1 , 2)
lowercase_ = np.equal(lowerCAmelCase_ , 1).astype(np.floataa).sum()
lowercase_ = np.equal(lowerCAmelCase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(lowerCAmelCase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase , __UpperCAmelCase ):
lowercase__ = True
lowercase__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowercase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = FlaxBlenderbotModelTester(self)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = model_class(lowerCAmelCase_)
@jax.jit
def encode_jitted(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : str):
return model.encode(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
with self.subTest("""JIT Enabled"""):
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
lowercase_ = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]):
return model.decode(
decoder_input_ids=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , encoder_outputs=lowerCAmelCase_ , )
with self.subTest("""JIT Enabled"""):
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase_ = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowercase_ = np.ones((1, 1)) * model.config.eos_token_id
lowercase_ = model(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""")
@slow
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 1_5, """max_length""": 2_5}
lowercase_ = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
lowercase_ = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=lowerCAmelCase_)
lowercase_ = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""")
lowercase_ = ["""Sam"""]
lowercase_ = tokenizer(lowerCAmelCase_ , return_tensors="""jax""")
lowercase_ = model.generate(**lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = """Sam is a great name. It means \"sun\" in Gaelic."""
lowercase_ = tokenizer.batch_decode(lowerCAmelCase_ , **lowerCAmelCase_)
assert generated_txt[0].strip() == tgt_text
| 313 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __A ( unittest.TestCase ):
def __A ( self ):
self.block_size = 10
def __A ( self ):
sequence = [1, 2, 3, 4]
expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
def __A ( self ):
sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
def __A ( self ):
sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
def __A ( self ):
raw_story = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
_ , summary_lines = process_story(raw_story )
self.assertEqual(summary_lines , [] )
def __A ( self ):
raw_story = """"""
story_lines , summary_lines = process_story(raw_story )
self.assertEqual(story_lines , [] )
self.assertEqual(summary_lines , [] )
def __A ( self ):
raw_story = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
story_lines , summary_lines = process_story(raw_story )
expected_story_lines = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(expected_story_lines , story_lines )
expected_summary_lines = ["""It was the best of times."""]
self.assertEqual(expected_summary_lines , summary_lines )
def __A ( self ):
sequence = torch.tensor([1, 2, 3, 4] )
expected = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(sequence , 0 ).numpy() , expected.numpy() )
def __A ( self ):
sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(sequence , 23 ).numpy() , expected.numpy() )
def __A ( self ):
sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(sequence , 1 ).numpy() , expected.numpy() )
def __A ( self ):
separator_id = 101
batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
result = compute_token_type_ids(batch , separator_id )
np.testing.assert_array_equal(result , expected )
| 44 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
pipeline_class = KandinskyV22ControlnetPipeline
params = ["""image_embeds""", """negative_image_embeds""", """hint"""]
batch_params = ["""image_embeds""", """negative_image_embeds""", """hint"""]
required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
test_xformers_attention_forwardGenerator_pass = False
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_a(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 100
@property
def dummy_unet(self):
torch.manual_seed(0)
model_kwargs = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
model = UNet2DConditionModel(**model_kwargs)
return model
@property
def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def dummy_movq(self):
torch.manual_seed(0)
model = VQModel(**self.dummy_movq_kwargs)
return model
def get_dummy_components(self):
unet = self.dummy_unet
movq = self.dummy_movq
scheduler = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
components = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def get_dummy_inputs(self , device , seed=0):
image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed)).to(device)
negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
device)
# create hint
hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed)).to(device)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_kandinsky_controlnet(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device) , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_controlnet(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
hint = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png")
hint = torch.from_numpy(np.array(hint)).float() / 255.0
hint = hint.permute(2 , 0 , 1).unsqueeze(0)
pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16)
pipe_prior.to(torch_device)
pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.float16)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
prompt = "A robot, 4k photo"
generator = torch.Generator(device="cuda").manual_seed(0)
image_emb , zero_image_emb = pipe_prior(
prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
generator = torch.Generator(device="cuda").manual_seed(0)
output = pipeline(
image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , output_type="np" , )
image = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(image , expected_image)
| 40 | 0 |
"""simple docstring"""
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
    config.addinivalue_line(
        "markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
    config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
    config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
    config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
    config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )


def pytest_sessionfinish(session , exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self , want , got , optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
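# With the IGNORE_RESULT flag registered above, an individual doctest can opt
# out of output comparison (illustrative snippet):
#
#     >>> import random
#     >>> random.random()  # doctest: +IGNORE_RESULT
#     0.123
#
# The custom checker returns True for such examples regardless of the output.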
| 352 |
def perfect_cube(n: int) -> bool:
    """Return True when a non-negative integer `n` is a perfect cube."""
    # Comparing the floating-point cube root directly (val * val * val == n)
    # is unreliable because n ** (1 / 3) is inexact; round to the nearest
    # integer before cubing.
    val = round(n ** (1 / 3))
    return val * val * val == n
print(perfect_cube(27))
print(perfect_cube(4))
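# Without the rounding above, the naive float comparison can fail even on
# exact cubes; for example, 64 ** (1 / 3) evaluates to 3.9999999999999996 on
# typical platforms, so (val * val * val) == 64 would be False.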
| 319 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.lineara = nn.Linear(3 , 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.lineara = nn.Linear(4 , 5)

    def forward(self , x):
        return self.lineara(self.batchnorm(self.lineara(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self , module , *args , **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self , module , output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model , test_hook)
        self.assertEqual(test_model._hf_hook , test_hook)
        self.assertTrue(hasattr(test_model , "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model , "_hf_hook"))
        self.assertFalse(hasattr(test_model , "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model , test_hook)
        add_hook_to_module(test_model , test_hook , append=True)
        self.assertEqual(isinstance(test_model._hf_hook , SequentialHook) , True)
        self.assertEqual(len(test_model._hf_hook.hooks) , 2)
        self.assertTrue(hasattr(test_model , "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model , "_hf_hook"))
        self.assertFalse(hasattr(test_model , "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2 , 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)
        test_hook = PreForwardHook()
        add_hook_to_module(test_model , test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1 , expected , atol=1E-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model , test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1 , expected , atol=1E-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook() , PreForwardHook())
        add_hook_to_module(test_model , test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2 , expected2 , atol=1E-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2 , 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model , test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1 , output + 1 , atol=1E-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model , test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1 , output + 1 , atol=1E-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook() , PostForwardHook())
        add_hook_to_module(test_model , test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2 , output + 2 , atol=1E-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2 , 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model , test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1 , output + 1))
        self.assertTrue(output1.requires_grad)
        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
@require_multi_gpu
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__UpperCAmelCase , AlignDevicesHook(io_same_device=__UpperCAmelCase ) )
lowerCAmelCase__ = torch.randn(2 , 3 ).to(0 )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.assertEqual(output.device , torch.device(0 ) )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
lowerCAmelCase__ = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__UpperCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCAmelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCAmelCase__ = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCAmelCase )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
lowerCAmelCase__ = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__UpperCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCAmelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
lowerCAmelCase__ = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(__UpperCAmelCase , execution_device=__UpperCAmelCase , offload=__UpperCAmelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing the hooks loads the weights back into the model.
        remove_hook_from_submodules(model )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
        attach_align_device_hook(model , execution_device=execution_device , offload=True , offload_buffers=True )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing the hooks loads the weights back into the model.
        remove_hook_from_submodules(model )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
        model = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
        # This will move each submodule onto different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model , execution_device=execution_device , offload=True , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing the hooks loads the weights back into the model.
        remove_hook_from_submodules(model )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
        attach_align_device_hook(
            model , execution_device=execution_device , offload=True , weights_map=model.state_dict() , offload_buffers=True , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing the hooks loads the weights back into the model.
        remove_hook_from_submodules(model )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
| 340 |
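The offload round trip asserted above can be reproduced in isolation on a bare nn.Linear. A minimal sketch, assuming accelerate's hook API behaves as these tests exercise it:

import torch
from torch import nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_module

layer = nn.Linear(3, 4)
add_hook_to_module(layer, AlignDevicesHook(execution_device="cpu", offload=True))
assert layer.weight.device == torch.device("meta")  # parameters are offloaded
output = layer(torch.randn(2, 3))                   # weights are reloaded just for the forward pass
assert output.device == torch.device("cpu")
remove_hook_from_module(layer)
assert layer.weight.device == torch.device("cpu")   # weights are restored in place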
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def __lowerCamelCase ( files , tmp_path_factory ):
    '''simple docstring'''
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def __lowerCamelCase ( dataset_info , tmp_path ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , 'dataset_info.json' ) )
def __lowerCamelCase ( ):
'''simple docstring'''
    dataset_info = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
assert dataset_info_yaml_dict == reloaded
def __lowerCamelCase ( ):
'''simple docstring'''
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def __lowerCamelCase ( dataset_infos_dict , tmp_path ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name keys of the dataset_infos_dict take precedence over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , 'README.md' ) )
| 285 | 0 |
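The README front-matter path exercised by the first test above fits in a few lines. A minimal sketch, with dset_dir as a hypothetical scratch directory:

from pathlib import Path
from datasets.info import DatasetInfosDict

dset_dir = Path("dset_infos_dir")  # hypothetical scratch directory
dset_dir.mkdir(exist_ok=True)
(dset_dir / "README.md").write_text("---\ndataset_info:\n  dataset_size: 42\n---")
infos = DatasetInfosDict.from_directory(str(dset_dir))
assert infos["default"].dataset_size == 42  # parsed from the YAML front matter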
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , num_labels=10 , initializer_range=0.02 , attention_type="divided_space_time" , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase__ : Union[str, Any] = (image_size // patch_size) ** 2
lowercase__ : Union[str, Any] = (num_frames) * self.num_patches_per_frame + 1
def snake_case ( self : Optional[int] ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Any ):
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
return config
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[Any] = TimesformerModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : List[Any] = TimesformerForVideoClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE )
# verify the logits shape
lowercase__ : List[str] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : Dict ):
        self.model_tester = TimesformerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple=False ):
lowercase__ : Union[str, Any] = copy.deepcopy(SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE ):
lowercase__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE )
return inputs_dict
def snake_case ( self : Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def snake_case ( self : Any ):
pass
def snake_case ( self : Tuple ):
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def snake_case ( self : Union[str, Any] ):
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Any = [*signature.parameters.keys()]
lowercase__ : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[int] = TimesformerModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = True
for model_class in self.all_model_classes:
lowercase__ : List[str] = self.model_tester.seq_length
lowercase__ : Any = self.model_tester.num_frames
lowercase__ : Optional[int] = True
lowercase__ : List[str] = False
lowercase__ : List[Any] = True
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : List[Any] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Dict = True
lowercase__ : int = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Any = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowercase__ : Any = len(SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
lowercase__ : Tuple = True
lowercase__ : Tuple = True
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def snake_case ( self : List[Any] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.hidden_states
lowercase__ : List[str] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
lowercase__ : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Tuple = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def prepare_video( ):
    """simple docstring"""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Dict ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
SCREAMING_SNAKE_CASE )
lowercase__ : int = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(video[:8] , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : str = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Union[str, Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 121 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , num_labels=10 , initializer_range=0.02 , attention_type="divided_space_time" , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase__ : Union[str, Any] = (image_size // patch_size) ** 2
lowercase__ : Union[str, Any] = (num_frames) * self.num_patches_per_frame + 1
def snake_case ( self : Optional[int] ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Any ):
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
return config
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[Any] = TimesformerModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : List[Any] = TimesformerForVideoClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE )
# verify the logits shape
lowercase__ : List[str] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : Dict ):
        self.model_tester = TimesformerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple=False ):
lowercase__ : Union[str, Any] = copy.deepcopy(SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE ):
lowercase__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE )
return inputs_dict
def snake_case ( self : Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def snake_case ( self : Any ):
pass
def snake_case ( self : Tuple ):
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def snake_case ( self : Union[str, Any] ):
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Any = [*signature.parameters.keys()]
lowercase__ : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[int] = TimesformerModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = True
for model_class in self.all_model_classes:
lowercase__ : List[str] = self.model_tester.seq_length
lowercase__ : Any = self.model_tester.num_frames
lowercase__ : Optional[int] = True
lowercase__ : List[str] = False
lowercase__ : List[Any] = True
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : List[Any] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Dict = True
lowercase__ : int = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Any = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowercase__ : Any = len(SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
lowercase__ : Tuple = True
lowercase__ : Tuple = True
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def snake_case ( self : List[Any] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.hidden_states
lowercase__ : List[str] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
lowercase__ : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Tuple = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def prepare_video( ):
    """simple docstring"""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Dict ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
SCREAMING_SNAKE_CASE )
lowercase__ : int = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(video[:8] , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : str = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Union[str, Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 121 | 1 |
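The sequence-length bookkeeping in the tester above, worked out for its default arguments (image_size=10, patch_size=2, num_frames=2):

image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2  # 5 * 5 = 25
seq_length = num_frames * num_patches_per_frame + 1      # plus the CLS token
assert (num_patches_per_frame, seq_length) == (25, 51)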
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case : List[Any] = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : str = ['''ConditionalDetrFeatureExtractor''']
snake_case : List[str] = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : str = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
snake_case : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 94 |
"""simple docstring"""
def validate_initial_digits( credit_card_number ) -> bool:
"""simple docstring"""
return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') )
def luhn_validation( credit_card_number ) -> bool:
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len, -1, -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1, -1, -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def validate_credit_card_number( credit_card_number ) -> bool:
    """simple docstring"""
    error_message = F'''{credit_card_number} is an invalid credit card number because'''
    if not credit_card_number.isdigit():
        print(F'''{error_message} it has nonnumerical characters.''' )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(F'''{error_message} of its length.''' )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(F'''{error_message} of its first two digits.''' )
        return False
    if not luhn_validation(credit_card_number ):
print(F'''{error_message} it fails the Luhn check.''' )
return False
print(F'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 194 | 0 |
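A quick trace of the doubling rule above: a doubled digit of 18 is reduced as 18 % 10 + 1 = 9, the same as its digit sum 1 + 8. The classic Visa test number passes the full checksum, and perturbing its last digit breaks it:

assert luhn_validation("4111111111111111")      # checksum total is 30
assert not luhn_validation("4111111111111112")  # total becomes 31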
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 303 |
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class snake_case :
    def __init__( self) ->Optional[int]:
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits" , self.watermark)
    def UpperCAmelCase__ ( self , images) ->Optional[int]:
        # can't encode images that are smaller than 256
        if images.shape[-1] < 2_56:
            return images
        images = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy()
        images = [self.encoder.encode(image , "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0 , 3 , 1 , 2)
        images = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0)
return images | 303 | 1 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors( n ):
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution( limit = 1_0_0_0_0 ):
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 83 |
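A sanity check of sum_of_divisors on the classic amicable pair, which is exactly the property solution() accumulates:

assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220  # 220 and 284 map to each other, so both count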
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
SCREAMING_SNAKE_CASE = "src/diffusers"
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
SCREAMING_SNAKE_CASE = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
SCREAMING_SNAKE_CASE = "\n{0} = None\n"
SCREAMING_SNAKE_CASE = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
SCREAMING_SNAKE_CASE = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def find_backend( line ) -> Tuple:
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init( ) -> str:
    with open(os.path.join(PATH_TO_DIFFUSERS , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Get to the point where we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through to the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith("else:" ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object( name , backend_name ) -> Dict:
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def create_dummy_files( backend_specific_objects=None ) -> Union[str, Any]:
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # Special correspondence from backend to module name, as used in the requires_<module_name> functions
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f"""\"{b}\"""" for b in backend.split("_and_" ) ) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
return dummy_files
def check_dummies( overwrite=False ) -> Union[str, Any]:
    dummy_files = create_dummy_files()
    # Special correspondence from backend to shortcut, as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , "utils" )
    dummy_file_paths = {
        backend: os.path.join(path , f"""dummy_{short_names.get(backend , backend )}_objects.py""" )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , "r" , encoding="utf-8" , newline="\n" ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"""Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main """
                    "__init__ has new objects." )
                with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"""diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` """
                    "to fix this." )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
SCREAMING_SNAKE_CASE = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 247 | 0 |
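For reference, the constructor portion of a dummy generated from the DUMMY_CLASS template above looks like this sketch (the class name is illustrative); instantiating it raises unless the torch backend is installed:

from diffusers.utils import DummyObject, requires_backends

class UNet2DModel(metaclass=DummyObject):  # illustrative name
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])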
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_A : List[str] = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_A : List[str] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_A : Tuple = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self ) ->str:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
    def _compute( self , predictions , references , char_order : int = CHRF.CHAR_ORDER , word_order : int = CHRF.WORD_ORDER , beta : int = CHRF.BETA , lowercase : bool = False , whitespace : bool = False , eps_smoothing : bool = False , ) ->List[str]:
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
        output = sb_chrf.corpus_score(predictions , transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 370 |
def actual_power( a , b ) -> int:
    """simple docstring"""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power( a , b ) -> float:
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
| 265 | 0 |
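actual_power above recomputes actual_power(a, b // 2) twice per level; an iterative exponentiation-by-squaring sketch (an addition, not part of the file above) does the same job in O(log b) multiplications:

def fast_power(a: float, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    result = 1.0
    while b:
        if b & 1:       # low bit set: fold the current square into the result
            result *= a
        a *= a          # square the base
        b >>= 1         # move to the next bit of the exponent
    return result

assert fast_power(2, 10) == 1024
assert fast_power(-2, -3) == -0.125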
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __SCREAMING_SNAKE_CASE ( orig_key ) -> Union[str, Any]:
    '''simple docstring'''
    if "model" in orig_key:
        orig_key = orig_key.replace('''model.''' , '''''' )
    if "norm1" in orig_key:
        orig_key = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
    if "norm2" in orig_key:
        orig_key = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
    if "norm" in orig_key:
        orig_key = orig_key.replace('''norm''' , '''LayerNorm''' )
    if "transformer" in orig_key:
        layer_num = orig_key.split('''.''' )[0].split('''_''' )[-1]
        orig_key = orig_key.replace(F"""transformer_{layer_num}""" , F"""encoder.layer.{layer_num}""" )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('''mha.attn''' , '''attention.self''' )
    if "mha" in orig_key:
        orig_key = orig_key.replace('''mha''' , '''attention''' )
    if "W_q" in orig_key:
        orig_key = orig_key.replace('''W_q''' , '''self.query''' )
    if "W_k" in orig_key:
        orig_key = orig_key.replace('''W_k''' , '''self.key''' )
    if "W_v" in orig_key:
        orig_key = orig_key.replace('''W_v''' , '''self.value''' )
    if "ff1" in orig_key:
        orig_key = orig_key.replace('''ff1''' , '''intermediate.dense''' )
    if "ff2" in orig_key:
        orig_key = orig_key.replace('''ff2''' , '''output.dense''' )
    if "ff" in orig_key:
        orig_key = orig_key.replace('''ff''' , '''output.dense''' )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
    if "mlm" in orig_key:
        orig_key = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
    if "cls" not in orig_key:
        orig_key = '''yoso.''' + orig_key
    return orig_key
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ) -> Dict:
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[__SCREAMING_SNAKE_CASE(key )] = val
    orig_state_dict['''cls.predictions.bias'''] = orig_state_dict['''cls.predictions.decoder.bias''']
    orig_state_dict['''yoso.embeddings.position_ids'''] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint( pytorch_model_path , config_file , pytorch_dump_path ) -> int:
    '''simple docstring'''
    orig_state_dict = torch.load(pytorch_model_path , map_location='''cpu''' )['''model_state_dict''']
    config = YosoConfig.from_json_file(config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A : List[str] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 273 |
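Tracing the renaming rules above on a hypothetical checkpoint key, one replace branch at a time:

key = "model.transformer_0.mha.W_q.weight"             # hypothetical fairseq-style key
key = key.replace("model.", "")                        # strip the wrapper prefix
key = key.replace("transformer_0", "encoder.layer.0")  # per-layer renumbering
key = key.replace("mha", "attention")                  # attention block
key = key.replace("W_q", "self.query")                 # query projection
key = "yoso." + key                                    # final model prefix
assert key == "yoso.encoder.layer.0.attention.self.query.weight"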
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Dict = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 1 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __lowercase ( snake_case_ : int ) ->List[Any]:
'''simple docstring'''
__A : int = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
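# pop is called with a default (None in the upstream fairseq conversion scripts),
# so checkpoints missing any of these keys are skipped without a KeyError.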
for k in ignore_keys:
state_dict.pop(snake_case_ ,snake_case_ )
def __lowercase ( snake_case_ : List[Any] ) ->Tuple:
'''simple docstring'''
__A , __A : Tuple = emb.weight.shape
__A : str = nn.Linear(snake_case_ ,snake_case_ ,bias=snake_case_ )
__A : str = emb.weight.data
return lin_layer
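# Weight tying: the Linear above is created without a bias and shares the token
# embedding matrix, so logits are computed as hidden_states @ emb.weight.T.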
def __lowercase ( snake_case_ : Dict ) ->str:
'''simple docstring'''
__A : List[str] = torch.load(snake_case_ ,map_location='''cpu''' )
__A : int = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
__A : int = mam_aaa['''model''']
remove_ignore_keys_(snake_case_ )
__A : str = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__A : Union[str, Any] = MaMaaaConfig(
vocab_size=snake_case_ ,max_position_embeddings=1024 ,encoder_layers=args.encoder_layers ,decoder_layers=args.decoder_layers ,encoder_attention_heads=args.encoder_attention_heads ,decoder_attention_heads=args.decoder_attention_heads ,encoder_ffn_dim=args.encoder_ffn_embed_dim ,decoder_ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.encoder_embed_dim ,encoder_layerdrop=args.encoder_layerdrop ,decoder_layerdrop=args.decoder_layerdrop ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function='''relu''' ,)
__A : List[str] = state_dict['''decoder.embed_tokens.weight''']
__A : List[str] = MaMaaaForConditionalGeneration(snake_case_ )
model.model.load_state_dict(snake_case_ ,strict=snake_case_ )
__A : int = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
a_ = parser.parse_args()
a_ = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 291 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
a_ = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 291 | 1 |
def A ( input_a ,input_b ) -> int:
return int((input_a, input_b).count(1 ) != 0 )
def A ( ) -> None:
assert or_gate(0 ,0 ) == 0
assert or_gate(0 ,1 ) == 1
assert or_gate(1 ,0 ) == 1
assert or_gate(1 ,1 ) == 1
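# The asserts above encode the OR truth table: the output is 0 only for (0, 0).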
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 71 |
from importlib import import_module
from .logging import get_logger
A__ = get_logger(__name__)
class a :
def __init__( self :Optional[int] ,__lowercase :List[str] ,__lowercase :Any=None ):
snake_case__ : List[str] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('''__''' ):
setattr(self ,__lowercase ,getattr(__lowercase ,__lowercase ) )
snake_case__ : Optional[Any] = module._original_module if isinstance(__lowercase ,_PatchedModuleObj ) else module
class a :
__lowerCAmelCase : Any = []
def __init__( self :List[str] ,__lowercase :Optional[Any] ,__lowercase :str ,__lowercase :Dict ,__lowercase :Any=None ):
snake_case__ : Dict = obj
snake_case__ : Dict = target
snake_case__ : List[str] = new
snake_case__ : int = target.split('''.''' )[0]
snake_case__ : List[str] = {}
snake_case__ : Any = attrs or []
def __enter__( self :Tuple ):
*snake_case__ , snake_case__ : str = self.target.split('''.''' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(__lowercase ) ):
try:
snake_case__ : Any = import_module('''.'''.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
snake_case__ : Optional[int] = getattr(self.obj ,__lowercase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(__lowercase ,_PatchedModuleObj ) and obj_attr._original_module is submodule)
):
snake_case__ : List[Any] = obj_attr
# patch at top level
setattr(self.obj ,__lowercase ,_PatchedModuleObj(__lowercase ,attrs=self.attrs ) )
snake_case__ : List[Any] = getattr(self.obj ,__lowercase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(__lowercase ,__lowercase ,_PatchedModuleObj(getattr(__lowercase ,__lowercase ,__lowercase ) ,attrs=self.attrs ) )
snake_case__ : List[Any] = getattr(__lowercase ,__lowercase )
# finally set the target attribute
setattr(__lowercase ,__lowercase ,self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
snake_case__ : int = getattr(import_module('''.'''.join(__lowercase ) ) ,__lowercase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj ,__lowercase ) is attr_value:
snake_case__ : str = getattr(self.obj ,__lowercase )
setattr(self.obj ,__lowercase ,self.new )
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
snake_case__ : str = globals()['''__builtins__'''][target_attr]
setattr(self.obj ,__lowercase ,self.new )
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" )
def __exit__( self :Tuple ,*__lowercase :Optional[int] ):
for attr in list(self.original ):
setattr(self.obj ,__lowercase ,self.original.pop(__lowercase ) )
def __lowerCamelCase ( self :Tuple ):
self.__enter__()
self._active_patches.append(self )
def __lowerCamelCase ( self :Dict ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
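# A minimal usage sketch, assuming the second class above plays the role of
# datasets' `patch_submodule` context manager (names here are hypothetical):
#
# with patcher(my_module, "os.path.join", lambda *parts: "/patched"):
#     my_module.build_path() # sees the patched join inside the block
# my_module.build_path() # __exit__ restored the original join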
| 230 | 0 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
UpperCamelCase_ = 50000
UpperCamelCase_ = 5000
UpperCamelCase_ ,UpperCamelCase_ = os.path.split(__file__)
UpperCamelCase_ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowerCamelCase_ ( _a : datasets.Dataset , _a : List[Any] ):
'''simple docstring'''
for i in range(_a ):
UpperCAmelCase_ : List[Any] = dataset[i]
@get_duration
def lowerCamelCase_ ( _a : datasets.Dataset , _a : str , _a : str ):
'''simple docstring'''
for i in range(0 , len(_a ) , _a ):
UpperCAmelCase_ : Optional[Any] = dataset[i : i + batch_size]
@get_duration
def lowerCamelCase_ ( _a : datasets.Dataset , _a : Tuple , _a : Dict ):
'''simple docstring'''
with dataset.formatted_as(type=_a ):
for i in range(_a ):
UpperCAmelCase_ : Optional[Any] = dataset[i]
@get_duration
def lowerCamelCase_ ( _a : datasets.Dataset , _a : Union[str, Any] , _a : Optional[int] , _a : List[Any] ):
'''simple docstring'''
with dataset.formatted_as(type=_a ):
for i in range(0 , _a , _a ):
UpperCAmelCase_ : Dict = dataset[i : i + batch_size]
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
UpperCAmelCase_ : Any = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
UpperCAmelCase_ : int = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
UpperCAmelCase_ : int = datasets.Features(
{"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
UpperCAmelCase_ : Optional[int] = generate_example_dataset(
os.path.join(_a , """dataset.arrow""" ) , _a , num_examples=_a , seq_shapes={"""list""": (100,)} , )
print("""first set of iterations""" )
for func, kwargs in functions:
print(func.__name__ , str(_a ) )
UpperCAmelCase_ : Any = func(_a , **_a )
print("""shuffling dataset""" )
UpperCAmelCase_ : List[str] = dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ , func.__name__ , str(_a ) )
UpperCAmelCase_ : Tuple = func(
_a , **_a )
with open(_a , """wb""" ) as f:
f.write(json.dumps(_a ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 351 |
from scipy.stats import spearmanr
import datasets
UpperCamelCase_ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
UpperCamelCase_ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
UpperCamelCase_ = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
'''simple docstring'''
def A__ ( self: int ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) ,reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] ,)
def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str]=False ) -> Dict:
UpperCAmelCase_ : List[str] = spearmanr(lowerCamelCase_ ,lowerCamelCase_ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 59 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Optional[int] = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class a__ ( A__ ):
A = 'open-llama'
def __init__( self : Union[str, Any],_A : Dict=10_0000,_A : List[Any]=4096,_A : List[Any]=1_1008,_A : List[Any]=32,_A : Dict=32,_A : List[Any]="silu",_A : Dict=2048,_A : Optional[Any]=0.02,_A : str=1E-6,_A : Union[str, Any]=True,_A : Optional[Any]=0,_A : List[str]=1,_A : Optional[Any]=2,_A : List[Any]=False,_A : Dict=True,_A : Dict=0.1,_A : Optional[int]=0.1,_A : List[Any]=True,_A : str=True,_A : str=None,**_A : Optional[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE_ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = rms_norm_eps
SCREAMING_SNAKE_CASE_ : Any = use_cache
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop(
"use_memorry_efficient_attention",_A )
SCREAMING_SNAKE_CASE_ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_dropout_prob
SCREAMING_SNAKE_CASE_ : List[Any] = use_stable_embedding
SCREAMING_SNAKE_CASE_ : Tuple = shared_input_output_embedding
SCREAMING_SNAKE_CASE_ : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_A,bos_token_id=_A,eos_token_id=_A,tie_word_embeddings=_A,**_A,)
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling,_A ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
F'got {self.rope_scaling}' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.rope_scaling.get("type",_A )
SCREAMING_SNAKE_CASE_ : Dict = self.rope_scaling.get("factor",_A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(_A,_A ) or rope_scaling_factor <= 1.0:
raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
| 18 |
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
else:
return a * actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(UpperCAmelCase , UpperCAmelCase )
return actual_power(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
print(power(-2, -3))
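# Note: actual_power recurses into both halves, so it performs O(b) multiplications.
# A hypothetical single-recursion variant (not part of this snippet) keeps the
# O(log b) bound of exponentiation by squaring:
#
# def fast_power(a: int, b: int) -> int:
#     if b == 0:
#         return 1
#     half = fast_power(a, b // 2)
#     return half * half if b % 2 == 0 else a * half * half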
| 142 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowerCAmelCase_ : Dict = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ : List[str] = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : str = PyTorchBenchmark(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : int = 'sgugger/tiny-distilbert-classification'
lowerCAmelCase_ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , only_pretrain_model=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : str = PyTorchBenchmark(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : Tuple = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , torchscript=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : Tuple = PyTorchBenchmark(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Dict = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : str = PyTorchBenchmark(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : Dict = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ : Dict = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
# set architectures equal to `None`
lowerCAmelCase_ : int = None
lowerCAmelCase_ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : Any = PyTorchBenchmark(SCREAMING_SNAKE_CASE_ , configs=[config] )
lowerCAmelCase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Dict = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : int = PyTorchBenchmark(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : Union[str, Any] = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=SCREAMING_SNAKE_CASE_ , multi_process=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : Tuple = PyTorchBenchmark(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : List[Any] = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : str = PyTorchBenchmark(SCREAMING_SNAKE_CASE_ , configs=[config] )
lowerCAmelCase_ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : str = 'sshleifer/tinier_bart'
lowerCAmelCase_ : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : int = PyTorchBenchmark(SCREAMING_SNAKE_CASE_ , configs=[config] )
lowerCAmelCase_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ : Union[str, Any] = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : int = PyTorchBenchmark(SCREAMING_SNAKE_CASE_ , configs=[config] )
lowerCAmelCase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : str = 'sshleifer/tinier_bart'
lowerCAmelCase_ : Optional[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : Union[str, Any] = PyTorchBenchmark(SCREAMING_SNAKE_CASE_ , configs=[config] )
lowerCAmelCase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , save_to_csv=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(SCREAMING_SNAKE_CASE_ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(SCREAMING_SNAKE_CASE_ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(SCREAMING_SNAKE_CASE_ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(SCREAMING_SNAKE_CASE_ , 'train_time.csv' ) , env_info_csv_file=os.path.join(SCREAMING_SNAKE_CASE_ , 'env.csv' ) , multi_process=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : List[Any] = PyTorchBenchmark(SCREAMING_SNAKE_CASE_ )
benchmark.run()
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , 'env.csv' ) ).exists() )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : Any = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(SCREAMING_SNAKE_CASE_ : Optional[int] ):
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'sequential' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'cumulative' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'current' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(SCREAMING_SNAKE_CASE_ , 'log.txt' ) , log_print=SCREAMING_SNAKE_CASE_ , trace_memory_line_by_line=SCREAMING_SNAKE_CASE_ , multi_process=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : int = PyTorchBenchmark(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , 'log.txt' ) ).exists() )
| 352 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
lowercase__ : str = logging.get_logger(__name__)
def UpperCamelCase_ ( lowerCAmelCase__ : bool , lowerCAmelCase__ : bool ) -> List[Any]:
"""simple docstring"""
def run_func(lowerCAmelCase__ : int ):
@wraps(lowerCAmelCase__ )
def run_in_eager_mode(*lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : int ):
return func(*lowerCAmelCase__ , **lowerCAmelCase__ )
@wraps(lowerCAmelCase__ )
@tf.function(experimental_compile=lowerCAmelCase__ )
def run_in_graph_mode(*lowerCAmelCase__ : Dict , **lowerCAmelCase__ : Any ):
return func(*lowerCAmelCase__ , **lowerCAmelCase__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def UpperCamelCase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> ["tf.Tensor"]:
"""simple docstring"""
lowerCAmelCase_ : Dict = random.Random()
lowerCAmelCase_ : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase__ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = "TensorFlow"
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
return tf.__version__
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
# initialize GPU on separate process
lowerCAmelCase_ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
lowerCAmelCase_ : List[str] = self._prepare_inference_func(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self._measure_speed(_inference )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase_ : Optional[int] = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
lowerCAmelCase_ : Any = self._prepare_train_func(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self._measure_speed(_train )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
lowerCAmelCase_ : Optional[Any] = self._prepare_inference_func(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self._measure_memory(_inference )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
lowerCAmelCase_ : Optional[int] = self._prepare_train_func(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self._measure_memory(_train )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase_ : Any = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
lowerCAmelCase_ : Union[str, Any] = (
hasattr(SCREAMING_SNAKE_CASE_ , 'architectures' )
and isinstance(config.architectures , SCREAMING_SNAKE_CASE_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCAmelCase_ : Any = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCAmelCase_ : Any = __import__('transformers' , fromlist=[model_class] )
lowerCAmelCase_ : Dict = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = model_cls(SCREAMING_SNAKE_CASE_ )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
lowerCAmelCase_ : str = TF_MODEL_MAPPING[config.__class__](SCREAMING_SNAKE_CASE_ )
# encoder-decoder has vocab size saved differently
lowerCAmelCase_ : List[Any] = config.vocab_size if hasattr(SCREAMING_SNAKE_CASE_ , 'vocab_size' ) else config.encoder.vocab_size
lowerCAmelCase_ : Tuple = random_input_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase_ : Union[str, Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
lowerCAmelCase_ : Dict = (
hasattr(SCREAMING_SNAKE_CASE_ , 'architectures' )
and isinstance(config.architectures , SCREAMING_SNAKE_CASE_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCAmelCase_ : Optional[Any] = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCAmelCase_ : int = __import__('transformers' , fromlist=[model_class] )
lowerCAmelCase_ : Any = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = model_cls(SCREAMING_SNAKE_CASE_ )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
lowerCAmelCase_ : Union[str, Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](SCREAMING_SNAKE_CASE_ )
# encoder-decoder has vocab size saved differently
lowerCAmelCase_ : int = config.vocab_size if hasattr(SCREAMING_SNAKE_CASE_ , 'vocab_size' ) else config.encoder.vocab_size
lowerCAmelCase_ : Optional[Any] = random_input_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
lowerCAmelCase_ : str = model(SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )[0]
lowerCAmelCase_ : Optional[int] = tf.gradients(SCREAMING_SNAKE_CASE_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
lowerCAmelCase_ : Dict = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )[0]
lowerCAmelCase_ : str = tf.gradients(SCREAMING_SNAKE_CASE_ , model.trainable_variables )
return gradients
lowerCAmelCase_ : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run an additional 5 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(SCREAMING_SNAKE_CASE_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
lowerCAmelCase_ : Dict = timeit.repeat(
SCREAMING_SNAKE_CASE_ , repeat=self.args.repeat , number=1_0 , )
return min(SCREAMING_SNAKE_CASE_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Callable[[], None] ):
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
lowerCAmelCase_ : Union[str, Any] = start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
lowerCAmelCase_ : Tuple = 'N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
lowerCAmelCase_ : int = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
lowerCAmelCase_ : Union[str, Any] = nvml.nvmlDeviceGetMemoryInfo(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = meminfo.used
lowerCAmelCase_ : int = Memory(SCREAMING_SNAKE_CASE_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
lowerCAmelCase_ : Optional[int] = None
else:
lowerCAmelCase_ : Union[str, Any] = measure_peak_memory_cpu(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = Memory(SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
lowerCAmelCase_ : List[Any] = stop_memory_tracing(SCREAMING_SNAKE_CASE_ )
if memory is None:
lowerCAmelCase_ : Union[str, Any] = summary.total
else:
lowerCAmelCase_ : List[str] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 289 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = """time_series_transformer"""
_UpperCAmelCase : List[str] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "student_t" , __magic_name__ = "nll" , __magic_name__ = 1 , __magic_name__ = [1, 2, 3, 4, 5, 6, 7] , __magic_name__ = "mean" , __magic_name__ = 0 , __magic_name__ = 0 , __magic_name__ = 0 , __magic_name__ = 0 , __magic_name__ = None , __magic_name__ = None , __magic_name__ = 3_2 , __magic_name__ = 3_2 , __magic_name__ = 2 , __magic_name__ = 2 , __magic_name__ = 2 , __magic_name__ = 2 , __magic_name__ = True , __magic_name__ = "gelu" , __magic_name__ = 6_4 , __magic_name__ = 0.1 , __magic_name__ = 0.1 , __magic_name__ = 0.1 , __magic_name__ = 0.1 , __magic_name__ = 0.1 , __magic_name__ = 1_0_0 , __magic_name__ = 0.02 , __magic_name__=True , **__magic_name__ , ):
lowerCamelCase : Dict = prediction_length
lowerCamelCase : Tuple = context_length or prediction_length
lowerCamelCase : int = distribution_output
lowerCamelCase : str = loss
lowerCamelCase : List[str] = input_size
lowerCamelCase : List[str] = num_time_features
lowerCamelCase : List[str] = lags_sequence
lowerCamelCase : List[Any] = scaling
lowerCamelCase : Union[str, Any] = num_dynamic_real_features
lowerCamelCase : Dict = num_static_real_features
lowerCamelCase : str = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : str = cardinality
else:
lowerCamelCase : Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : List[str] = embedding_dimension
else:
lowerCamelCase : Union[str, Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase : Any = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase : List[Any] = input_size * len(_SCREAMING_SNAKE_CASE ) + self._number_of_features
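# The line above sets the transformer input width: input_size * number_of_lags
# plus the static/dynamic/time features counted by _number_of_features below.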
lowerCamelCase : Any = d_model
lowerCamelCase : Dict = encoder_attention_heads
lowerCamelCase : Optional[Any] = decoder_attention_heads
lowerCamelCase : str = encoder_ffn_dim
lowerCamelCase : Optional[Any] = decoder_ffn_dim
lowerCamelCase : Tuple = encoder_layers
lowerCamelCase : Dict = decoder_layers
lowerCamelCase : Union[str, Any] = dropout
lowerCamelCase : Optional[Any] = attention_dropout
lowerCamelCase : Union[str, Any] = activation_dropout
lowerCamelCase : str = encoder_layerdrop
lowerCamelCase : str = decoder_layerdrop
lowerCamelCase : int = activation_function
lowerCamelCase : Tuple = init_std
lowerCamelCase : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def UpperCamelCase__ ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 287 |
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def lowercase__ ( __UpperCamelCase )-> Union[str, Any]:
UpperCamelCase = min(__UpperCamelCase ) # min() finds the minimum value
UpperCamelCase = max(__UpperCamelCase ) # max() finds the maximum value
UpperCamelCase = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
UpperCamelCase = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
UpperCamelCase = 0
for count in range(__UpperCamelCase ):
while holes[count] > 0:
holes[count] -= 1
UpperCamelCase = count + min_val
i += 1
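# Pigeonhole sort runs in O(n + range) time with O(range) extra space, where
# range = max_val - min_val + 1, so it only pays off when the value range is small.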
def lowercase__ ( )-> Any:
UpperCamelCase = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(__UpperCamelCase )
print("""Sorted order is:""" , """ """.join(__UpperCamelCase ) )
if __name__ == "__main__":
main()
| 321 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowercase ( __UpperCAmelCase ):
lowercase_ = ['image_processor', 'tokenizer']
lowercase_ = 'BridgeTowerImageProcessor'
lowercase_ = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ ) -> Any:
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __call__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = 0 , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = True , UpperCAmelCase_ = None , **UpperCAmelCase_ , ) -> BatchEncoding:
lowerCamelCase : str = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
# add pixel_values + pixel_mask
lowerCamelCase : Any = self.image_processor(
UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , do_center_crop=UpperCAmelCase_ , **UpperCAmelCase_ )
encoding.update(UpperCAmelCase_ )
return encoding
def _UpperCamelCase ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> Optional[int]:
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _UpperCamelCase ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> List[Any]:
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def _UpperCamelCase ( self ) -> Any:
lowerCamelCase : Tuple = self.tokenizer.model_input_names
lowerCamelCase : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 205 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_A = logging.get_logger(__name__)
_A = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _lowercase ( __UpperCAmelCase ):
lowercase_ = 'trajectory_transformer'
lowercase_ = ['past_key_values']
lowercase_ = {
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , UpperCAmelCase_=100 , UpperCAmelCase_=5 , UpperCAmelCase_=1 , UpperCAmelCase_=1 , UpperCAmelCase_=249 , UpperCAmelCase_=6 , UpperCAmelCase_=17 , UpperCAmelCase_=25 , UpperCAmelCase_=4 , UpperCAmelCase_=4 , UpperCAmelCase_=128 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0006 , UpperCAmelCase_=512 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-1_2 , UpperCAmelCase_=1 , UpperCAmelCase_=True , UpperCAmelCase_=1 , UpperCAmelCase_=50256 , UpperCAmelCase_=50256 , **UpperCAmelCase_ , ) -> List[Any]:
lowerCamelCase : int = vocab_size
lowerCamelCase : List[str] = action_weight
lowerCamelCase : List[Any] = reward_weight
lowerCamelCase : List[str] = value_weight
lowerCamelCase : Tuple = max_position_embeddings
lowerCamelCase : List[str] = block_size
lowerCamelCase : Any = action_dim
lowerCamelCase : List[Any] = observation_dim
lowerCamelCase : Any = transition_dim
lowerCamelCase : int = learning_rate
lowerCamelCase : Union[str, Any] = n_layer
lowerCamelCase : Tuple = n_head
lowerCamelCase : Any = n_embd
lowerCamelCase : Union[str, Any] = embd_pdrop
lowerCamelCase : Optional[int] = attn_pdrop
lowerCamelCase : int = resid_pdrop
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : Any = kaiming_initializer_range
lowerCamelCase : str = use_cache
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
| 205 | 1 |
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_UpperCAmelCase = logging.getLogger(__name__)
class a :
def __init__( self : List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =False
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : Tuple ) -> int:
'''simple docstring'''
if not self.initialized:
SCREAMING_SNAKE_CASE_: List[Any] =RagRetriever(
lowerCAmelCase , question_encoder_tokenizer=lowerCAmelCase , generator_tokenizer=lowerCAmelCase , index=lowerCAmelCase , init_retrieval=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: List[str] =True
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
self.retriever.index.init_index()
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =self.retriever._main_retrieve(lowerCAmelCase , lowerCAmelCase )
return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        '''simple docstring'''
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py " )
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ] )

    def init_retrieval(self):
        '''simple docstring'''
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
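# Hypothetical usage sketch (not from the original file). `RayRetriever` is
# assumed to be the worker class that the .remote() calls above target, and the
# model name and actor count are illustrative only:
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()  # initializes the index on every actor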
| 173 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = 'vit_mae'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
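# Minimal usage sketch (illustrative, not part of the original file): the
# defaults above give the base ViT-MAE encoder/decoder sizes, and
# mask_ratio=0.75 means three quarters of the patches are masked in pretraining.
#
#   config = ViTMAEConfig(mask_ratio=0.6)  # mask 60% of patches instead
#   print(config.hidden_size, config.decoder_hidden_size)  # 768 512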
| 173 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """simple docstring"""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    """simple docstring"""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """simple docstring"""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            # Remember which key was pressed before dispatching to its handler
            # (attribute name restored from the upstream accelerate source).
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """simple docstring"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
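# Illustrative usage (not in the original file): `register` rebuilds a class
# with the KeyHandler metaclass, so methods tagged via `mark` are dispatched by
# key press. `ord("q")` is used because handle_input compares integer codes.
#
#   @register
#   class Menu:
#       @mark(ord("q"))
#       def quit(cls):
#           return "quit"
#
#   # Menu.handle_input(Menu) reads one key via get_character() and,
#   # if it was "q", calls Menu.quit.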
| 213 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """simple docstring"""

    def __init__(self, list_of_points):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size=0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree))
        plt.scatter(x, y, color='red', label='Control Points')
        plt.legend()
        plt.show()
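# Worked check (illustrative): for the degree-1 curve through (1, 2) and (3, 5),
# the Bernstein basis at t = 0.5 is [0.5, 0.5], so the curve point is the
# midpoint of the two control points.
#
#   curve = BezierCurve([(1, 2), (3, 5)])
#   curve.basis_function(0.5)         # [0.5, 0.5]
#   curve.bezier_curve_function(0.5)  # (2.0, 3.5)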
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 213 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_ibert"""] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 96 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """simple docstring"""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """simple docstring"""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    """simple docstring"""
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )


class Seq2SeqLoggingCallback(pl.Callback):
    """simple docstring"""

    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
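# Illustrative wiring (not in the original file): these callbacks are meant to
# be passed to a pytorch_lightning Trainer alongside a seq2seq module.
#
#   checkpoint_cb = get_checkpoint_callback(output_dir="outputs", metric="rouge2")
#   es_cb = get_early_stopping_callback(metric="rouge2", patience=3)
#   trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, es_cb])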
| 313 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        """simple docstring"""
        outputs = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png', threshold=0.0)
        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object, {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                }, )

        import datasets

        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils', 'image', split='test')
        batch = [
            Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            # RGBA
            dataset[0]['file'],
            # LA
            dataset[1]['file'],
            # L
            dataset[2]['file'],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)
        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object, {
                        'score': ANY(float),
                        'label': ANY(str),
                        'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                    }, )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
    def test_small_model_tf(self):
"""simple docstring"""
pass
@require_torch
    def test_small_model_pt(self):
        """simple docstring"""
        model_id = 'hf-internal-testing/tiny-detr-mobilenetsv3'
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
] , )
        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ], threshold=0.0, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        """simple docstring"""
        model_id = 'facebook/detr-resnet-50'
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ] )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        """simple docstring"""
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection', model=model_id)
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ] )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        """simple docstring"""
        threshold = 0.9985
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection', model=model_id)
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        """simple docstring"""
        model_id = 'Narsil/layoutlmv3-finetuned-funsd'
        threshold = 0.9993
        object_detector = pipeline('object-detection', model=model_id, threshold=threshold)
        outputs = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png')
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
] , )
| 361 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
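# Effect of the shim above (illustrative): both imports resolve to the same
# class, but the first, deprecated path triggers the warning registered here.
#
#   from diffusers.pipeline_utils import DiffusionPipeline            # deprecated path
#   from diffusers.pipelines.pipeline_utils import DiffusionPipeline  # new path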
| 160 | 0 |
'''simple docstring'''
import operator as op

SCALER_NAME = 'scaler.pt'
MODEL_NAME = 'pytorch_model'
RNG_STATE_NAME = 'random_states'
OPTIMIZER_NAME = 'optimizer'
SCHEDULER_NAME = 'scheduler'
WEIGHTS_NAME = 'pytorch_model.bin'
WEIGHTS_INDEX_NAME = 'pytorch_model.bin.index.json'
SAFE_WEIGHTS_NAME = 'model.safetensors'
SAFE_WEIGHTS_INDEX_NAME = 'model.safetensors.index.json'
SAGEMAKER_PYTORCH_VERSION = '1.10.2'
SAGEMAKER_PYTHON_VERSION = 'py38'
SAGEMAKER_TRANSFORMERS_VERSION = '4.17.0'
SAGEMAKER_PARALLEL_EC2_INSTANCES = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
FSDP_SHARDING_STRATEGY = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
FSDP_AUTO_WRAP_POLICY = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
FSDP_BACKWARD_PREFETCH = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
FSDP_STATE_DICT_TYPE = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
FSDP_PYTORCH_VERSION = '2.0.1'
DEEPSPEED_MULTINODE_LAUNCHERS = ['pdsh', 'standard', 'openmpi', 'mvapich']
TORCH_DYNAMO_MODES = ['default', 'reduce-overhead', 'max-autotune']
STR_OPERATION_TO_FUNC = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
CUDA_DISTRIBUTED_TYPES = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
# The original name of the constant below could not be recovered from the
# obfuscated source; this identifier is a best guess, the value is verbatim.
TORCH_DISTRIBUTED_OPERATION_TYPES = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
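# Illustrative use of STR_OPERATION_TO_FUNC (not in the original file): mapping
# comparison strings to operator functions, e.g. for version checks.
#
#   compare = STR_OPERATION_TO_FUNC[">="]
#   compare(2, 1)  # True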
| 2 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))

    def _sanitize_parameters(self, **kwargs):
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        '''simple docstring'''
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        '''simple docstring'''
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        '''simple docstring'''
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        '''simple docstring'''
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]))

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
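# Minimal usage sketch (illustrative): this pipeline is normally constructed
# via the `pipeline` factory rather than instantiated directly.
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., ...}}, ...]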
| 319 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'RUCAIBox/mvp': 1_0_2_4,
}


class MvpTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        """simple docstring"""
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
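# Illustrative usage (not in the original file):
#
#   tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#   enc = tok("Summarize: the cat sat on the mat.")
#   enc["input_ids"]  # <s> ... </s> with BART-style special tokens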
| 362 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks, tf.convert_to_tensor(original_sizes), tf.convert_to_tensor(reshaped_input_size), return_tensors="tf", )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf")
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt")
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
| 69 | 0 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n) -> bool:
    """simple docstring"""
    s = str(n)
    return s == s[::-1]


def solution(limit=1_000_000) -> int:
    """simple docstring"""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split('b')[1]):
            total += i
    return total
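# Worked example: 585 is a palindrome in both bases, since
# str(585) == "585" and bin(585) == "0b1001001001".
#
#   is_palindrome(585)                     # True
#   is_palindrome(bin(585).split('b')[1])  # True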
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 28 |
'''simple docstring'''
def solution(n: int = 10_00) -> int:
    '''simple docstring'''
    a = 3
    result = 0
    while a < n:
        # Sum every number below n that is a multiple of 3 or 5.
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
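# Quick check: below 10 the multiples of 3 or 5 are 3, 5, 6 and 9,
# so solution(10) returns 23.
#
#   assert solution(10) == 23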
if __name__ == "__main__":
print(F'''{solution() = }''')
| 120 | 0 |
'''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'prophetnet.tokenizer'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
PRETRAINED_INIT_CONFIGURATION = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'microsoft/xprophetnet-large-wiki100-cased': 5_1_2,
}
def load_vocab(vocab_file):
    '''simple docstring'''
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece")
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece")
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
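# Illustrative usage (class name as restored above):
#
#   tok = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
#   ids = tok("Hello world").input_ids  # SentencePiece ids shifted by fairseq_offset
#   tok.convert_ids_to_tokens(ids)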
| 243 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 5_1_2,
'YituTech/conv-bert-medium-small': 5_1_2,
'YituTech/conv-bert-small': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
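# Illustrative usage: sentence-pair encoding produces BERT-style segment ids.
#
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   enc = tok("first sentence", "second sentence")
#   enc["token_type_ids"]  # 0s for [CLS] first [SEP], 1s for second [SEP]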
| 243 | 1 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Any , _A : Optional[int] , _A : Tuple=13 , _A : Any=7 , _A : str=True , _A : List[str]=True , _A : str=False , _A : Dict=True , _A : Tuple=99 , _A : int=32 , _A : Any=5 , _A : List[Any]=4 , _A : str=37 , _A : Any="gelu" , _A : Tuple=0.1 , _A : str=0.1 , _A : Union[str, Any]=512 , _A : Any=16 , _A : Tuple=2 , _A : Tuple=0.02 , _A : Tuple=3 , _A : Optional[int]=4 , _A : List[Any]=None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : List[Any] = batch_size
__SCREAMING_SNAKE_CASE : int = seq_length
__SCREAMING_SNAKE_CASE : Optional[Any] = is_training
__SCREAMING_SNAKE_CASE : Tuple = use_input_mask
__SCREAMING_SNAKE_CASE : Optional[Any] = use_token_type_ids
__SCREAMING_SNAKE_CASE : Tuple = use_labels
__SCREAMING_SNAKE_CASE : Dict = vocab_size
__SCREAMING_SNAKE_CASE : Tuple = hidden_size
__SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : str = intermediate_size
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : int = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
__SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
__SCREAMING_SNAKE_CASE : List[Any] = num_labels
__SCREAMING_SNAKE_CASE : Tuple = num_choices
__SCREAMING_SNAKE_CASE : Any = scope
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : Tuple = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : Tuple = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Union[str, Any] , _A : Any , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : int , _A : Union[str, Any] , _A : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptModel(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A )
__SCREAMING_SNAKE_CASE : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_causal_lm( self : Dict , config : Union[str, Any] , input_ids : Dict , token_type_ids : Optional[Any] , input_mask : List[Any] , sequence_labels : Tuple , token_labels : Dict , choice_labels : Dict , encoder_hidden_states : List[Any] , encoder_attention_mask : Tuple , ):
"""simple docstring"""
model = BioGptForCausalLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_biogpt_model_attention_mask_past( self : List[str] , config : Tuple , input_ids : str , input_mask : Any , head_mask : int , token_type_ids : List[Any] , *args : int ):
"""simple docstring"""
model = BioGptModel(config=config )
model.to(torch_device )
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output, past_key_values = model(input_ids , attention_mask=attn_mask ).to_tuple()
# create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,) , half_seq_length ).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=torch_device )] , dim=1 , )
# get two different outputs
output_from_no_past = model(next_input_ids , attention_mask=attn_mask )['''last_hidden_state''']
output_from_past = model(next_tokens , past_key_values=past_key_values , attention_mask=attn_mask )['''last_hidden_state''']
# select random slice
random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3 ) )
def create_and_check_biogpt_model_past_large_inputs( self : Optional[int] , config : Tuple , input_ids : Dict , input_mask : str , head_mask : int , token_type_ids : Optional[Any] , *args : Any ):
"""simple docstring"""
model = BioGptModel(config=config ).to(torch_device ).eval()
attention_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
# first forward pass
outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention mask
next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )['''last_hidden_state''']
output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[
'''last_hidden_state'''
]
# select random slice
random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3 ) )
def create_and_check_forward_and_backwards( self : Optional[int] , config : List[Any] , input_ids : Tuple , input_mask : Any , head_mask : int , token_type_ids : List[Any] , *args : Dict , gradient_checkpointing : List[str]=False ):
"""simple docstring"""
model = BioGptForCausalLM(config )
model.to(torch_device )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids , labels=input_ids )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def create_and_check_biogpt_weight_initialization( self : Optional[int] , config : int , *args : int ):
"""simple docstring"""
model = BioGptModel(config )
model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def create_and_check_biogpt_for_token_classification( self : List[str] , config : Optional[Any] , input_ids : Optional[Any] , input_mask : Dict , head_mask : List[str] , token_type_ids : Optional[int] , *args : Any ):
"""simple docstring"""
config.num_labels = self.num_labels
model = BioGptForTokenClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def prepare_config_and_inputs_for_common( self : Tuple ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (BioGptForCausalLM,) if is_torch_available() else ()
lowerCAmelCase_ = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
def setUp( self : Optional[Any] ):
"""simple docstring"""
self.model_tester = BioGptModelTester(self )
self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37 )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs , gradient_checkpointing=True )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs )
@slow
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(torch_device )
tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
tokenizer.padding_side = '''left'''
# define PAD token = EOS token
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = model.config.eos_token_id
# use different length sentences to test batching
sentences = [
'''Hello, my dog is a little''',
'''Today, I''',
]
inputs = tokenizer(sentences , return_tensors='''pt''' , padding=True )
input_ids = inputs['''input_ids'''].to(torch_device )
outputs = model.generate(
input_ids=input_ids , attention_mask=inputs['''attention_mask'''].to(torch_device ) , )
inputs_non_padded = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(torch_device )
output_non_padded = model.generate(input_ids=inputs_non_padded )
num_paddings = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
inputs_padded = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(torch_device )
output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
expected_output_sentence = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(expected_output_sentence , batch_out_sentence )
self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BioGptModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict['''input_ids''']
attention_mask = input_ids.ne(1 ).to(torch_device )
sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
model = BioGptForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = '''multi_label_classification'''
input_ids = input_dict['''input_ids''']
attention_mask = input_ids.ne(1 ).to(torch_device )
sequence_labels = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
model = BioGptForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
input_ids = torch.tensor([[2, 4805, 9, 656, 21]] )
output = model(input_ids )[0]
vocab_size = 4_2384
expected_shape = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(torch_device )
torch.manual_seed(0 )
tokenized = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(torch_device )
output_ids = model.generate(
**tokenized , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=True , )
output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
expected_output_str = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(output_str , expected_output_str )
| 303 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 303 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
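# _import_structure lists what each submodule exports; _LazyModule (below) defers the actual imports until first use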
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_efficientnet"""] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
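# at type-checking time, import everything eagerly so static analyzers can resolve the symbols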
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 4 |
'''simple docstring'''
import heapq
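# greedy heuristic: repeatedly pick the vertex that covers the most remaining edges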
def greedy_min_vertex_cover(graph: dict ) -> set[int]:
    queue: list[list] = []
# for each node and its adjacency list, push the node and its rank onto the queue
# heapq implements a min-priority queue, so -1 * len(value) simulates a max-priority queue
for key, value in graph.items():
# O(log(n))
heapq.heappush(queue , [-1 * len(value ), (key, value)] )
# chosen_vertices = set of chosen vertices
chosen_vertices = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
argmax = heapq.heappop(queue )[1][0]
chosen_vertices.add(argmax )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem,
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
index = elem[1][1].index(argmax )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(queue )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
| 4 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
def _get_uniform_logits(self , batch_size , length ):
"""simple docstring"""
a = jnp.ones((batch_size, length) ) / length
return scores
def UpperCamelCase_ (self ):
"""simple docstring"""
a = None
a = 20
a = self._get_uniform_logits(batch_size=2 , length=lowerCamelCase_ )
# tweak scores to not be uniform anymore
a = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
a = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
a = jax.nn.softmax(lowerCamelCase_ , axis=-1 )
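# temperature < 1 sharpens the softmax distribution, temperature > 1 flattens it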
a = FlaxTemperatureLogitsWarper(temperature=0.5 )
a = FlaxTemperatureLogitsWarper(temperature=1.3 )
a = jax.nn.softmax(temp_dist_warper_sharper(lowerCamelCase_ , scores.copy() , cur_len=lowerCamelCase_ ) , axis=-1 )
a = jax.nn.softmax(temp_dist_warper_smoother(lowerCamelCase_ , scores.copy() , cur_len=lowerCamelCase_ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = None
a = 10
a = 2
# create ramp distribution
a = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy()
a = ramp_logits[1:, : vocab_size // 2] + vocab_size
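# the first half of batch 1 is now boosted by vocab_size, so its top-k picks (low indices) differ from batch 0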
a = FlaxTopKLogitsWarper(3 )
a = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
a = 5
a = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
a = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, length) ).copy()
a = top_k_warp_safety_check(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = None
a = 10
a = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
a = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
a = FlaxTopPLogitsWarper(0.8 )
a = np.exp(top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
a = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
a = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
a = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
a = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
a = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = 20
a = 4
a = 0
a = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ )
# check that min length is applied at length 5
a = ids_tensor((batch_size, 20) , vocab_size=20 )
a = 5
a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
a = min_dist_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
a = 15
a = min_dist_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = 20
a = 4
a = 0
a = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ )
# check that all scores are -inf except the bos_token_id score
a = ids_tensor((batch_size, 1) , vocab_size=20 )
a = 1
a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
a = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
a = 3
a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
a = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = 20
a = 4
a = 0
a = 5
a = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
a = ids_tensor((batch_size, 4) , vocab_size=20 )
a = 4
a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
a = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
a = 3
a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ )
a = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ )
self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() )
def UpperCamelCase_ (self ):
"""simple docstring"""
batch_size = 4
sequence_length = 10
vocab_size = 15
eos_token_id = 2
bos_token_id = 1
max_length = 15
# dummy input_ids and scores
input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
input_ids_comp = input_ids.copy()
scores = self._get_uniform_logits(batch_size , vocab_size )
scores_comp = scores.copy()
# instantiate all dist processors
temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
top_k_warp = FlaxTopKLogitsWarper(3 )
top_p_warp = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
cur_len = 10
# no processor list
scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
scores = top_k_warp(input_ids , scores , cur_len=cur_len )
scores = top_p_warp(input_ids , scores , cur_len=cur_len )
scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
# with processor list
processor = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
scores_comp = processor(input_ids , scores_comp , cur_len=cur_len )
# scores should be equal
self.assertTrue(jnp.allclose(scores , scores_comp , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCamelCase_ (self ):
"""simple docstring"""
batch_size = 4
sequence_length = 10
vocab_size = 15
eos_token_id = 2
bos_token_id = 1
max_length = 15
# dummy input_ids and scores
input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
input_ids_comp = input_ids.copy()
scores = self._get_uniform_logits(batch_size , vocab_size )
scores_comp = scores.copy()
# instantiate all dist processors
temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
top_k_warp = FlaxTopKLogitsWarper(3 )
top_p_warp = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
cur_len = 10
# no processor list
def run_no_processor_list(input_ids , scores , cur_len ):
scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
scores = top_k_warp(input_ids , scores , cur_len=cur_len )
scores = top_p_warp(input_ids , scores , cur_len=cur_len )
scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
return scores
# with processor list
def run_processor_list(input_ids , scores , cur_len ):
processor = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
scores = processor(input_ids , scores , cur_len=cur_len )
return scores
jitted_run_no_processor_list = jax.jit(run_no_processor_list )
jitted_run_processor_list = jax.jit(run_processor_list )
scores = jitted_run_no_processor_list(input_ids , scores , cur_len )
scores_comp = jitted_run_processor_list(input_ids , scores_comp , cur_len )
# scores should be equal
self.assertTrue(jnp.allclose(scores , scores_comp , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 227 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
a : List[str] = get_tests_dir("""fixtures""")
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase : Tuple = mock.Mock()
UpperCAmelCase : List[str] = 500
UpperCAmelCase : Any = {}
UpperCAmelCase : List[str] = HTTPError
UpperCAmelCase : str = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head:
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def _lowercase( self ) -> Any:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def _lowercase( self ) -> Union[str, Any]:
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(A )
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
@classmethod
def _lowercase( cls ) -> Dict:
UpperCAmelCase : Tuple = TOKEN
HfFolder.save_token(A )
@classmethod
def _lowercase( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def _lowercase( self ) -> Optional[int]:
image_processor = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
new_image_processor = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(v , getattr(new_image_processor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token )
new_image_processor = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(v , getattr(new_image_processor , k ) )
def _lowercase( self ) -> List[str]:
image_processor = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
new_image_processor = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(v , getattr(new_image_processor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token )
new_image_processor = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(v , getattr(new_image_processor , k ) )
def _lowercase( self ) -> Optional[int]:
CustomImageProcessor.register_for_auto_class()
image_processor = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
new_image_processor = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 265 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
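# force deterministic kernels so the expected image slices below are reproducible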
enable_full_determinism()
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[int] = StableDiffusionXLImgaImgPipeline
_UpperCAmelCase : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_UpperCAmelCase : Dict = PipelineTesterMixin.required_optional_params - {'''latents'''}
_UpperCAmelCase : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_UpperCAmelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self : List[str]):
torch.manual_seed(0)
unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
scheduler = EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=32 , )
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
text_encoder_a = CLIPTextModelWithProjection(text_encoder_config)
tokenizer_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def get_dummy_inputs( self : int , device : Union[str, Any] , seed : List[str]=0):
image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed)).to(device)
image = image / 2 + 0.5
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.75,
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_: int = self.get_dummy_components()
SCREAMING_SNAKE_CASE_: Any = StableDiffusionXLImgaImgPipeline(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_dummy_inputs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = sd_pipe(**lowerCAmelCase__).images
SCREAMING_SNAKE_CASE_: Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
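# attention slicing may change results by at most 3e-3 versus the unsliced forward pass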
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
pass
def _SCREAMING_SNAKE_CASE ( self : Dict):
components = self.get_dummy_components()
sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
# forward without prompt embeds
inputs = self.get_dummy_inputs(torch_device)
negative_prompt = 3 * ["this is a negative prompt"]
inputs["negative_prompt"] = negative_prompt
inputs["prompt"] = 3 * [inputs["prompt"]]
output = sd_pipe(**inputs)
image_slice_a = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
inputs = self.get_dummy_inputs(torch_device)
negative_prompt = 3 * ["this is a negative prompt"]
prompt = 3 * [inputs.pop("prompt")]
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt)
output = sd_pipe(
**inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
image_slice_b = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_b.flatten()).max() < 1E-4
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs( self : Union[str, Any] , device : int , generator_device : str="cpu" , dtype : Tuple=torch.floataa , seed : int=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
latents = torch.from_numpy(latents).to(device=device , dtype=dtype)
inputs = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506])
assert np.abs(image_slice - expected_slice).max() < 7E-3
| 351 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
"""simple docstring"""
def __init__( self : str , parent : Dict , batch_size : Dict=14 , seq_length : Optional[Any]=7 , is_training : Optional[int]=True , use_input_mask : Optional[int]=True , use_token_type_ids : Union[str, Any]=False , use_labels : Tuple=True , vocab_size : Optional[int]=99 , hidden_size : str=32 , rotary_dim : Any=4 , num_hidden_layers : Any=4 , num_attention_heads : Any=4 , intermediate_size : Dict=37 , hidden_act : Tuple="gelu" , hidden_dropout_prob : str=0.1 , attention_probs_dropout_prob : Optional[Any]=0.1 , max_position_embeddings : Dict=512 , initializer_range : Dict=0.02 , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.rotary_dim = rotary_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = None
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
def prepare_config_and_inputs( self : Optional[Any]):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
config = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=False , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def prepare_config_and_inputs_for_common( self : Optional[Any]):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
def check_use_cache_forward( self : Dict , model_class_name : Optional[Any] , config : Optional[Any] , input_ids : Tuple , attention_mask : Optional[int]):
max_decoder_length = 20
model = model_class_name(config)
past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length)
attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4")
position_ids = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
outputs_cache = model(
input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )
position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4")
outputs_cache_next = model(
input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )
outputs = model(input_ids)
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}")
def check_use_cache_forward_with_attn_mask( self : str , model_class_name : Any , config : str , input_ids : Dict , attention_mask : Tuple):
max_decoder_length = 20
model = model_class_name(config)
attention_mask_cache = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length)
position_ids = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
outputs_cache = model(
input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4")
outputs_cache_next = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
outputs = model(input_ids , attention_mask=attention_mask)
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}")
@require_flax
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Dict = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
_UpperCAmelCase : Tuple = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def setUp( self : Optional[Any]):
self.model_tester = FlaxGPTJModelTester(self)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
for model_class_name in self.all_model_classes:
config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask)
def _SCREAMING_SNAKE_CASE ( self : Any):
for model_class_name in self.all_model_classes:
config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
model_class_name , config , input_ids , attention_mask)
@tooslow
def _SCREAMING_SNAKE_CASE ( self : Any):
tokenizer = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left")
inputs = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=True , truncation=True)
model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
model.do_sample = False
model.config.pad_token_id = model.config.eos_token_id
jit_generate = jax.jit(model.generate)
output_sequences = jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id).sequences
output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True)
expected_string = [
"Hello this is a long string of text.\n\nI'm trying to get the text of the",
"Hey, I'm a little late to the party. I'm going to",
]
self.assertListEqual(output_string , expected_string)
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers , pt_model_class_name)
batch_size, seq_length = pt_inputs["input_ids"].shape
rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
pt_inputs["attention_mask"][batch_idx, :start_index] = 0
pt_inputs["attention_mask"][batch_idx, start_index:] = 1
prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
pt_model = pt_model_class(config ).eval()
fx_model = model_class(config , dtype=jnp.floataa)
fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model)
fx_model.params = fx_state
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs ).to_tuple()
fx_outputs = fx_model(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
fx_model_loaded = model_class.from_pretrained(tmpdirname , from_pt=True)
fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict ).to_tuple()
self.assertEqual(
len(fx_outputs_loaded ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(fx_outputs_loaded , pt_outputs ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : Tuple):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers , pt_model_class_name)
pt_model = pt_model_class(config ).eval()
fx_model = model_class(config , dtype=jnp.floataa)
pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params)
batch_size, seq_length = pt_inputs["input_ids"].shape
rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
pt_inputs["attention_mask"][batch_idx, :start_index] = 0
pt_inputs["attention_mask"][batch_idx, start_index:] = 1
prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs ).to_tuple()
fx_outputs = fx_model(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(tmpdirname)
pt_model_loaded = pt_model_class.from_pretrained(tmpdirname , from_flax=True)
with torch.no_grad():
pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()
self.assertEqual(
len(fx_outputs ) , len(pt_outputs_loaded ) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs , pt_outputs_loaded ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@tooslow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
outputs = model(np.ones((1, 1)))
self.assertIsNotNone(outputs)
| 127 | 0 |
"""simple docstring"""
from __future__ import annotations
def all_construct( target : str , word_bank : list[str] | None = None ) -> list[list[str]]:
word_bank = word_bank or []
# create a table
table_size = len(target ) + 1
table: list = []
for _ in range(table_size ):
table.append([] )
# seed value
table[0] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(table_size ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(word )] == word:
new_combinations: list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(word )] += new_combinations
# combinations are in reverse order, so reverse for better output
for combination in table[len(target )]:
combination.reverse()
return table[len(target )]
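# table[i] holds every decomposition of target[:i]; the answer for the full target sits at table[len(target)]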
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 291 |
"""simple docstring"""
def a__ ( snake_case__ , snake_case__ = False ) -> str:
if not isinstance(snake_case__ , snake_case__ ):
lowerCamelCase = F'Expected string as input, found {type(snake_case__ )}'
raise ValueError(snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
lowerCamelCase = F'Expected boolean as use_pascal parameter, found {type(snake_case__ )}'
raise ValueError(snake_case__ )
lowerCamelCase = input_str.split("""_""" )
lowerCamelCase = 0 if use_pascal else 1
lowerCamelCase = words[start_index:]
lowerCamelCase = [word[0].upper() + word[1:] for word in words_to_capitalize]
lowerCamelCase = """""" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 291 | 1 |
import string
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
__a =''
for symbol in message:
if symbol in string.ascii_uppercase:
__a =string.ascii_uppercase.find(_snake_case )
__a =num - key
if num < 0:
__a =num + len(string.ascii_uppercase )
__a =translated + string.ascii_uppercase[num]
else:
__a =translated + symbol
print(F'Decryption using Key #{key}: {translated}' )
def UpperCamelCase_( ):
"""simple docstring"""
__a =input('Encrypted message: ' )
__a =message.upper()
decrypt(_snake_case )
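# Self-contained sketch of the shift arithmetic used above (added for clarity;
# not part of the original file): decryption subtracts the key and wraps
# around the 26-letter alphabet with a modulo.
def _caesar_decrypt_sketch(message: str, key: int) -> str:
    out = []
    for symbol in message:
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) - key) % 26
            out.append(string.ascii_uppercase[num])
        else:
            out.append(symbol)
    return "".join(out)
# _caesar_decrypt_sketch("KHOOR", 3) == "HELLO"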
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 308 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_lowerCAmelCase : Tuple = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
_lowerCAmelCase : Optional[int] = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
_lowerCAmelCase : Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a ={letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def UpperCamelCase_( _snake_case : tuple ):
"""simple docstring"""
return x[0]
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_letter_count(_snake_case )
__a ={
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_snake_case )
__a ={}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_snake_case )
__a =''.join(freq_to_letter[freq] )
__a =list(freq_to_letter_str.items() )
freq_pairs.sort(key=_snake_case , reverse=_snake_case )
__a =[freq_pair[1] for freq_pair in freq_pairs]
return "".join(_snake_case )
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_frequency_order(_snake_case )
__a =0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
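# Approximate, self-contained sketch of the scoring idea (added for clarity;
# not part of the original file): order the alphabet by observed frequency and
# count the overlap with English's six most and six least common letters, for
# a score out of 12. Ties are broken differently from the code above.
from collections import Counter
def _freq_match_sketch(message: str) -> int:
    counts = Counter(c for c in message.upper() if c.isalpha())
    order = "".join(sorted("ABCDEFGHIJKLMNOPQRSTUVWXYZ", key=lambda c: counts[c], reverse=True))
    etaoin = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
    return sum(c in order[:6] for c in etaoin[:6]) + sum(c in order[-6:] for c in etaoin[-6:])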
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308 | 1 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Dict , _UpperCamelCase : Any=1E-12 ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : str = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__lowerCamelCase , axis=1 ) , a_min=__lowerCamelCase ) ).T
__UpperCAmelCase : str = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__lowerCamelCase , axis=1 ) , a_min=__lowerCamelCase ) ).T
return jnp.matmul(__lowerCamelCase , norm_emb_a.T )
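# More explicit rendering of the cosine step above (added for clarity; not
# part of the original file). Despite the `jax_cosine_distance` name used at
# the call sites below, the helper computes cosine similarity: L2-normalize
# the rows of each embedding matrix, then one matmul gives all pairwise values.
def _cosine_similarity_sketch(emb_a, emb_b, eps=1e-12):
    a = emb_a / jnp.clip(jnp.linalg.norm(emb_a, axis=1, keepdims=True), eps, None)
    b = emb_b / jnp.clip(jnp.linalg.norm(emb_b, axis=1, keepdims=True), eps, None)
    return jnp.matmul(a, b.T)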
class lowerCamelCase__ ( nn.Module ):
"""simple docstring"""
__a = 42
__a = jnp.floataa
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = FlaxCLIPVisionModule(self.config.vision_config )
__UpperCAmelCase : List[Any] = nn.Dense(self.config.projection_dim , use_bias=snake_case__ , dtype=self.dtype )
__UpperCAmelCase : Tuple = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
__UpperCAmelCase : Optional[int] = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__UpperCAmelCase : Any = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,) )
__UpperCAmelCase : Any = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,) )
def __call__( self : Tuple , UpperCamelCase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : str = self.vision_model(snake_case__ )[1]
__UpperCAmelCase : int = self.visual_projection(snake_case__ )
__UpperCAmelCase : str = jax_cosine_distance(snake_case__ , self.special_care_embeds )
__UpperCAmelCase : Dict = jax_cosine_distance(snake_case__ , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__UpperCAmelCase : List[str] = 0.0
__UpperCAmelCase : Optional[int] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__UpperCAmelCase : int = jnp.round(snake_case__ , 3 )
__UpperCAmelCase : Optional[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=snake_case__ )
# Use a lower threshold if an image has any special care concept
__UpperCAmelCase : str = is_special_care * 0.01
__UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__UpperCAmelCase : Optional[Any] = jnp.round(snake_case__ , 3 )
__UpperCAmelCase : int = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class lowerCamelCase__ ( A_ ):
"""simple docstring"""
__a = CLIPConfig
__a = "clip_input"
__a = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : str , UpperCamelCase : CLIPConfig , UpperCamelCase : Optional[Tuple] = None , UpperCamelCase : int = 0 , UpperCamelCase : jnp.dtype = jnp.floataa , UpperCamelCase : bool = True , **UpperCamelCase : Dict , ):
'''simple docstring'''
if input_shape is None:
__UpperCAmelCase : int = (1, 224, 224, 3)
__UpperCAmelCase : Union[str, Any] = self.module_class(config=snake_case__ , dtype=snake_case__ , **snake_case__ )
super().__init__(snake_case__ , snake_case__ , input_shape=snake_case__ , seed=snake_case__ , dtype=snake_case__ , _do_init=_do_init )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : jax.random.KeyArray , UpperCamelCase : Tuple , UpperCamelCase : FrozenDict = None ):
'''simple docstring'''
__UpperCAmelCase : str = jax.random.normal(snake_case__ , snake_case__ )
__UpperCAmelCase : int = jax.random.split(snake_case__ )
__UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng}
__UpperCAmelCase : Optional[int] = self.module.init(snake_case__ , snake_case__ )["params"]
return random_params
def __call__( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : dict = None , ):
'''simple docstring'''
__UpperCAmelCase : int = jnp.transpose(snake_case__ , (0, 2, 3, 1) )
return self.module.apply(
{"""params""": params or self.params} , jnp.array(snake_case__ , dtype=jnp.floataa ) , rngs={} , )
| 115 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def UpperCamelCase ( __lowerCamelCase : np.ndarray ):
return input_array.reshape((input_array.size, 1) )
def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int ):
snake_case : Any = np.nan
for i in range(__lowerCamelCase ):
snake_case : List[str] = features[:, labels == i]
snake_case : Dict = data.mean(1 )
# Centralize the data of class i
snake_case : Optional[Any] = data - column_reshape(__lowerCamelCase )
if i > 0:
            # If covariance_sum has already been initialized (i.e. not the first loop)
covariance_sum += np.dot(__lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
snake_case : Optional[Any] = np.dot(__lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int ):
snake_case : Optional[Any] = features.mean(1 )
snake_case : Tuple = np.nan
for i in range(__lowerCamelCase ):
snake_case : Tuple = features[:, labels == i]
snake_case : Tuple = data.shape[1]
snake_case : List[str] = data.mean(1 )
if i > 0:
            # If covariance_sum has already been initialized (i.e. not the first loop)
covariance_sum += device_data * np.dot(
column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase ) , (column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
snake_case : Optional[int] = device_data * np.dot(
column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase ) , (column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int ):
# Check if the features have been loaded
if features.any():
snake_case : Tuple = features.mean(1 )
# Center the dataset
snake_case : List[str] = features - np.reshape(__lowerCamelCase , (data_mean.size, 1) )
snake_case : Optional[Any] = np.dot(__lowerCamelCase , centered_data.T ) / features.shape[1]
snake_case , snake_case : Dict = np.linalg.eigh(__lowerCamelCase )
        # Take all the columns in reverse order (-1), then keep only the first `dimensions` columns
snake_case : Optional[Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
snake_case : Union[str, Any] = np.dot(filtered_eigenvectors.T , __lowerCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
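# Compact sketch of the PCA recipe implemented above (added for clarity; not
# part of the original file). Features are laid out one row per feature and
# one column per sample, matching the mean(1)/column_reshape convention used
# in this module.
def _pca_sketch(features: np.ndarray, dimensions: int) -> np.ndarray:
    centered = features - features.mean(axis=1, keepdims=True)
    covariance = centered @ centered.T / features.shape[1]
    _, eigenvectors = np.linalg.eigh(covariance)  # eigenvalues in ascending order
    top = eigenvectors[:, ::-1][:, :dimensions]  # columns for the largest eigenvalues
    return top.T @ features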
def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : int ):
assert classes > dimensions
    # Check if the features have already been loaded
    if features.any():
snake_case , snake_case : str = eigh(
covariance_between_classes(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , covariance_within_classes(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , )
snake_case : str = eigenvectors[:, ::-1][:, :dimensions]
snake_case , snake_case , snake_case : int = np.linalg.svd(__lowerCamelCase )
snake_case : List[Any] = svd_matrix[:, 0:dimensions]
snake_case : Optional[Any] = np.dot(filtered_svd_matrix.T , __lowerCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def UpperCamelCase ( ):
# Create dummy dataset with 2 classes and 3 features
snake_case : str = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
snake_case : Union[str, Any] = np.array([0, 0, 0, 1, 1] )
snake_case : List[Any] = 2
snake_case : Any = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__lowerCamelCase ) as error_info:
snake_case : str = linear_discriminant_analysis(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if isinstance(__lowerCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def UpperCamelCase ( ):
snake_case : List[str] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
snake_case : List[str] = 2
snake_case : int = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] )
with pytest.raises(__lowerCamelCase ) as error_info:
snake_case : Union[str, Any] = principal_component_analysis(__lowerCamelCase , __lowerCamelCase )
if not np.allclose(__lowerCamelCase , __lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59 | 0 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class lowercase ( _lowerCAmelCase ):
    # to be overwritten by feature-extractor-specific tests
__lowercase : Any = None
__lowercase : Union[str, Any] = None
@property
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowercase , 'feature_size' ) )
self.assertTrue(hasattr(_lowercase , 'sampling_rate' ) )
self.assertTrue(hasattr(_lowercase , 'padding_value' ) )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowercase ) == len(_lowercase ) for x, y in zip(_lowercase , processed_features[input_name] ) ) )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowercase )
UpperCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
UpperCamelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCamelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowercase )
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
UpperCamelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCamelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowercase )
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
UpperCamelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCamelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def __UpperCamelCase ( self , A_=False ) -> Optional[Any]:
"""simple docstring"""
def _inputs_have_equal_length(A_ ):
UpperCamelCase = len(input[0] )
for input_slice in input[1:]:
if len(_lowercase ) != length:
return False
return True
def _inputs_are_equal(A_ , A_ ):
if len(_lowercase ) != len(_lowercase ):
return False
for input_slice_a, input_slice_a in zip(_lowercase , _lowercase ):
if not np.allclose(np.asarray(_lowercase ) , np.asarray(_lowercase ) , atol=1e-3 ):
return False
return True
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowercase )
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
UpperCamelCase = self.feat_extract_tester.seq_length_diff
UpperCamelCase = self.feat_extract_tester.max_seq_length + pad_diff
UpperCamelCase = self.feat_extract_tester.min_seq_length
UpperCamelCase = self.feat_extract_tester.batch_size
UpperCamelCase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
UpperCamelCase = feat_extract.pad(_lowercase , padding=_lowercase )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(_lowercase , padding='longest' )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(_lowercase , padding='max_length' , max_length=len(speech_inputs[-1] ) )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(_lowercase , padding='longest' , return_tensors='np' )
UpperCamelCase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , padding='max_length' )[input_name]
UpperCamelCase = feat_extract.pad(
_lowercase , padding='max_length' , max_length=_lowercase , return_tensors='np' )
UpperCamelCase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_are_equal(_lowercase , _lowercase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
UpperCamelCase = feat_extract.pad(_lowercase , pad_to_multiple_of=10 )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(_lowercase , padding='longest' , pad_to_multiple_of=10 )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(
_lowercase , padding='max_length' , pad_to_multiple_of=10 , max_length=_lowercase )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(
_lowercase , padding='max_length' , pad_to_multiple_of=10 , max_length=_lowercase , return_tensors='np' , )
UpperCamelCase = input_a[input_name]
self.assertTrue(all(len(_lowercase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_lowercase , _lowercase ) )
UpperCamelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_lowercase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
UpperCamelCase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def __UpperCamelCase ( self , A_=False ) -> Optional[int]:
"""simple docstring"""
def _inputs_have_equal_length(A_ ):
UpperCamelCase = len(input[0] )
for input_slice in input[1:]:
if len(_lowercase ) != length:
return False
return True
def _inputs_are_equal(A_ , A_ ):
if len(_lowercase ) != len(_lowercase ):
return False
for input_slice_a, input_slice_a in zip(_lowercase , _lowercase ):
if not np.allclose(np.asarray(_lowercase ) , np.asarray(_lowercase ) , atol=1e-3 ):
return False
return True
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowercase )
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
UpperCamelCase = feat_extract.pad(
_lowercase , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=_lowercase )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(_lowercase , padding='max_length' , max_length=len(speech_inputs[0] ) )
UpperCamelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
# truncate to smallest with np
UpperCamelCase = feat_extract.pad(
_lowercase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=_lowercase , )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(
_lowercase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
UpperCamelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces the padded length to be smaller than the longest input,
        # the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
# truncate to middle
UpperCamelCase = feat_extract.pad(
_lowercase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_lowercase , return_tensors='np' , )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(
_lowercase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_lowercase )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(
_lowercase , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
UpperCamelCase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_are_equal(_lowercase , _lowercase ) )
        # since truncation forces the padded length to be smaller than the longest input,
        # the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , truncation=_lowercase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , padding='longest' , truncation=_lowercase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , padding='longest' , truncation=_lowercase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , padding='max_length' , truncation=_lowercase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
UpperCamelCase = 12
UpperCamelCase = feat_extract.pad(
_lowercase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowercase , truncation=_lowercase , )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(
_lowercase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowercase , )
UpperCamelCase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
UpperCamelCase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
UpperCamelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
self._check_padding(numpify=_lowercase )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
self._check_padding(numpify=_lowercase )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
self._check_truncation(numpify=_lowercase )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
self._check_truncation(numpify=_lowercase )
@require_torch
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
UpperCamelCase = feat_extract.pad(_lowercase , padding='longest' , return_tensors='np' )[input_name]
UpperCamelCase = feat_extract.pad(_lowercase , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
UpperCamelCase = feat_extract.pad(_lowercase , padding='longest' , return_tensors='np' )[input_name]
UpperCamelCase = feat_extract.pad(_lowercase , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.feat_extract_dict
UpperCamelCase = True
UpperCamelCase = self.feature_extraction_class(**_lowercase )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase = [len(_lowercase ) for x in speech_inputs]
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
UpperCamelCase = feat_extract.pad(_lowercase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _lowercase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowercase )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.feat_extract_dict
UpperCamelCase = True
UpperCamelCase = self.feature_extraction_class(**_lowercase )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase = [len(_lowercase ) for x in speech_inputs]
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
UpperCamelCase = min(_lowercase )
UpperCamelCase = feat_extract.pad(
_lowercase , padding='max_length' , max_length=_lowercase , truncation=_lowercase , return_tensors='np' )
self.assertIn('attention_mask' , _lowercase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
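# Illustrative check of the attention-mask contract tested above (added for
# clarity; not part of the original file): padding inputs of lengths [3, 5, 4]
# to the longest one yields a mask whose row sums recover the original lengths.
_lengths = [3, 5, 4]
_mask = np.array([[1] * n + [0] * (max(_lengths) - n) for n in _lengths])
assert _mask.sum(-1).tolist() == _lengths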
| 370 |
import pprint
import requests
_UpperCAmelCase : Union[str, Any] = "https://zenquotes.io/api"
def A ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def A ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
_UpperCAmelCase : str = random_quotes()
pprint.pprint(response)
| 110 | 0 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 164 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'efficientnet'
def __init__( self : Any , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 600 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2560 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.001 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : List[Any] , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = width_coefficient
_UpperCAmelCase = depth_coefficient
_UpperCAmelCase = depth_divisor
_UpperCAmelCase = kernel_sizes
_UpperCAmelCase = in_channels
_UpperCAmelCase = out_channels
_UpperCAmelCase = depthwise_padding
_UpperCAmelCase = strides
_UpperCAmelCase = num_block_repeats
_UpperCAmelCase = expand_ratios
_UpperCAmelCase = squeeze_expansion_ratio
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dim
_UpperCAmelCase = pooling_type
_UpperCAmelCase = initializer_range
_UpperCAmelCase = batch_norm_eps
_UpperCAmelCase = batch_norm_momentum
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = drop_connect_rate
_UpperCAmelCase = sum(__lowerCAmelCase ) * 4
class a ( lowerCAmelCase_ ):
_snake_case : Dict = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : int ):
return 1e-5
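# Illustrative helper (an assumption added for clarity; not defined in this
# file): EfficientNet's compound scaling multiplies base channel counts by
# width_coefficient and rounds the result to a multiple of depth_divisor,
# e.g. _round_filters_sketch(32, 2.0) -> 64 for the b7-style width above.
def _round_filters_sketch(filters: int, width: float, divisor: int = 8) -> int:
    scaled = filters * width
    new_filters = max(divisor, int(scaled + divisor / 2) // divisor * divisor)
    if new_filters < 0.9 * scaled:  # never drop below 90% of the scaled value
        new_filters += divisor
    return int(new_filters)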
| 289 | 0 |
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=SCREAMING_SNAKE_CASE__ ):
lowercase_ : Any =['''torch''', '''scipy''']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch''', '''scipy'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch''', '''scipy'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch''', '''scipy'''])
| 369 |
import numpy as np
import datasets
lowercase__ :Dict = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
lowercase__ :List[Any] = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
lowercase__ :Dict = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def A__ ( self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' ,id='''sequence''') ,id='''X'''),
}) ,)
def A__ ( self ,A__ ,A__):
# convert to numpy arrays
lowercase = np.array(A__)
lowercase = np.array(A__)
# Assert that arrays are 2D
if len(X.shape) != 2:
raise ValueError('''Expected `X` to be a 2D vector''')
if len(reference_distribution.shape) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''')
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''')
# Get mahalanobis distance for each prediction
lowercase = X - np.mean(A__)
lowercase = np.cov(reference_distribution.T)
try:
lowercase = np.linalg.inv(A__)
except np.linalg.LinAlgError:
lowercase = np.linalg.pinv(A__)
lowercase = np.dot(A__ ,A__)
lowercase = np.dot(A__ ,X_minus_mu.T).diagonal()
return {"mahalanobis": mahal_dist}
| 97 | 0 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowercase_ = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def a ( A__ : Any ) -> Union[str, Any]:
"""simple docstring"""
_lowercase =list(s_dict.keys() )
for key in keys:
_lowercase =r'.*/layers_(\d+)'
_lowercase =key
if re.match(A__ , A__ ):
_lowercase =re.sub(r'layers_(\d+)' , r'block/\1/layer' , A__ )
_lowercase =r'(encoder|decoder)\/'
if re.match(A__ , A__ ):
_lowercase =re.match(A__ , A__ ).groups()
if groups[0] == "encoder":
_lowercase =re.sub(r'/mlp/' , r'/1/mlp/' , A__ )
_lowercase =re.sub(r'/pre_mlp_layer_norm/' , r'/1/layer_norm/' , A__ )
elif groups[0] == "decoder":
_lowercase =re.sub(r'/mlp/' , r'/2/mlp/' , A__ )
_lowercase =re.sub(r'/pre_mlp_layer_norm/' , r'/2/layer_norm/' , A__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_lowercase =new_key.replace(A__ , A__ )
print(F'''{key} -> {new_key}''' )
_lowercase =s_dict.pop(A__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_lowercase =s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_lowercase =s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_lowercase =s_dict[key].shape[0]
_lowercase =s_dict[key]
for idx in range(A__ ):
_lowercase =expert_weihts[idx]
print(F'''{key} -> {key.replace("expert/" , "nested fstring" )}''' )
s_dict.pop(A__ )
return s_dict
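# Regex sketch of the layer renaming performed above (added for clarity; not
# part of the original file): "encoder/layers_3/..." becomes
# "encoder/block/3/layer/...".
assert re.sub(r"layers_(\d+)", r"block/\1/layer", "encoder/layers_3/mlp") == "encoder/block/3/layer/mlp"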
lowercase_ = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def a ( A__ : List[Any] , A__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
import regex as re
with open(A__ , 'r' ) as f:
_lowercase =f.read()
_lowercase =re.findall(r'(.*) = ([0-9.]*)' , A__ )
_lowercase ={}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_lowercase =float(A__ ) if '.' in value else int(A__ )
_lowercase =re.findall(r'(.*activations) = \(\'(.*)\',\)' , A__ )[0]
_lowercase =str(activation[1] )
_lowercase =num_experts
_lowercase =SwitchTransformersConfig(**A__ )
return config
def a ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : int=None , A__ : str="./" , A__ : Tuple=8 ) -> str:
"""simple docstring"""
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
_lowercase =checkpoints.load_tax_checkpoint(A__ )
if gin_file is not None:
_lowercase =convert_gin_to_config(A__ , A__ )
else:
_lowercase =SwitchTransformersConfig.from_pretrained(A__ )
_lowercase =SwitchTransformersForConditionalGeneration(A__ )
_lowercase =flax_params['target']
_lowercase =flatten_dict(A__ , sep='/' )
_lowercase =rename_keys(A__ )
_lowercase =unflatten_dict(A__ , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(A__ , A__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(A__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
lowercase_ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 205 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowercase_ = logging.getLogger(__name__)
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
_lowercase =self.layer[current_layer](lowerCAmelCase , lowerCAmelCase , head_mask[current_layer] )
_lowercase =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self , lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase )
_lowercase =BertEncoderWithPabee(lowerCAmelCase )
self.init_weights()
_lowercase =0
_lowercase =0
_lowercase =0
_lowercase =0
def A__ ( self , lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_lowercase =threshold
def A__ ( self , lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowercase =patience
def A__ ( self ) -> Dict:
'''simple docstring'''
_lowercase =0
_lowercase =0
def A__ ( self ) -> int:
'''simple docstring'''
_lowercase =self.inference_layers_num / self.inference_instances_num
_lowercase =(
F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(lowerCAmelCase )
@add_start_docstrings_to_model_forward(lowerCAmelCase )
def A__ ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=False , ) -> str:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowercase =input_ids.size()
elif inputs_embeds is not None:
_lowercase =inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowercase =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowercase =torch.ones(lowerCAmelCase , device=lowerCAmelCase )
if token_type_ids is None:
_lowercase =torch.zeros(lowerCAmelCase , dtype=torch.long , device=lowerCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowercase =self.get_extended_attention_mask(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_lowercase , _lowercase , _lowercase =encoder_hidden_states.size()
_lowercase =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_lowercase =torch.ones(lowerCAmelCase , device=lowerCAmelCase )
_lowercase =self.invert_attention_mask(lowerCAmelCase )
else:
_lowercase =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowercase =self.get_head_mask(lowerCAmelCase , self.config.num_hidden_layers )
_lowercase =self.embeddings(
input_ids=lowerCAmelCase , position_ids=lowerCAmelCase , token_type_ids=lowerCAmelCase , inputs_embeds=lowerCAmelCase )
_lowercase =embedding_output
if self.training:
_lowercase =[]
for i in range(self.config.num_hidden_layers ):
_lowercase =self.encoder.adaptive_forward(
lowerCAmelCase , current_layer=lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase )
_lowercase =self.pooler(lowerCAmelCase )
_lowercase =output_layers[i](output_dropout(lowerCAmelCase ) )
res.append(lowerCAmelCase )
elif self.patience == 0: # Use all layers for inference
_lowercase =self.encoder(
lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , )
_lowercase =self.pooler(encoder_outputs[0] )
_lowercase =[output_layers[self.config.num_hidden_layers - 1](lowerCAmelCase )]
else:
_lowercase =0
_lowercase =None
_lowercase =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_lowercase =self.encoder.adaptive_forward(
lowerCAmelCase , current_layer=lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase )
_lowercase =self.pooler(lowerCAmelCase )
_lowercase =output_layers[i](lowerCAmelCase )
if regression:
_lowercase =logits.detach()
if patient_result is not None:
_lowercase =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_lowercase =0
else:
_lowercase =logits.detach().argmax(dim=1 )
if patient_result is not None:
_lowercase =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(lowerCAmelCase ) ):
patient_counter += 1
else:
_lowercase =0
_lowercase =logits
if patient_counter == self.patience:
break
_lowercase =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self , lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase )
_lowercase =config.num_labels
_lowercase =BertModelWithPabee(lowerCAmelCase )
_lowercase =nn.Dropout(config.hidden_dropout_prob )
_lowercase =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCAmelCase )
def A__ ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , ) -> Union[str, Any]:
'''simple docstring'''
_lowercase =self.bert(
input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , position_ids=lowerCAmelCase , head_mask=lowerCAmelCase , inputs_embeds=lowerCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_lowercase =(logits[-1],)
if labels is not None:
_lowercase =None
_lowercase =0
for ix, logits_item in enumerate(lowerCAmelCase ):
if self.num_labels == 1:
# We are doing regression
_lowercase =MSELoss()
_lowercase =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_lowercase =CrossEntropyLoss()
_lowercase =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_lowercase =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_lowercase =(total_loss / total_weights,) + outputs
return outputs
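# Conceptual sketch of the PABEE exit rule exercised above (added for clarity;
# not part of the original file): inference halts once `patience` consecutive
# internal classifiers agree with their predecessor's prediction.
def _pabee_should_stop(predictions: list, patience: int) -> bool:
    counter = 0
    for prev, curr in zip(predictions, predictions[1:]):
        counter = counter + 1 if curr == prev else 0
        if counter == patience:
            return True
    return False
# _pabee_should_stop([2, 2, 2], patience=2) is True; [2, 1, 2, 1] never stops.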
| 205 | 1 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def snake_case_ ( snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[Any]:
# load base model
lowercase__: Dict = StableDiffusionPipeline.from_pretrained(snake_case , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowercase__: int = load_file(snake_case )
lowercase__: str = []
# directly update weight in diffusers model
for key in state_dict:
        # it is suggested to print out the key; it usually looks like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # the alpha has already been applied beforehand, so just skip those keys
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowercase__: Dict = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
lowercase__: Optional[int] = pipeline.text_encoder
else:
lowercase__: Union[str, Any] = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
lowercase__: Optional[int] = pipeline.unet
# find the target layer
lowercase__: Tuple = layer_infos.pop(0 )
        while len(snake_case ) > -1:  # always true (len >= 0); the loop exits via break below
try:
lowercase__: Dict = curr_layer.__getattr__(snake_case )
if len(snake_case ) > 0:
lowercase__: Union[str, Any] = layer_infos.pop(0 )
elif len(snake_case ) == 0:
break
except Exception:
if len(snake_case ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowercase__: Optional[Any] = layer_infos.pop(0 )
lowercase__: List[Any] = []
if "lora_down" in key:
pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
pair_keys.append(snake_case )
else:
pair_keys.append(snake_case )
pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
lowercase__: List[str] = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowercase__: Union[str, Any] = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(snake_case , snake_case ).unsqueeze(2 ).unsqueeze(3 )
else:
lowercase__: Dict = state_dict[pair_keys[0]].to(torch.floataa )
lowercase__: Dict = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(snake_case , snake_case )
# update visited list
for item in pair_keys:
visited.append(snake_case )
return pipeline
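# Toy rendering of the merge rule applied in the loop above (added for
# clarity; not part of the original file): a LoRA pair folds into the base
# weight as W <- W + alpha * (up @ down), a low-rank additive update.
_W = torch.zeros(4, 4)
_up, _down = torch.randn(4, 2), torch.randn(2, 4)
_W += 0.75 * torch.mm(_up, _down)  # rank-2 update scaled by alpha = 0.75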
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = args.base_model_path
__lowerCAmelCase = args.checkpoint_path
__lowerCAmelCase = args.dump_path
__lowerCAmelCase = args.lora_prefix_unet
__lowerCAmelCase = args.lora_prefix_text_encoder
__lowerCAmelCase = args.alpha
__lowerCAmelCase = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
__lowerCAmelCase = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 350 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def snake_case_ ( snake_case , snake_case , snake_case = False ) -> list[float]:
if radian_mode:
return [magnitude * cos(snake_case ), magnitude * sin(snake_case )]
return [magnitude * cos(radians(snake_case ) ), magnitude * sin(radians(snake_case ) )]
def snake_case_ ( snake_case , snake_case , snake_case = 10**-1 ) -> bool:
lowercase__: NDArray[floataa] = cross(snake_case , snake_case )
lowercase__: float = sum(snake_case )
return abs(snake_case ) < eps
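# Worked check (added for clarity; not part of the original file): in 2D the
# cross product reduces to the scalar moment about the origin, so two equal
# and opposite forces acting along the line joining their application points
# produce zero net moment and the system is in equilibrium.
_moments = cross(array([[0.0, 0.0], [2.0, 0.0]]), array([[1.0, 0.0], [-1.0, 0.0]]))
assert abs(_moments.sum()) < 1e-9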
if __name__ == "__main__":
# Test to check if it works
__lowerCAmelCase = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
__lowerCAmelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__lowerCAmelCase = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
__lowerCAmelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__lowerCAmelCase = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
__lowerCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 288 | 0 |
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__SCREAMING_SNAKE_CASE ="sshleifer/mar_enro_6_3_student"
class UpperCamelCase ( lowercase_ ):
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
super().setUp()
lowercase_ : List[str] = cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' ,extract_compressed_file=__UpperCamelCase ,)
lowercase_ : Tuple = f'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
MarianMTModel.from_pretrained(__UpperCamelCase )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Dict = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
lowercase_ : Optional[int] = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
lowercase_ : Optional[Any] = bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
for k, v in env_vars_to_replace.items():
lowercase_ : int = bash_script.replace(__UpperCamelCase ,str(__UpperCamelCase ) )
lowercase_ : Optional[Any] = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowercase_ : List[str] = f'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowercase_ : int = ['finetune.py'] + bash_script.split() + args
with patch.object(__UpperCamelCase ,'argv' ,__UpperCamelCase ):
lowercase_ : Optional[int] = argparse.ArgumentParser()
lowercase_ : Any = pl.Trainer.add_argparse_args(__UpperCamelCase )
lowercase_ : Tuple = SummarizationModule.add_model_specific_args(__UpperCamelCase ,os.getcwd() )
lowercase_ : Tuple = parser.parse_args()
lowercase_ : Optional[Any] = main(__UpperCamelCase )
# Check metrics
lowercase_ : int = load_json(model.metrics_save_path )
lowercase_ : Tuple = metrics['val'][0]
lowercase_ : str = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) ,(args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f'''val_avg_{model.val_metric}'''] ,__UpperCamelCase )
self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.01 )
# generation must not hang; a failure here usually means a bad config was saved
self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] ,17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase_ : Dict = os.listdir(__UpperCamelCase )
lowercase_ : int = [x for x in contents if x.endswith('.ckpt' )][0]
lowercase_ : Tuple = os.path.join(args.output_dir ,__UpperCamelCase )
lowercase_ : List[str] = torch.load(__UpperCamelCase ,map_location='cpu' )
lowercase_ : List[Any] = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase_ : Any = {os.path.basename(__UpperCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class UpperCamelCase ( lowercase_ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Optional[int] = f'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
lowercase_ : Union[str, Any] = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
lowercase_ : Any = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
lowercase_ : Tuple = bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
lowercase_ : Union[str, Any] = bash_script.replace('--fp16 ' ,' ' )
for k, v in env_vars_to_replace.items():
lowercase_ : Dict = bash_script.replace(__UpperCamelCase ,str(__UpperCamelCase ) )
lowercase_ : List[Any] = self.get_auto_remove_tmp_dir()
lowercase_ : int = bash_script.replace('--fp16' ,'' )
lowercase_ : str = 6
lowercase_ : List[Any] = (
['distillation.py']
+ bash_script.split()
+ [
f'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
f'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__UpperCamelCase ,'argv' ,__UpperCamelCase ):
lowercase_ : Any = argparse.ArgumentParser()
lowercase_ : int = pl.Trainer.add_argparse_args(__UpperCamelCase )
lowercase_ : int = SummarizationDistiller.add_model_specific_args(__UpperCamelCase ,os.getcwd() )
lowercase_ : str = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowercase_ : Optional[int] = distill_main(__UpperCamelCase )
# Check metrics
lowercase_ : Optional[int] = load_json(model.metrics_save_path )
lowercase_ : Optional[Any] = metrics['val'][0]
lowercase_ : Union[str, Any] = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # fails if the model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # generation must not hang; a failure here usually means a bad config was saved
assert isinstance(last_step_stats[f'''val_avg_{model.val_metric}'''] ,__UpperCamelCase )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase_ : Union[str, Any] = os.listdir(__UpperCamelCase )
lowercase_ : int = [x for x in contents if x.endswith('.ckpt' )][0]
lowercase_ : Optional[Any] = os.path.join(args.output_dir ,__UpperCamelCase )
lowercase_ : Union[str, Any] = torch.load(__UpperCamelCase ,map_location='cpu' )
lowercase_ : Any = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase_ : str = {os.path.basename(__UpperCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 213 | """simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class UpperCamelCase ( unittest.TestCase ):
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=30 ,__UpperCamelCase=2 ,__UpperCamelCase=3 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=10 ,__UpperCamelCase=0.02 ,) -> Tuple:
'''simple docstring'''
lowercase_ : Tuple = parent
lowercase_ : Union[str, Any] = batch_size
lowercase_ : int = image_size
lowercase_ : Tuple = patch_size
lowercase_ : Optional[int] = num_channels
lowercase_ : Union[str, Any] = is_training
lowercase_ : Dict = use_labels
lowercase_ : Optional[int] = hidden_size
lowercase_ : List[str] = num_hidden_layers
lowercase_ : Optional[Any] = num_attention_heads
lowercase_ : Optional[int] = intermediate_size
lowercase_ : Tuple = hidden_act
lowercase_ : int = hidden_dropout_prob
lowercase_ : str = attention_probs_dropout_prob
lowercase_ : str = type_sequence_label_size
lowercase_ : Optional[int] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ : str = (image_size // patch_size) ** 2
lowercase_ : Optional[int] = num_patches + 1
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[Any] = ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,)
return config, pixel_values
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : List[Any] = FlaxViTModel(config=__UpperCamelCase )
lowercase_ : Dict = model(__UpperCamelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
lowercase_ : Union[str, Any] = (self.image_size, self.image_size)
lowercase_ : List[Any] = (self.patch_size, self.patch_size)
lowercase_ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[Any] = self.type_sequence_label_size
lowercase_ : str = FlaxViTForImageClassification(config=__UpperCamelCase )
lowercase_ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ : Union[str, Any] = 1
lowercase_ : Optional[int] = FlaxViTForImageClassification(__UpperCamelCase )
lowercase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : str = model(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ : List[Any] = config_and_inputs
lowercase_ : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase ( lowercase_ , unittest.TestCase ):
lowercase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _UpperCAmelCase ( self ) -> None:
'''simple docstring'''
lowercase_ : Optional[Any] = FlaxViTModelTester(self )
lowercase_ : Union[str, Any] = ConfigTester(self ,config_class=__UpperCamelCase ,has_text_modality=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(__UpperCamelCase )
lowercase_ : Tuple = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase_ : Optional[Any] = self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = model_class(__UpperCamelCase )
@jax.jit
def model_jitted(__UpperCamelCase ,**__UpperCamelCase ):
return model(pixel_values=__UpperCamelCase ,**__UpperCamelCase )
with self.subTest('JIT Enabled' ):
lowercase_ : Optional[int] = model_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowercase_ : List[str] = model_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase ,__UpperCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase_ : Optional[int] = model_class_name.from_pretrained('google/vit-base-patch16-224' )
lowercase_ : int = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__UpperCamelCase )
| 213 | 1 |
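The tester above hard-codes the ViT sequence-length rule: (image_size // patch_size) ** 2 patches plus one [CLS] token. A framework-free sanity check of that arithmetic:

def vit_seq_len(image_size: int, patch_size: int) -> int:
    # number of non-overlapping patches plus one [CLS] token
    return (image_size // patch_size) ** 2 + 1

assert vit_seq_len(224, 16) == 197  # the standard ViT-Base/16 sequence length
assert vit_seq_len(30, 2) == 226    # matches the tester defaults above (image_size=30, patch_size=2)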
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
UpperCamelCase__ : int = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''git_vision_model'''
def __init__( self , _lowerCamelCase=768 , _lowerCamelCase=3072 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3 , _lowerCamelCase=224 , _lowerCamelCase=16 , _lowerCamelCase="quick_gelu" , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , **_lowerCamelCase , ) -> int:
super().__init__(**_lowerCamelCase )
A_ : Optional[Any] = hidden_size
A_ : Optional[Any] = intermediate_size
A_ : Any = num_hidden_layers
A_ : str = num_attention_heads
A_ : int = num_channels
A_ : int = patch_size
A_ : List[str] = image_size
A_ : int = initializer_range
A_ : Optional[Any] = attention_dropout
A_ : Tuple = layer_norm_eps
A_ : Union[str, Any] = hidden_act
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , **_lowerCamelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_lowerCamelCase )
A_ , A_ : str = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
A_ : Union[str, Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase , **_lowerCamelCase )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''git'''
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=6 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1024 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=0 , _lowerCamelCase="absolute" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=101 , _lowerCamelCase=102 , _lowerCamelCase=None , **_lowerCamelCase , ) -> Optional[int]:
super().__init__(bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , pad_token_id=_lowerCamelCase , **_lowerCamelCase )
if vision_config is None:
A_ : List[str] = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
A_ : Tuple = GitVisionConfig(**_lowerCamelCase )
A_ : str = vocab_size
A_ : str = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : int = num_attention_heads
A_ : Optional[Any] = hidden_act
A_ : Any = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[int] = initializer_range
A_ : Any = layer_norm_eps
A_ : Tuple = position_embedding_type
A_ : Tuple = use_cache
A_ : Dict = tie_word_embeddings
A_ : List[str] = num_image_with_embedding
A_ : Optional[Any] = bos_token_id
A_ : List[str] = eos_token_id
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : List[Any] = copy.deepcopy(self.__dict__ )
A_ : List[str] = self.vision_config.to_dict()
A_ : List[str] = self.__class__.model_type
return output
| 164 |
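The pattern above, a text config owning a nested vision config that to_dict re-serializes, is how transformers composite configs generally work. A brief usage sketch with the real GitConfig, assuming a recent transformers install:

from transformers import GitConfig

config = GitConfig()  # a default GitVisionConfig is built when vision_config is None
print(config.vision_config.hidden_size)  # the nested config is a full object
as_dict = config.to_dict()  # to_dict flattens the nested config back to a plain dict
assert isinstance(as_dict["vision_config"], dict)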
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase__ : Any = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Union[str, Any] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 164 | 1 |
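The try/except OptionalDependencyNotAvailable blocks above only register names whose backend imports cleanly, and _LazyModule then defers the real import until attribute access. A minimal sketch of that deferred-import idea using only the standard library (the class and names here are illustrative, not transformers' implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their attributes is first accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # reverse map: exported attribute -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so __getattr__ only runs once per name
        return value

# json.dumps is only imported when first touched
lazy = LazyModule("lazy_demo", {"json": ["dumps"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'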
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCAmelCase_ = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
lowerCAmelCase_ = {
'''169M''': 7_68,
'''430M''': 10_24,
'''1B5''': 20_48,
'''3B''': 25_60,
'''7B''': 40_96,
'''14B''': 51_20,
}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = list(state_dict.keys() )
for name in state_dict_keys:
snake_case_ = state_dict.pop(SCREAMING_SNAKE_CASE__ )
# emb -> embedding
if name.startswith('''emb.''' ):
snake_case_ = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
snake_case_ = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
snake_case_ = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , SCREAMING_SNAKE_CASE__ )
# ffn -> feed_forward
snake_case_ = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , SCREAMING_SNAKE_CASE__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
snake_case_ = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
snake_case_ = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
# time_mix_r -> time_mix_receptance and reshape
if name.endswith('''.time_mix_r''' ):
snake_case_ = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
snake_case_ = '''rwkv.''' + name
snake_case_ = weight
return state_dict
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
snake_case_ = 50277
snake_case_ = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
snake_case_ = PreTrainedTokenizerFast(tokenizer_file=SCREAMING_SNAKE_CASE__ )
snake_case_ = len(SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
# 2. Build the config
snake_case_ = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
snake_case_ = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''' )
snake_case_ = RwkvConfig(
vocab_size=SCREAMING_SNAKE_CASE__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(SCREAMING_SNAKE_CASE__ )
# 3. Download model file then convert state_dict
snake_case_ = hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
snake_case_ = convert_state_dict(SCREAMING_SNAKE_CASE__ )
# 4. Split in shards and save
snake_case_, snake_case_ = shard_checkpoint(SCREAMING_SNAKE_CASE__ )
for shard_file, shard in shards.items():
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
if index is not None:
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save the index as well
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f:
snake_case_ = json.dumps(SCREAMING_SNAKE_CASE__ , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ ) + '''\n'''
f.write(SCREAMING_SNAKE_CASE__ )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
'''Cleaning up shards. This may fail with an OOM error; if this is the case, don\'t worry, you still have converted the model.''' )
snake_case_ = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
snake_case_ = torch.load(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
snake_case_ = AutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
model.push_to_hub(SCREAMING_SNAKE_CASE__ , max_shard_size='''2GB''' )
tokenizer.push_to_hub(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
lowerCAmelCase_ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
) | 8 |
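The renaming pass above is mostly literal prefix swaps plus two regex substitutions that preserve the block index. A quick demonstration of the regex step on sample keys, assuming only the standard library:

import re

def rename_rwkv_block_key(name: str) -> str:
    # att -> attention and ffn -> feed_forward, keeping the block index intact
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
    name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
    return name

assert rename_rwkv_block_key("blocks.3.att.time_mix_k") == "blocks.3.attention.time_mix_k"
assert rename_rwkv_block_key("blocks.11.ffn.key.weight") == "blocks.11.feed_forward.key.weight"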
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
A = None
A = logging.get_logger(__name__)
A = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
A = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
A = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = TaTokenizer
__lowerCAmelCase = []
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="</s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase=100 , _UpperCAmelCase=None , **_UpperCAmelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__a : Dict = [f"""<extra_id_{i}>""" for i in range(_UpperCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
__a : Union[str, Any] = len(set(filter(lambda _UpperCAmelCase : bool('''extra_id_''' in str(_UpperCAmelCase ) ) , _UpperCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , extra_ids=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__a : Union[str, Any] = vocab_file
__a : int = False if not self.vocab_file else True
__a : List[str] = extra_ids
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
__a : Any = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCAmelCase , )
return max_model_length
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a : Optional[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : str = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
__a : List[str] = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : Tuple = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self ):
return list(
set(filter(lambda _UpperCAmelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self ):
return [self.convert_tokens_to_ids(_UpperCAmelCase ) for token in self.get_sentinel_tokens()] | 160 | 0 |
'''simple docstring'''
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 332 |
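The module above is a pure deprecation shim: importing it emits a FutureWarning pointing at the new location. A minimal sketch of the same pattern for any relocated helper (the module and function names here are illustrative):

# old_location.py -- shim kept so stale imports keep working
import warnings

from new_location import useful_helper  # noqa: F401  re-export under the old path

warnings.warn(
    "old_location has moved. Import `useful_helper` from `new_location` to avoid this warning.",
    FutureWarning,
)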
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = LEDTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
super().__init__(
lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**lowercase_ )
snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case = 'post_processor'
snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
if tokenizer_component_instance:
snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case = tuple(state['sep'] )
if "cls" in state:
snake_case = tuple(state['cls'] )
snake_case = False
if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = add_prefix_space
snake_case = True
if state.get('trim_offsets', lowercase_ ) != trim_offsets:
snake_case = trim_offsets
snake_case = True
if changes_to_apply:
snake_case = getattr(lowercase_, state.pop('type' ) )
snake_case = component_class(**lowercase_ )
setattr(self.backend_tokenizer, lowercase_, lowercase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _lowerCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCamelCase ( self, lowercase_ ) -> Any:
snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value
snake_case = value
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ )
return tuple(lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict:
snake_case = super()._pad(
encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, )
# Load from model defaults
if return_attention_mask is None:
snake_case = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ )
if needs_to_be_padded:
snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
snake_case = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy: ' + str(self.padding_side ) )
return encoded_inputs
| 332 | 1 |
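The _pad override above grows global_attention_mask with -1 entries so its length keeps tracking the padded input_ids. The core of that logic, isolated as a plain-list sketch:

def pad_global_attention_mask(mask: list, target_len: int, padding_side: str = "right") -> list:
    # -1 marks "local attention" for padded positions, mirroring the tokenizer above
    difference = target_len - len(mask)
    if difference <= 0:
        return mask
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0, 0], 5, "left") == [-1, -1, 1, 0, 0]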
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowercase_ = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowercase_ = TaTokenizerFast
lowercase_ = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowercase_ = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
| 303 | """simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class UpperCamelCase :
def __init__( self, lowerCAmelCase__) -> Optional[int]:
snake_case_ = data
snake_case_ = None
class UpperCamelCase :
def __init__( self) -> Dict:
snake_case_ = None
snake_case_ = None
def __iter__( self) -> Iterator[Any]:
snake_case_ = self.head
while self.head:
yield node.data
snake_case_ = node.next
if node == self.head:
break
def __len__( self) -> int:
return sum(1 for _ in self)
def __repr__( self) -> str:
return "->".join(str(lowerCAmelCase__) for item in iter(self))
def a_ ( self, lowerCAmelCase__) -> None:
self.insert_nth(len(self), lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> None:
self.insert_nth(0, lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> None:
if index < 0 or index > len(self):
raise IndexError('list index out of range.')
snake_case_ = Node(lowerCAmelCase__)
if self.head is None:
snake_case_ = new_node # first node points to itself
snake_case_ = snake_case_ = new_node
elif index == 0: # insert at head
snake_case_ = self.head
snake_case_ = snake_case_ = new_node
else:
snake_case_ = self.head
for _ in range(index - 1):
snake_case_ = temp.next
snake_case_ = temp.next
snake_case_ = new_node
if index == len(self) - 1: # insert at tail
snake_case_ = new_node
def a_ ( self) -> str:
return self.delete_nth(0)
def a_ ( self) -> Any:
return self.delete_nth(len(self) - 1)
def a_ ( self, lowerCAmelCase__ = 0) -> Any:
if not 0 <= index < len(self):
raise IndexError('list index out of range.')
snake_case_ = self.head
if self.head == self.tail: # just one node
snake_case_ = snake_case_ = None
elif index == 0: # delete head node
snake_case_ = self.tail.next.next
snake_case_ = self.head.next
else:
snake_case_ = self.head
for _ in range(index - 1):
snake_case_ = temp.next
snake_case_ = temp.next
snake_case_ = temp.next.next
if index == len(self) - 1: # delete at tail
snake_case_ = temp
return delete_node.data
def a_ ( self) -> bool:
return len(self) == 0
def UpperCAmelCase ( ) -> None:
snake_case_ = CircularLinkedList()
assert len(UpperCAmelCase ) == 0
assert circular_linked_list.is_empty() is True
assert str(UpperCAmelCase ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(UpperCAmelCase ) == i
circular_linked_list.insert_nth(UpperCAmelCase , i + 1 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 | 0 |
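For reference, here is how the list above behaves in a short session, assuming the un-mangled class name CircularLinkedList from the original source:

cll = CircularLinkedList()
for value in (1, 2, 3):
    cll.insert_tail(value)
cll.insert_head(0)
assert str(cll) == "0->1->2->3"  # __repr__ joins node data with "->"
assert cll.delete_front() == 0   # removes and returns the head's data
assert len(cll) == 3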
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any]=1e-1_2 ):
"""simple docstring"""
_snake_case : Any = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(snake_case__ , axis=1 ) , a_min=snake_case__ ) ).T
_snake_case : Union[str, Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(snake_case__ , axis=1 ) , a_min=snake_case__ ) ).T
return jnp.matmul(snake_case__ , norm_emb_a.T )
class lowercase( nn.Module ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = jnp.floataa
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[str] = FlaxCLIPVisionModule(self.config.vision_config )
_snake_case : Tuple = nn.Dense(self.config.projection_dim, use_bias=a_, dtype=self.dtype )
_snake_case : Any = self.param("""concept_embeds""", jax.nn.initializers.ones, (17, self.config.projection_dim) )
_snake_case : List[Any] = self.param(
"""special_care_embeds""", jax.nn.initializers.ones, (3, self.config.projection_dim) )
_snake_case : Any = self.param("""concept_embeds_weights""", jax.nn.initializers.ones, (17,) )
_snake_case : int = self.param("""special_care_embeds_weights""", jax.nn.initializers.ones, (3,) )
def __call__( self: List[str], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.vision_model(a_ )[1]
_snake_case : Union[str, Any] = self.visual_projection(a_ )
_snake_case : Optional[Any] = jax_cosine_distance(a_, self.special_care_embeds )
_snake_case : Any = jax_cosine_distance(a_, self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
_snake_case : Union[str, Any] = 0.0
_snake_case : List[Any] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
_snake_case : Tuple = jnp.round(a_, 3 )
_snake_case : int = jnp.any(special_scores > 0, axis=1, keepdims=a_ )
# Use a lower threshold if an image has any special care concept
_snake_case : int = is_special_care * 0.01
_snake_case : List[str] = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
_snake_case : Optional[int] = jnp.round(a_, 3 )
_snake_case : Dict = jnp.any(concept_scores > 0, axis=1 )
return has_nsfw_concepts
class lowercase( __a ):
'''simple docstring'''
lowercase__ = CLIPConfig
lowercase__ = "clip_input"
lowercase__ = FlaxStableDiffusionSafetyCheckerModule
def __init__( self: Union[str, Any], a_: CLIPConfig, a_: Optional[Tuple] = None, a_: int = 0, a_: jnp.dtype = jnp.floataa, a_: bool = True, **a_: Optional[int], ):
'''simple docstring'''
if input_shape is None:
_snake_case : str = (1, 224, 224, 3)
_snake_case : Tuple = self.module_class(config=a_, dtype=a_, **a_ )
super().__init__(a_, a_, input_shape=a_, seed=a_, dtype=a_, _do_init=_do_init )
def UpperCamelCase_ ( self: Union[str, Any], a_: jax.random.KeyArray, a_: Tuple, a_: FrozenDict = None ):
'''simple docstring'''
_snake_case : int = jax.random.normal(a_, a_ )
_snake_case , _snake_case : List[str] = jax.random.split(a_ )
_snake_case : Optional[Any] = {"""params""": params_rng, """dropout""": dropout_rng}
_snake_case : List[str] = self.module.init(a_, a_ )["""params"""]
return random_params
def __call__( self: Tuple, a_: List[Any], a_: dict = None, ):
'''simple docstring'''
_snake_case : Union[str, Any] = jnp.transpose(a_, (0, 2, 3, 1) )
return self.module.apply(
{"""params""": params or self.params}, jnp.array(a_, dtype=jnp.floataa ), rngs={}, )
| 132 |
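The cosine-distance helper above L2-normalizes both embedding sets before a matmul, so the result is a cosine-similarity matrix. The same computation in a compact standalone form, assuming jax is installed (a sketch, not the checker's exact code):

import jax.numpy as jnp

def cosine_similarity_matrix(a: jnp.ndarray, b: jnp.ndarray, eps: float = 1e-12) -> jnp.ndarray:
    # normalize rows to unit length, flooring tiny norms for numerical safety
    a_norm = a / jnp.maximum(jnp.linalg.norm(a, axis=1, keepdims=True), eps)
    b_norm = b / jnp.maximum(jnp.linalg.norm(b, axis=1, keepdims=True), eps)
    return a_norm @ b_norm.T  # (num_a, num_b) matrix of cosine similarities

sims = cosine_similarity_matrix(jnp.ones((2, 4)), jnp.ones((3, 4)))
assert sims.shape == (2, 3)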
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase( __a ):
'''simple docstring'''
def __init__( self: Optional[int], a_: str, a_: Optional[Any] ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self: Any, a_: int = 1, a_: int = 100, a_: Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_: Optional[float] = None, a_: bool = True, ):
'''simple docstring'''
if audio_length_in_s is None:
_snake_case : Dict = self.unet.config.sample_size / self.unet.config.sample_rate
_snake_case : Optional[int] = audio_length_in_s * self.unet.config.sample_rate
_snake_case : int = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
f" {3 * down_scale_factor / self.unet.config.sample_rate}." )
_snake_case : Union[str, Any] = int(a_ )
if sample_size % down_scale_factor != 0:
_snake_case : Optional[Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
""" process.""" )
_snake_case : str = int(a_ )
_snake_case : int = next(iter(self.unet.parameters() ) ).dtype
_snake_case : Optional[Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(a_, a_ ) and len(a_ ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(a_ )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
_snake_case : Optional[Any] = randn_tensor(a_, generator=a_, device=self.device, dtype=a_ )
# set step values
self.scheduler.set_timesteps(a_, device=audio.device )
_snake_case : Optional[int] = self.scheduler.timesteps.to(a_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_snake_case : str = self.unet(a_, a_ ).sample
# 2. compute previous sample: x_t -> x_t-1
_snake_case : Optional[Any] = self.scheduler.step(a_, a_, a_ ).prev_sample
_snake_case : Tuple = audio.clamp(-1, 1 ).float().cpu().numpy()
_snake_case : Dict = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=a_ )
| 132 | 1 |
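The pipeline above rounds the requested sample count up to a multiple of 2 ** len(up_blocks) so every downsampling stage divides evenly. That rounding step in isolation (plain Python, illustrative names):

def round_up_sample_size(requested: int, down_scale_factor: int) -> int:
    # round up to the next multiple of down_scale_factor, as the pipeline does
    if requested % down_scale_factor == 0:
        return requested
    return (requested // down_scale_factor + 1) * down_scale_factor

assert round_up_sample_size(100, 8) == 104
assert round_up_sample_size(96, 8) == 96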