"""Tokenization class for model T5."""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer


if TYPE_CHECKING:
    from ...tokenization_utils_base import TextInput
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    """Constructs a T5 tokenizer backed by a SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add the <extra_id_*> sentinel tokens to the special token list.
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; it is re-loaded in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # In the non-legacy path, prepend SPIECE_UNDERLINE so that tokens coming
        # after special tokens are segmented consistently.
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab; sentinels occupy the top of the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
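# --- Added usage sketch (not part of the original module) --------------------
# Illustrates the sentinel-token bookkeeping above. "spiece.model" is a
# placeholder path; any trained SentencePiece model file would work.
#
# tok = T5Tokenizer("spiece.model", extra_ids=100)
# print(tok.get_sentinel_tokens()[:2])                 # e.g. ['<extra_id_0>', '<extra_id_1>']
# assert tok._convert_token_to_id("<extra_id_0>") == tok.vocab_size - 1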
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No default supervised keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # No default supervised keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
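# --- Added sketch (not part of the original tests): the core flow the suite
# exercises, shown outside unittest. Requires `apache_beam`; names are
# illustrative only.
#
# with tempfile.TemporaryDirectory() as tmp_cache_dir:
#     builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
#     builder.download_and_prepare()          # runs the Beam pipeline locally
#     dset = builder.as_dataset()
#     print(dset["train"][0])                 # {'content': 'foo'}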
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : Dict = ["""image_processor""", """tokenizer"""]
a_ : str = """FlavaImageProcessor"""
a_ : List[Any] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] , a_ : List[str]=None , a_ : Dict=None , **a_ : List[Any] ):
lowerCAmelCase_ : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a_ , )
lowerCAmelCase_ : int = kwargs.pop("feature_extractor" )
lowerCAmelCase_ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a_ , a_ )
lowerCAmelCase_ : Union[str, Any] = self.image_processor
def __call__( self : Union[str, Any] , a_ : Optional[ImageInput] = None , a_ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , a_ : bool = True , a_ : Union[bool, str, PaddingStrategy] = False , a_ : Union[bool, str, TruncationStrategy] = False , a_ : Optional[int] = None , a_ : int = 0 , a_ : Optional[int] = None , a_ : Optional[bool] = None , a_ : Optional[bool] = None , a_ : Optional[bool] = None , a_ : Optional[bool] = None , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = True , a_ : Optional[Union[str, TensorType]] = None , **a_ : Optional[int] , ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowerCAmelCase_ : str = self.tokenizer(
text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , )
if images is not None:
lowerCAmelCase_ : int = self.image_processor(
a_ , return_image_mask=a_ , return_codebook_pixels=a_ , return_tensors=a_ , **a_ , )
if text is not None and images is not None:
encoding.update(a_ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def lowerCamelCase ( self : List[Any] , *a_ : Optional[int] , **a_ : Optional[int] ):
return self.tokenizer.batch_decode(*a_ , **a_ )
def lowerCamelCase ( self : Dict , *a_ : int , **a_ : str ):
return self.tokenizer.decode(*a_ , **a_ )
@property
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : Any = self.tokenizer.model_input_names
lowerCAmelCase_ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase ( self : List[str] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a_ , )
return self.image_processor_class
@property
def lowerCamelCase ( self : str ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a_ , )
return self.image_processor
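# --- Added usage sketch (not part of the original module). The checkpoint name
# is the public FLAVA checkpoint; image and text inputs are illustrative.
#
# from transformers import FlavaProcessor
# from PIL import Image
#
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# inputs = processor(images=Image.open("cat.png"), text=["a photo of a cat"], return_tensors="pt")
# # -> BatchEncoding with input_ids, attention_mask and pixel_values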
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : Union[str, Any] = """gpt_neo"""
a_ : List[Any] = ["""past_key_values"""]
a_ : Optional[Any] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self : Optional[int] , a_ : List[str]=5_02_57 , a_ : List[str]=20_48 , a_ : Union[str, Any]=20_48 , a_ : Union[str, Any]=24 , a_ : Optional[int]=[[["global", "local"], 12]] , a_ : str=16 , a_ : Optional[Any]=None , a_ : str=2_56 , a_ : Union[str, Any]="gelu_new" , a_ : Optional[int]=0.0 , a_ : Optional[Any]=0.0 , a_ : List[Any]=0.0 , a_ : List[Any]=0.1 , a_ : Optional[Any]=1e-5 , a_ : Optional[Any]=0.02 , a_ : int=True , a_ : Optional[Any]=5_02_56 , a_ : Tuple=5_02_56 , **a_ : str , ):
lowerCAmelCase_ : Optional[Any] = vocab_size
lowerCAmelCase_ : str = max_position_embeddings
lowerCAmelCase_ : Tuple = hidden_size
lowerCAmelCase_ : Union[str, Any] = num_layers
lowerCAmelCase_ : str = num_heads
lowerCAmelCase_ : List[str] = intermediate_size
lowerCAmelCase_ : Union[str, Any] = window_size
lowerCAmelCase_ : Any = activation_function
lowerCAmelCase_ : str = resid_dropout
lowerCAmelCase_ : Union[str, Any] = embed_dropout
lowerCAmelCase_ : Optional[Any] = attention_dropout
lowerCAmelCase_ : Dict = classifier_dropout
lowerCAmelCase_ : int = layer_norm_epsilon
lowerCAmelCase_ : Dict = initializer_range
lowerCAmelCase_ : List[Any] = use_cache
lowerCAmelCase_ : Optional[int] = bos_token_id
lowerCAmelCase_ : str = eos_token_id
lowerCAmelCase_ : Optional[Any] = attention_types
lowerCAmelCase_ : Optional[Any] = self.expand_attention_types_params(a_ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
f'''`config.num_layers = {self.num_layers}`. '''
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=a_ , eos_token_id=a_ , **a_ )
@staticmethod
def lowerCamelCase ( a_ : Optional[Any] ):
lowerCAmelCase_ : int = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
"""simple docstring"""
import torch
lowerCAmelCase_ : str = input.size()
lowerCAmelCase_ : List[Any] = len(__UpperCamelCase )
lowerCAmelCase_ : Tuple = shape[dimension]
lowerCAmelCase_ : Tuple = torch.arange(0 , __UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ : List[Any] = torch.div(sizedim - size , __UpperCamelCase , rounding_mode="floor" ) + 1
lowerCAmelCase_ : Dict = torch.arange(__UpperCamelCase ) + low_indices[:min_length][:, None]
lowerCAmelCase_ : Tuple = [slice(__UpperCamelCase )] * rank
lowerCAmelCase_ : List[str] = indices
lowerCAmelCase_ : Dict = input[s]
lowerCAmelCase_ : Tuple = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(__UpperCamelCase )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Any:
"""simple docstring"""
import torch
lowerCAmelCase_ : Optional[int] = torch.arange(1 , __UpperCamelCase )
lowerCAmelCase_ : Tuple = torch.remainder(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ : Tuple = remainders == 0
lowerCAmelCase_ : List[Any] = candidates[divisor_indices]
lowerCAmelCase_ : List[str] = torch.max(__UpperCamelCase )
return largest_divisor, torch.div(__UpperCamelCase , __UpperCamelCase , rounding_mode="floor" )
class __lowerCamelCase ( A__ ):
'''simple docstring'''
@property
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Any = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(a_ , direction="inputs" )
lowerCAmelCase_ : int = {0: "batch", 1: "past_sequence + sequence"}
else:
lowerCAmelCase_ : str = {0: "batch", 1: "sequence"}
return common_inputs
@property
def lowerCamelCase ( self : int ):
return self._config.num_heads
def lowerCamelCase ( self : Optional[Any] , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , ):
lowerCAmelCase_ : int = super(a_ , self ).generate_dummy_inputs(
a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase_ : str = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCAmelCase_ : str = seqlen + 2
lowerCAmelCase_ : Tuple = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase_ : Optional[int] = [
(torch.zeros(a_ ), torch.zeros(a_ )) for _ in range(self.num_layers )
]
lowerCAmelCase_ : Tuple = common_inputs["attention_mask"]
if self.use_past:
lowerCAmelCase_ : List[str] = ordered_inputs["attention_mask"].dtype
lowerCAmelCase_ : Optional[Any] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(a_ , a_ , dtype=a_ )] , dim=1 )
return ordered_inputs
@property
def lowerCamelCase ( self : Union[str, Any] ):
return 13
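# --- Added example (not part of the original module): how `attention_types`
# expands to one attention kind per layer, which the __init__ check relies on.
assert GPTNeoConfig.expand_attention_types_params([[["global", "local"], 2]]) == [
    "global",
    "local",
    "global",
    "local",
]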
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
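# --- Added note as code comments (not part of the original file): what the
# `_LazyModule` pattern buys. The import paths below assume the usual
# transformers package layout.
#
# from transformers.models.mra import MraConfig   # cheap: no torch import yet
# from transformers.models.mra import MraModel    # first access triggers modeling_mra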
def gnome_sort(lst: list) -> list:
    """Pure implementation of the gnome sort algorithm in Python."""
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
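# --- Added quick check (not part of the original file).
assert gnome_sort([5, 3, 1, 4]) == [1, 3, 4, 5]
assert gnome_sort([]) == []
assert gnome_sort(["b", "a"]) == ["a", "b"]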
def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring using Manacher's algorithm."""
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
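# --- Added examples (not part of the original file): the "|" interleaving lets
# a single Manacher pass find both odd- and even-length palindromes.
assert palindromic_string("abbbaba") == "abbba"
assert palindromic_string("forgeeksskeegfor") == "geeksskeeg"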
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit string from big-endian to little-endian word order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Return the little-endian hex representation of a non-negative integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message into a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Add two numbers modulo 2^32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-char little-endian hex MD5 digest of `message`."""
    # Convert to bit string, add padding and append message length
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
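# --- Added sanity check (not part of the original file): the pure-Python
# digest should agree with hashlib on known inputs.
import hashlib

for msg in (b"", b"The quick brown fox jumps over the lazy dog"):
    assert md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8")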
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
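# --- Added sketch (not part of the original tests): the generate/decode round
# trip the integration test performs, shown standalone. Needs TF and downloads
# the 400M-distill checkpoint.
#
# tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
# model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
# ids = model.generate(tokenizer(["My friends are cool"], return_tensors="tf").input_ids)
# print(tokenizer.batch_decode(ids.numpy(), skip_special_tokens=True)[0])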
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two initializers while ignoring their names.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model to reduce its size.
    Writes a new file in the same directory as onnx_file_path and returns the path to that file.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
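# --- Added usage sketch (not part of the original script); the path is a
# placeholder.
#
# optimized_path = remove_dup_initializers("exported/model.onnx")
# print("wrote", optimized_path)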
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, otherwise False."""
    # precondition
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def __lowerCamelCase ( __snake_case : Tuple ) -> Any:
"""simple docstring"""
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
A__ : Union[str, Any] =list(range(2, n + 1 ) )
A__ : Any =[] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
for j in range(i + 1, len(__SCREAMING_SNAKE_CASE ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
A__ : List[Any] =0
# filters actual prime numbers.
A__ : Optional[int] =[x for x in begin_list if x != 0]
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def __lowerCamelCase ( __snake_case : Dict ) -> Any:
"""simple docstring"""
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
A__ : Union[str, Any] =[]
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2, n + 1 ):
if is_prime(__SCREAMING_SNAKE_CASE ):
ans.append(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def __lowerCamelCase ( __snake_case : Any ) -> int:
"""simple docstring"""
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) and number >= 0, "'number' must been an int and >= 0"
A__ : Tuple =[] # this list will be returns of the function.
# potential prime number factors.
A__ : str =2
A__ : Tuple =number
if number == 0 or number == 1:
ans.append(__SCREAMING_SNAKE_CASE )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__SCREAMING_SNAKE_CASE ):
while quotient != 1:
if is_prime(__SCREAMING_SNAKE_CASE ) and (quotient % factor == 0):
ans.append(__SCREAMING_SNAKE_CASE )
quotient /= factor
else:
factor += 1
else:
ans.append(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Any:
"""simple docstring"""
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' bust been an int and >= 0"
A__ : Union[str, Any] =0
# prime factorization of 'number'
A__ : str =prime_factorization(__SCREAMING_SNAKE_CASE )
A__ : Dict =max(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ), "'ans' must been from type int"
return ans
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' bust been an int and >= 0"
A__ : List[str] =0
# prime factorization of 'number'
A__ : Dict =prime_factorization(__SCREAMING_SNAKE_CASE )
A__ : List[Any] =min(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ), "'ans' must been from type int"
return ans
def __lowerCamelCase ( __snake_case : int ) -> List[Any]:
"""simple docstring"""
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ), "'number' must been an int"
assert isinstance(number % 2 == 0, __SCREAMING_SNAKE_CASE ), "compare bust been from type bool"
return number % 2 == 0
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Any:
"""simple docstring"""
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ), "'number' must been an int"
assert isinstance(number % 2 != 0, __SCREAMING_SNAKE_CASE ), "compare bust been from type bool"
return number % 2 != 0
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
assert (
isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) and (number > 2) and is_even(__SCREAMING_SNAKE_CASE )
), "'number' must been an int, even and > 2"
A__ : Dict =[] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
A__ : Union[str, Any] =get_prime_numbers(__SCREAMING_SNAKE_CASE )
A__ : Optional[Any] =len(__SCREAMING_SNAKE_CASE )
# run variable for while-loops.
A__ : List[str] =0
A__ : Any =None
# exit variable. for break up the loops
A__ : Tuple =True
while i < len_pn and loop:
A__ : str =i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
A__ : Union[str, Any] =False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
and (len(__SCREAMING_SNAKE_CASE ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor of two non-negative integers (Euclidean algorithm)."""
    # precondition
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Any ) -> List[str]:
"""simple docstring"""
assert (
isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
and isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
A__ : int =1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
A__ : Optional[int] =prime_factorization(__SCREAMING_SNAKE_CASE )
A__ : Union[str, Any] =prime_factorization(__SCREAMING_SNAKE_CASE )
elif numbera == 1 or numbera == 1:
A__ : Tuple =[]
A__ : Optional[Any] =[]
A__ : Union[str, Any] =max(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
A__ : List[str] =0
A__ : Optional[Any] =0
A__ : Dict =[] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
A__ : int =prime_fac_a.count(__SCREAMING_SNAKE_CASE )
A__ : int =prime_fac_a.count(__SCREAMING_SNAKE_CASE )
for _ in range(max(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) ):
ans *= n
else:
A__ : Optional[Any] =prime_fac_a.count(__SCREAMING_SNAKE_CASE )
for _ in range(__SCREAMING_SNAKE_CASE ):
ans *= n
done.append(__SCREAMING_SNAKE_CASE )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
A__ : Tuple =prime_fac_a.count(__SCREAMING_SNAKE_CASE )
for _ in range(__SCREAMING_SNAKE_CASE ):
ans *= n
done.append(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> Any:
"""simple docstring"""
assert isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) and (n >= 0), "'number' must been a positive int"
A__ : Union[str, Any] =0
A__ : Union[str, Any] =2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
    # runs to the next prime number.
    while not is_prime(ans):
        ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """
    Returns a list of all prime numbers strictly between the two
    prime arguments 'p_number_1' < 'p_number_2'.
    """

    # precondition
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """
    Returns all divisors of 'n' (inclusive 1 and 'n').
    """

    # precondition
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """
    Checks if 'number' is a perfect number: the sum of its proper
    divisors (all divisors except the number itself) equals the number.
    """

    # precondition
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """
    Reduces 'numerator' / 'denominator' to lowest terms.
    """

    # precondition
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """
    Returns n! (the factorial of 'n', n >= 0).
    """

    # precondition
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fibonacci(n: int) -> int:
    """
    Returns the n-th Fibonacci number ('n' >= 0).
    """

    # precondition
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
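# Illustrative usage (editor's sketch): exercises the helpers above, assuming
# the reconstructed names and the is_prime()/gcd() helpers defined earlier in
# this file.
if __name__ == "__main__":
    print(get_primes_between(3, 23))  # [5, 7, 11, 13, 17, 19]
    print(get_divisors(28))           # [1, 2, 4, 7, 14, 28]
    print(is_perfect_number(28))      # True: 1 + 2 + 4 + 7 + 14 == 28
    print(simplify_fraction(10, 20))  # (1, 2)
    print(factorial(5))               # 120
    print(fibonacci(7))               # 21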
| 370 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    """Parse the pytest summary line into (failed, success, time_spent)."""
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
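# A quick worked example (editor's sketch) for the parser above: pytest ends a
# short run with a line such as "== 4 failed, 120 passed in 1:02:13 ==", whose
# trailing "==" token makes the time land in expressions[-2]:
#   >>> handle_test_results("== 4 failed, 120 passed in 1:02:13 ==")
#   (4, 120, '1:02:13')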
def extract_first_line_failure(failures_short_lines):
    """Map each failing doctest to the first line of its error message."""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
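# Editor's sketch of the expected input (an assumption based on pytest's short
# traceback format for doctests): a "_ [doctest] <name> _" header line names the
# failing test, and the first following line that does not start with a line
# number is kept as its error message, e.g. the two lines
#   _ [doctest] transformers.models.bert.modeling_bert.BertModel.forward _
#   ValueError: some error
# produce {"transformers.models.bert.modeling_bert.BertModel.forward": "ValueError: some error"}.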
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    """Extract job names and their corresponding html_url links from the GitHub Actions run."""
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
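# Editor's note (an assumption about the GitHub Actions API response shape):
# the returned mapping is job name -> html_url, e.g.
#   {"run_doctests": "https://github.com/huggingface/transformers/actions/runs/<run_id>/job/<job_id>", ...}
# with pagination handled 100 jobs at a time through the `page` query parameter.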
def retrieve_artifact(name: str):
    """Read every text file of a downloaded artifact directory into a dict."""
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    # key files by their basename without extension, e.g. "stats.txt" -> "stats"
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    """Collect every artifact directory in the working directory into Artifact objects."""

    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
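# Editor's sketch: after the workflow downloads its artifacts into the current
# working directory, every extracted artifact directory becomes one entry, e.g.
#   artifacts = retrieve_available_artifacts()
#   artifacts["doc_tests_gpu_test_reports"].paths
#   # -> [{"name": "doc_tests_gpu_test_reports", "path": "doc_tests_gpu_test_reports"}]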
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 136 | 0 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 16 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Compute the missing one of voltage, current or power; exactly one argument must be 0."""
    result = namedtuple("result", "name value")
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 123 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__A = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 366 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
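# Editor's note: the scalar above acts as a cheap parameter fingerprint for the
# save/load tests below; after load_random_weights() the fingerprint moves, and
# after accelerator.load_state() it returns to within 1e-3 of the saved value.
#   model = torch.nn.Linear(2, 4)
#   sig = get_signature(model)  # changes whenever the parameters change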
def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
@require_cuda
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase_= Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__UpperCAmelCase ):
UpperCAmelCase_= Accelerator(cpu=__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_= GradientState()
assert state.num_steps == 1
UpperCAmelCase_= 4
assert state.num_steps == 4
assert state.sync_gradients is True
UpperCAmelCase_= False
assert state.sync_gradients is False
GradientState._reset_state()
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
(
(
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
),
)= accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__UpperCAmelCase : Dict , **__UpperCAmelCase : Tuple ):
pass
with patch("""torch.cuda.set_device""" , __UpperCAmelCase ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
UpperCAmelCase_= Accelerator()
self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_= get_signature(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase )
# make sure random weights don't match
load_random_weights(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1E-3 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_= get_signature(__UpperCAmelCase )
# saving hook
def save_config(__UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple ):
UpperCAmelCase_= {"""class_name""": models[0].__class__.__name__}
with open(os.path.join(__UpperCAmelCase , """data.json""" ) , """w""" ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
# loading hook
def load_config(__UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ):
with open(os.path.join(__UpperCAmelCase , """data.json""" ) , """r""" ) as f:
UpperCAmelCase_= json.load(__UpperCAmelCase )
UpperCAmelCase_= config["""class_name"""]
UpperCAmelCase_= accelerator.register_save_state_pre_hook(__UpperCAmelCase )
UpperCAmelCase_= accelerator.register_load_state_pre_hook(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase )
# make sure random weights don't match with hooks
load_random_weights(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
UpperCAmelCase_= """random"""
# make sure loaded weights match with hooks
accelerator.load_state(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase )
# make sure random weights don't match with hooks removed
load_random_weights(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
UpperCAmelCase_= """random"""
# make sure loaded weights match with hooks removed
accelerator.load_state(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
UpperCAmelCase_= None
# This should work
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(dummy_obj is None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
UpperCAmelCase_= [1, 2, 3]
# This should work
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
@slow
@require_bnb
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import AutoModelForCausalLM
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__UpperCAmelCase , device_map={"""""": 0} , )
UpperCAmelCase_= Accelerator()
# This should work
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
@slow
@require_bnb
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
from transformers import AutoModelForCausalLM
UpperCAmelCase_= Accelerator()
with init_empty_weights():
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
UpperCAmelCase_= infer_auto_device_map(__UpperCAmelCase )
UpperCAmelCase_= """cpu"""
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , device_map=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , llm_inta_enable_fpaa_cpu_offload=__UpperCAmelCase )
# This should not work and get value error
with self.assertRaises(__UpperCAmelCase ):
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
@slow
@require_bnb
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
from transformers import AutoModelForCausalLM
UpperCAmelCase_= {"""distributed_type""": DistributedType.MULTI_GPU}
with init_empty_weights():
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
UpperCAmelCase_= infer_auto_device_map(__UpperCAmelCase )
UpperCAmelCase_= 1
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
UpperCAmelCase_= Accelerator()
# This should not work and get value error
with self.assertRaises(__UpperCAmelCase ):
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
from transformers import AutoModelForCausalLM
with init_empty_weights():
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
UpperCAmelCase_= infer_auto_device_map(__UpperCAmelCase )
UpperCAmelCase_= 1
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
UpperCAmelCase_= Accelerator()
# This should work
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
@require_cuda
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_= torch.nn.Linear(10 , 10 )
UpperCAmelCase_= torch.optim.SGD(model.parameters() , lr=0.01 )
UpperCAmelCase_= Accelerator(cpu=__UpperCAmelCase )
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
| 277 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 220 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ):
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _lowerCamelCase , )
lowercase = kwargs.pop('feature_extractor' )
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_lowerCamelCase , _lowerCamelCase )
lowercase = self.image_processor
lowercase = False
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
lowercase = kwargs.pop('images' , _lowerCamelCase )
lowercase = kwargs.pop('text' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowercase = args[0]
lowercase = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
lowercase = self.image_processor(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
if text is not None:
lowercase = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowercase = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def UpperCamelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@contextmanager
def UpperCamelCase_ ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
lowercase = True
lowercase = self.tokenizer
yield
lowercase = self.image_processor
lowercase = False
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=None ):
if added_vocab is None:
lowercase = self.tokenizer.get_added_vocab()
lowercase = {}
while tokens:
lowercase = re.search(R'<s_(.*?)>' , _lowerCamelCase , re.IGNORECASE )
if start_token is None:
break
lowercase = start_token.group(1 )
lowercase = re.search(RF'</s_{key}>' , _lowerCamelCase , re.IGNORECASE )
lowercase = start_token.group()
if end_token is None:
lowercase = tokens.replace(_lowerCamelCase , '' )
else:
lowercase = end_token.group()
lowercase = re.escape(_lowerCamelCase )
lowercase = re.escape(_lowerCamelCase )
lowercase = re.search(F'{start_token_escaped}(.*?){end_token_escaped}' , _lowerCamelCase , re.IGNORECASE )
if content is not None:
lowercase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowercase = self.tokenajson(_lowerCamelCase , is_inner_value=_lowerCamelCase , added_vocab=_lowerCamelCase )
if value:
if len(_lowerCamelCase ) == 1:
lowercase = value[0]
lowercase = value
else: # leaf nodes
lowercase = []
for leaf in content.split(R'<sep/>' ):
lowercase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowercase = leaf[1:-2] # for categorical special tokens
output[key].append(_lowerCamelCase )
if len(output[key] ) == 1:
lowercase = output[key][0]
lowercase = tokens[tokens.find(_lowerCamelCase ) + len(_lowerCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_lowerCamelCase , added_vocab=_lowerCamelCase )
if len(_lowerCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowerCamelCase , )
return self.image_processor_class
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowerCamelCase , )
return self.image_processor
| 220 | 1 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers, starting from 1."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    # + 1 compensates for the generator skipping the second 1 of the sequence
    return answer + 1
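# Editor's quick check (assumes the reconstructed names above): the first
# Fibonacci term with 3 digits is F(12) = 144, so:
#   >>> solution(3)
#   12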
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 208 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 208 | 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
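# Editor's sketch of the three branches (outputs abbreviated):
#   create_dummy_object("CONSTANT", '["torch"]')  -> "\nCONSTANT = None\n"
#   create_dummy_object("function", '["torch"]')  -> a def stub that calls requires_backends
#   create_dummy_object("SomeClass", '["torch"]') -> a DummyObject-metaclass class stub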
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite) | 6 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Download an Instagram video/IGTV clip via the downloadgram.net API."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F'Done. Video saved to disk as {file_name}.')
| 273 | 0 |
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # map from [-1, 1] to uint8-range numpy images in HWC layout
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        # map back to [-1, 1]
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
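# Editor's usage sketch (the class/method names are the ones reconstructed
# above; requires the `invisible-watermark` package to be installed):
#   watermarker = StableDiffusionXLWatermarker()
#   images = torch.randn(1, 3, 512, 512).clamp(-1.0, 1.0)
#   watermarked = watermarker.apply_watermark(images)  # same shape, values back in [-1, 1]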
| 195 |
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, optionally rounded to digit_amount digits."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
| 195 | 1 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: list):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
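# Editor's worked example: with chinese_word_set = {"身高"}, the token list
# ["身", "高", "180"] becomes ["身", "##高", "180"]. Continuation characters of a
# recognized whole word get a "##" prefix so whole-word masking can group them:
#   >>> add_sub_symbol(["身", "高", "180"], {"身高"})
#   ['身', '##高', '180']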
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
    args = parser.parse_args()
main(args)
| 264 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
def __init__( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int=13 , lowercase_ : Optional[int]=7 , lowercase_ : Any=True , lowercase_ : Dict=True , lowercase_ : Dict=True , lowercase_ : Optional[Any]=99 , lowercase_ : Union[str, Any]=32 , lowercase_ : str=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : Any=37 , lowercase_ : Tuple="gelu" , lowercase_ : Dict=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Optional[Any]=16 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Union[str, Any]=4 , lowercase_ : List[Any]=None , ):
snake_case_ : Any = parent
snake_case_ : List[str] = batch_size
snake_case_ : List[Any] = seq_length
snake_case_ : Optional[int] = is_training
snake_case_ : Union[str, Any] = use_token_type_ids
snake_case_ : Optional[Any] = use_labels
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : Any = hidden_size
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Optional[int] = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : Tuple = max_position_embeddings
snake_case_ : int = type_vocab_size
snake_case_ : Tuple = type_sequence_label_size
snake_case_ : str = initializer_range
snake_case_ : Tuple = num_labels
snake_case_ : str = num_choices
snake_case_ : Any = scope
snake_case_ : Dict = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict['''input_ids'''] = inputs_dict['''labels''']
                inputs_dict['''token_type_ids'''] = inputs_dict['''labels''']
                inputs_dict['''mc_token_ids'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict['''mc_labels'''] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 264 | 1 |
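The test file above follows the ModelTester convention: a plain helper class builds a config plus random inputs, each create_and_check_* method instantiates one model head and asserts output shapes, and unittest drives the whole thing. A dependency-free toy version of that flow (every name below is illustrative, not part of transformers):

import unittest

class ToyConfig:
    def __init__(self, vocab_size=99, hidden_size=32):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

class ToyModel:
    def __init__(self, config):
        self.config = config

    def __call__(self, input_ids):
        # Stand-in forward pass: one hidden vector per token.
        return [[0.0] * self.config.hidden_size for _ in input_ids]

class ToyModelTester:
    def __init__(self, parent, seq_length=7):
        self.parent = parent
        self.seq_length = seq_length

    def prepare_config_and_inputs(self):
        return ToyConfig(), list(range(self.seq_length))

    def create_and_check_model(self, config, input_ids):
        result = ToyModel(config)(input_ids)
        self.parent.assertEqual((len(result), len(result[0])), (self.seq_length, config.hidden_size))

class ToyModelTest(unittest.TestCase):
    def test_model(self):
        tester = ToyModelTester(self)
        tester.create_and_check_model(*tester.prepare_config_and_inputs())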
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''encoder-decoder'''
    is_composition = True

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 145 |
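The composite config above stores one sub-config per component and re-serializes them in to_dict(). The round-trip can be sketched without transformers (class names here are illustrative):

import copy

class SubConfig:
    def __init__(self, model_type='toy', hidden_size=16):
        self.model_type = model_type
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class ComposedConfig:
    model_type = 'composed'

    def __init__(self, encoder: dict, decoder: dict):
        self.encoder = SubConfig(**encoder)
        self.decoder = SubConfig(**decoder)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.model_type
        return output

cfg = ComposedConfig(encoder={'hidden_size': 32}, decoder={'hidden_size': 64})
serialized = cfg.to_dict()
restored = ComposedConfig(encoder=serialized['encoder'], decoder=serialized['decoder'])
assert restored.decoder.hidden_size == 64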
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) by repeated squaring."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701
a = 1000000000
b = 10

# using the binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 145 | 1 |
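A quick sanity check of the identity the prints above rely on: for a prime modulus p, b ** (p - 2) % p is the modular inverse of b (Fermat's little theorem), and Python's built-in pow performs the same O(log n) squaring:

p = 701  # prime modulus
b = 10
inverse = pow(b, p - 2, p)     # modular inverse of b
assert (b * inverse) % p == 1  # b * b^(-1) is congruent to 1 (mod p)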
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model) -> None:
    """simple docstring"""
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
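Checkpoint converters like the one above share a three-step skeleton: build a config, populate a model from the foreign weights, and serialize the result. A framework-free sketch of the skeleton (file formats simplified to JSON; the real script uses a TensorFlow checkpoint and torch.save):

import json

def convert_checkpoint_sketch(config_file: str, weights_file: str, dump_path: str) -> None:
    # 1. Initialise the target model from its configuration.
    with open(config_file) as f:
        model = {'config': json.load(f), 'weights': {}}
    # 2. Load weights from the source checkpoint.
    with open(weights_file) as f:
        model['weights'].update(json.load(f))
    # 3. Save the converted model.
    with open(dump_path, 'w') as f:
        json.dump(model, f)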
| 142 | '''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_lowercase : Tuple = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = '''altclip_text_model'''
    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = '''altclip_vision_model'''
    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
            config_dict = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowercase_ , **lowercase_ )
class AltCLIPConfig(PretrainedConfig):
    model_type = '''altclip'''
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
        text_config_dict = kwargs.pop("""text_config_dict""", None)
        vision_config_dict = kwargs.pop("""vision_config_dict""", None)

        super().__init__(**kwargs)
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
                text_config = {}
# This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
                        message = (
f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
f'''The value `text_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
                        message = (
f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
f'''value `text_config["{key}"]` will be overriden.'''
)
                    logger.warning(message)
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
                vision_config = {}
# This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
                _vision_config_dict["""id2label"""] = {
                    str(key): value for key, value in _vision_config_dict["""id2label"""].items()
                }
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
                        message = (
f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
                        message = (
f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
f'''The value `vision_config["{key}"]` will be overriden.'''
)
                    logger.warning(message)
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
            text_config = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
            vision_config = {}
logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" )
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
@classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 239 | 0 |
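The long __init__ above applies one idea twice: merge a *_config_dict into *_config, warning whenever the two disagree on a key. The core of that merge, reduced to plain dicts (a sketch, not the library's API):

import logging

logger = logging.getLogger(__name__)

def merge_with_warnings(config: dict, config_dict: dict, name: str) -> dict:
    for key, value in config_dict.items():
        if key in config and config[key] != value and key != 'transformers_version':
            logger.warning(
                '`%s` is found in both `%s_config_dict` and `%s_config` with different values; '
                'the `%s_config_dict` value will be used.', key, name, name, name)
    merged = dict(config)
    merged.update(config_dict)  # the *_config_dict values win on conflicts
    return merged

merged = merge_with_warnings({'hidden_size': 512}, {'hidden_size': 1024}, 'text')
assert merged['hidden_size'] == 1024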
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    url = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join('''* [{title}]({url})'''.format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 355 |
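The helpers above assume every request succeeds. Against a live API it is safer to set a timeout and surface HTTP errors; a hedged variant of get_hackernews_story:

import requests

def get_hackernews_story_safe(story_id: int, timeout: float = 10.0) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()  # raise on 4xx/5xx instead of parsing an error body
    return response.json()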
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.speecht5''')
MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
for attribute in key.split('''.''' ):
UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
UpperCAmelCase__ : List[str] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
UpperCAmelCase__ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Tuple = value
elif weight_type == "weight_v":
UpperCAmelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCAmelCase__ : int = value
elif weight_type == "running_mean":
UpperCAmelCase__ : int = value
elif weight_type == "running_var":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase__ : List[Any] = value
else:
UpperCAmelCase__ : Union[str, Any] = value
logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def should_ignore(name, ignore_keys):
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix, suffix = key.split('''.*.''')
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def recursively_load_weights(fairseq_dict, hf_model, task):
UpperCAmelCase__ : int = []
if task == "s2t":
UpperCAmelCase__ : Optional[Any] = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : List[Any] = MAPPING_S2T
UpperCAmelCase__ : int = IGNORE_KEYS_S2T
elif task == "t2s":
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Tuple = MAPPING_T2S
UpperCAmelCase__ : Union[str, Any] = IGNORE_KEYS_T2S
elif task == "s2s":
UpperCAmelCase__ : Optional[int] = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : Tuple = MAPPING_S2S
UpperCAmelCase__ : int = IGNORE_KEYS_S2S
else:
raise ValueError(F"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(lowerCAmelCase__ , lowerCAmelCase__ ):
logger.info(F"""{name} was ignored""" )
continue
UpperCAmelCase__ : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase__ : Tuple = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
                    prefix, suffix = key.split('''.*.''')
if prefix in name and suffix in name:
UpperCAmelCase__ : List[str] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
UpperCAmelCase__ : Optional[int] = True
if "*" in mapped_key:
UpperCAmelCase__ : Any = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
UpperCAmelCase__ : Union[str, Any] = mapped_key.replace('''*''' , lowerCAmelCase__ )
if "weight_g" in name:
UpperCAmelCase__ : Dict = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase__ : Union[str, Any] = '''weight_v'''
elif "bias" in name:
UpperCAmelCase__ : Optional[int] = '''bias'''
elif "weight" in name:
UpperCAmelCase__ : Optional[int] = '''weight'''
elif "running_mean" in name:
UpperCAmelCase__ : Optional[int] = '''running_mean'''
elif "running_var" in name:
UpperCAmelCase__ : List[Any] = '''running_var'''
elif "num_batches_tracked" in name:
UpperCAmelCase__ : Optional[Any] = '''num_batches_tracked'''
else:
UpperCAmelCase__ : Union[str, Any] = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
UpperCAmelCase__ : Optional[int] = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase__ : Optional[Any] = name.split('''.''' )
UpperCAmelCase__ : Any = int(items[0] )
UpperCAmelCase__ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase__ : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase__ : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase__ : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase__ : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None):
if config_path is not None:
UpperCAmelCase__ : Optional[Any] = SpeechTaConfig.from_pretrained(lowerCAmelCase__ )
else:
UpperCAmelCase__ : str = SpeechTaConfig()
if task == "s2t":
UpperCAmelCase__ : str = config.max_text_positions
UpperCAmelCase__ : List[str] = SpeechTaForSpeechToText(lowerCAmelCase__ )
elif task == "t2s":
UpperCAmelCase__ : Tuple = 18_76
UpperCAmelCase__ : int = 6_00
UpperCAmelCase__ : Union[str, Any] = config.max_speech_positions
UpperCAmelCase__ : Optional[Any] = SpeechTaForTextToSpeech(lowerCAmelCase__ )
elif task == "s2s":
UpperCAmelCase__ : Tuple = 18_76
UpperCAmelCase__ : Optional[Any] = config.max_speech_positions
UpperCAmelCase__ : Dict = SpeechTaForSpeechToSpeech(lowerCAmelCase__ )
else:
raise ValueError(F"""Unknown task name: {task}""" )
if vocab_path:
UpperCAmelCase__ : Tuple = SpeechTaTokenizer(lowerCAmelCase__ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
UpperCAmelCase__ : Dict = AddedToken('''<mask>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
UpperCAmelCase__ : int = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
UpperCAmelCase__ : Optional[Any] = SpeechTaFeatureExtractor()
UpperCAmelCase__ : Any = SpeechTaProcessor(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = torch.load(lowerCAmelCase__ )
recursively_load_weights(fairseq_checkpoint['''model'''] , lowerCAmelCase__ , lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowerCAmelCase__ )
model.push_to_hub(lowerCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 299 | 0 |
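With should_ignore restored above, its pattern semantics are easy to spot-check: 'prefix.*' is a prefix wildcard, 'prefix.*.suffix' matches a layer index in the middle, and a bare key matches by substring. The key names below are illustrative:

assert should_ignore('encoder.layers.3.norm_k.weight', ['encoder.layers.*.norm_k.weight'])
assert should_ignore('text_decoder_prenet.embed_positions._float_tensor', ['text_decoder_prenet.*'])
assert not should_ignore('speecht5.encoder.layer_norm.weight', ['decoder.pos_emb.pe_k'])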
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """simple docstring"""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''')
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 235 |
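The pipeline above splits work into preprocess, _forward, and postprocess, with _sanitize_parameters routing keyword arguments to each stage. A toy pipeline with the same three-stage contract (no transformers dependency; the 'model' just measures string length):

class ToyPipeline:
    def _sanitize_parameters(self, uppercase=False, **kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {'uppercase': uppercase}
        return preprocess_params, forward_params, postprocess_params

    def preprocess(self, text):
        return {'text': text.strip()}

    def _forward(self, model_inputs):
        return {'text': model_inputs['text'], 'length': len(model_inputs['text'])}

    def postprocess(self, model_outputs, uppercase=False):
        text = model_outputs['text'].upper() if uppercase else model_outputs['text']
        return {'text': text, 'length': model_outputs['length']}

    def __call__(self, text, **kwargs):
        pre, fwd, post = self._sanitize_parameters(**kwargs)
        return self.postprocess(self._forward(self.preprocess(text, **pre)), **post)

print(ToyPipeline()('  hello  ', uppercase=True))  # {'text': 'HELLO', 'length': 5}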
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
    """Solve for whichever of stress, tangential force, or area is given as 0."""
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 139 | 0 |
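Assuming the corrected signature above, a quick usage check of the zero-for-unknown convention: exactly one argument is passed as 0 and the function returns that quantity's name and value, using the relation tau = F / A.

assert shear_stress(stress=25, tangential_force=100, area=0) == ('area', 4.0)
assert shear_stress(stress=0, tangential_force=1600, area=200) == ('stress', 8.0)
assert shear_stress(stress=1000, tangential_force=0, area=1200) == ('tangential_force', 1200000)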
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case = "eval" ) -> int:
'''simple docstring'''
__a = self.eval_dataset if eval_dataset is None else eval_dataset
__a = self.get_eval_dataloader(lowercase__ )
__a = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__a = self.compute_metrics
__a = None
__a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__a = time.time()
try:
__a = eval_loop(
lowercase__ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase__ , metric_key_prefix=lowercase__ , )
finally:
__a = compute_metrics
__a = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
lowercase__ , lowercase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__a = self.post_process_function(lowercase__ , lowercase__ , output.predictions )
__a = self.compute_metrics(lowercase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__a = metrics.pop(lowercase__ )
metrics.update(output.metrics )
else:
__a = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__a = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase__ )
return metrics
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case=None , _snake_case = "test" ) -> Any:
'''simple docstring'''
__a = self.get_test_dataloader(lowercase__ )
# Temporarily disable metric computation, we will do it in the loop here.
__a = self.compute_metrics
__a = None
__a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__a = time.time()
try:
__a = eval_loop(
lowercase__ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase__ , metric_key_prefix=lowercase__ , )
finally:
__a = compute_metrics
__a = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
lowercase__ , lowercase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__a = self.post_process_function(lowercase__ , lowercase__ , output.predictions , '''predict''' )
__a = self.compute_metrics(lowercase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__a = metrics.pop(lowercase__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase__ ) | 357 |
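Both evaluate and predict above time the evaluation loop, scale the per-device batch size by world size, and derive throughput. The arithmetic performed by speed_metrics, in isolation (a sketch of the computation, not the transformers helper itself):

import math
import time

def speed_metrics_sketch(prefix, start_time, num_samples, num_steps):
    runtime = time.time() - start_time
    return {
        f'{prefix}_runtime': round(runtime, 4),
        f'{prefix}_samples_per_second': round(num_samples / runtime, 3),
        f'{prefix}_steps_per_second': round(num_steps / runtime, 3),
    }

eval_batch_size, world_size, num_samples = 8, 2, 1000
total_batch_size = eval_batch_size * world_size
start = time.time()
time.sleep(0.01)  # stand-in for the evaluation loop
print(speed_metrics_sketch('eval', start, num_samples, math.ceil(num_samples / total_batch_size)))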
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 33 | 0 |
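The stub above defers heavy submodule imports until an attribute is first touched. The same effect can be sketched with PEP 562's module-level __getattr__ (a simplified stand-in for _LazyModule; the package layout is hypothetical):

# lazy_pkg/__init__.py -- hypothetical package
import importlib

_import_structure = {'tokenization': ['Tokenizer'], 'modeling': ['Model']}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
    module = importlib.import_module(f'.{module_name}', __name__)
    return getattr(module, name)  # the submodule is imported only on first access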
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
"""simple docstring"""
for attribute in key.split('''.''' ):
lowerCamelCase_ =getattr(__snake_case , __snake_case )
if weight_type is not None:
lowerCamelCase_ =getattr(__snake_case , __snake_case ).shape
else:
lowerCamelCase_ =hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowerCamelCase_ =value
elif weight_type == "weight_g":
lowerCamelCase_ =value
elif weight_type == "weight_v":
lowerCamelCase_ =value
elif weight_type == "bias":
lowerCamelCase_ =value
else:
lowerCamelCase_ =value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =fairseq_model.state_dict()
lowerCamelCase_ =hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase_ =False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , )
lowerCamelCase_ =True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase_ ='''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCamelCase_ =True
if "*" in mapped_key:
lowerCamelCase_ =name.split(__snake_case )[0].split('''.''' )[-2]
lowerCamelCase_ =mapped_key.replace('''*''' , __snake_case )
if "weight_g" in name:
lowerCamelCase_ ='''weight_g'''
elif "weight_v" in name:
lowerCamelCase_ ='''weight_v'''
elif "weight" in name:
lowerCamelCase_ ='''weight'''
elif "bias" in name:
lowerCamelCase_ ='''bias'''
else:
lowerCamelCase_ =None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
"""simple docstring"""
lowerCamelCase_ =full_name.split('''conv_layers.''' )[-1]
lowerCamelCase_ =name.split('''.''' )
lowerCamelCase_ =int(items[0] )
lowerCamelCase_ =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowerCamelCase_ =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowerCamelCase_ =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowerCamelCase_ =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowerCamelCase_ =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__snake_case )
def convert_config(model, is_finetuned):
"""simple docstring"""
lowerCamelCase_ =SEWConfig()
if is_finetuned:
lowerCamelCase_ =model.wav_encoder.wav_model.cfg
else:
lowerCamelCase_ =model.cfg
lowerCamelCase_ =fs_config.conv_bias
lowerCamelCase_ =eval(fs_config.conv_feature_layers )
lowerCamelCase_ =[x[0] for x in conv_layers]
lowerCamelCase_ =[x[1] for x in conv_layers]
lowerCamelCase_ =[x[2] for x in conv_layers]
lowerCamelCase_ ='''gelu'''
lowerCamelCase_ ='''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
lowerCamelCase_ =0.0
lowerCamelCase_ =fs_config.activation_fn.name
lowerCamelCase_ =fs_config.encoder_embed_dim
lowerCamelCase_ =0.0_2
lowerCamelCase_ =fs_config.encoder_ffn_embed_dim
lowerCamelCase_ =1e-5
lowerCamelCase_ =fs_config.encoder_layerdrop
lowerCamelCase_ =fs_config.encoder_attention_heads
lowerCamelCase_ =fs_config.conv_pos_groups
lowerCamelCase_ =fs_config.conv_pos
lowerCamelCase_ =len(__snake_case )
lowerCamelCase_ =fs_config.encoder_layers
lowerCamelCase_ =fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
lowerCamelCase_ =model.cfg
lowerCamelCase_ =fs_config.final_dropout
lowerCamelCase_ =fs_config.layerdrop
lowerCamelCase_ =fs_config.activation_dropout
lowerCamelCase_ =fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
lowerCamelCase_ =fs_config.attention_dropout
lowerCamelCase_ =fs_config.dropout_input
lowerCamelCase_ =fs_config.dropout
lowerCamelCase_ =fs_config.mask_channel_length
lowerCamelCase_ =fs_config.mask_channel_prob
lowerCamelCase_ =fs_config.mask_length
lowerCamelCase_ =fs_config.mask_prob
lowerCamelCase_ ='''Wav2Vec2FeatureExtractor'''
lowerCamelCase_ ='''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
"""simple docstring"""
if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
if config_path is not None:
lowerCamelCase_ =SEWConfig.from_pretrained(__snake_case )
else:
lowerCamelCase_ =convert_config(model[0] , __snake_case )
lowerCamelCase_ =model[0].eval()
lowerCamelCase_ =True if config.feat_extract_norm == '''layer''' else False
lowerCamelCase_ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
if is_finetuned:
if dict_path:
lowerCamelCase_ =Dictionary.load(__snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase_ =target_dict.pad_index
lowerCamelCase_ =target_dict.bos_index
lowerCamelCase_ =target_dict.pad_index
lowerCamelCase_ =target_dict.bos_index
lowerCamelCase_ =target_dict.eos_index
lowerCamelCase_ =len(target_dict.symbols )
lowerCamelCase_ =os.path.join(__snake_case , '''vocab.json''' )
if not os.path.isdir(__snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__snake_case ) )
return
os.makedirs(__snake_case , exist_ok=__snake_case )
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , __snake_case )
lowerCamelCase_ =WavaVecaCTCTokenizer(
__snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__snake_case , )
lowerCamelCase_ =WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
processor.save_pretrained(__snake_case )
lowerCamelCase_ =SEWForCTC(__snake_case )
else:
lowerCamelCase_ =SEWModel(__snake_case )
feature_extractor.save_pretrained(__snake_case )
recursively_load_weights(__snake_case , __snake_case , __snake_case )
hf_model.save_pretrained(__snake_case )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 75 |
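set_recursively above walks a dotted key with getattr before assigning a tensor. Its traversal logic, separated from torch (a minimal sketch with illustrative classes):

def set_by_path(root, dotted_key, value):
    *parents, leaf = dotted_key.split('.')
    target = root
    for attribute in parents:
        target = getattr(target, attribute)  # descend, e.g. encoder -> layers -> 0
    setattr(target, leaf, value)

class Leaf:  # stand-in for a module holding a weight
    weight = None

class Root:
    encoder = Leaf()

root = Root()
set_by_path(root, 'encoder.weight', 3.14)
assert root.encoder.weight == 3.14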
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Crack a Caesar cipher by choosing the shift whose plaintext minimises chi-squared."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
'''a''': 0.0_8_4_9_7,
'''b''': 0.0_1_4_9_2,
'''c''': 0.0_2_2_0_2,
'''d''': 0.0_4_2_5_3,
'''e''': 0.1_1_1_6_2,
'''f''': 0.0_2_2_2_8,
'''g''': 0.0_2_0_1_5,
'''h''': 0.0_6_0_9_4,
'''i''': 0.0_7_5_4_6,
'''j''': 0.0_0_1_5_3,
'''k''': 0.0_1_2_9_2,
'''l''': 0.0_4_0_2_5,
'''m''': 0.0_2_4_0_6,
'''n''': 0.0_6_7_4_9,
'''o''': 0.0_7_5_0_7,
'''p''': 0.0_1_9_2_9,
'''q''': 0.0_0_0_9_5,
'''r''': 0.0_7_5_8_7,
'''s''': 0.0_6_3_2_7,
'''t''': 0.0_9_3_5_6,
'''u''': 0.0_2_7_5_8,
'''v''': 0.0_0_9_7_8,
'''w''': 0.0_2_5_6_0,
'''x''': 0.0_0_1_5_0,
'''y''': 0.0_1_9_9_4,
'''z''': 0.0_0_0_7_7,
}
else:
# Custom frequencies dictionary
lowerCamelCase_ =frequencies_dict
if not case_sensitive:
lowerCamelCase_ =ciphertext.lower()
# Chi squared statistic values
lowerCamelCase_ ={}
# cycle through all of the shifts
for shift in range(len(__snake_case ) ):
lowerCamelCase_ =''''''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
lowerCamelCase_ =(alphabet_letters.index(letter.lower() ) - shift) % len(
__snake_case )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
lowerCamelCase_ =0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
lowerCamelCase_ =letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
lowerCamelCase_ =decrypted_with_shift.lower().count(__snake_case )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
lowerCamelCase_ =frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase_ =((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
lowerCamelCase_ =decrypted_with_shift.count(__snake_case )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
lowerCamelCase_ =frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase_ =((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
lowerCamelCase_ =(
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(__snake_case : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
lowerCamelCase_ =min(
__snake_case , key=__snake_case , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
lowerCamelCase_
), (
lowerCamelCase_
),
) =chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
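

if __name__ == "__main__":
    # Minimal demo (illustrative input): "uryyb jbeyq" is "hello world" shifted by 13.
    # Chi-squared scoring is statistical, so very short strings may mis-rank shifts;
    # longer English text makes the correct shift stand out clearly.
    print(decrypt_caesar_with_chi_squared("uryyb jbeyq"))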
| 75 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
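
# Lazy-import scaffolding: `_import_structure` maps each submodule to its public names,
# and `_LazyModule` (installed at the bottom of this file) defers the actual imports of
# heavyweight backends (sentencepiece, torch, TF) until a name is first accessed.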
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 188 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide: str) -> str:
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide: str, overwrite: bool = False) -> None:
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite) | 188 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always
        # return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components
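
    # Note: the keys of `components` mirror the constructor arguments of
    # KandinskyVaaPriorPipeline, so `self.pipeline_class(**components)` in the test
    # below can assemble the pipeline directly from these dummies.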
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 170 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Depth estimation pipeline: predicts a per-pixel depth map for an input image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
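

# Illustrative usage (the checkpoint name is an example, not mandated by this file):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   outputs = depth_estimator("photo.jpg")
#   outputs["depth"].save("depth_map.png")  # PIL image; "predicted_depth" is the raw tensor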
| 170 | 1 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to"
            " this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
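

# Illustrative: get_checkpoint_callback("out", "rouge2") saves checkpoints named from the
# "{val_avg_rouge2:.4f}-{step_count}" pattern (e.g. roughly "0.2143-500.ckpt"), monitoring
# "val_rouge2" and keeping the top 3 by that score.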
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 104 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
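
# Deprecation shim: the subclass keeps old `CLIPFeatureExtractor` imports working while
# steering users toward `CLIPImageProcessor`, which provides the actual implementation.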
| 104 | 1 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
A_ : Optional[int] = logging.getLogger(__name__)
A_ : Optional[int] = 'pytorch_model.bin'
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )
@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and prediction."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )
@dataclasses.dataclass
class STTrainingArguments:
    """Arguments pertaining to the self-training loop itself."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "The maximum number of self-training iterations."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
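

# In short: predictions above the confidence threshold (and/or the top eval_result
# fraction by probability) are promoted to labels and written out as the next
# iteration's training set.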
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training loop: fine-tune, pseudo-label the unlabeled data, and repeat."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
| 192 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    """A progress bar rendered in a notebook, throttled to re-render every `update_every` seconds."""

    warmup = 5
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value, force_update=False, comment=None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    """Tracks training progress plus an inner metrics table and an optional child bar."""

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """A `TrainerCallback` that displays training/evaluation progress in a notebook."""

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            metrics.pop("total_flos", None)
            metrics.pop("epoch", None)
            metrics.pop(f"{metric_key_prefix}_runtime", None)
            metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
| 192 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
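    # Note: ConvBERT replaces half of the attention heads with span-based dynamic
    # convolution (head_ratio=2), which is why the attention-shape checks in the tests
    # below expect num_attention_heads / 2 classic attention heads.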
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
        model = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4 )
| 368 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings( timesteps : jnp.ndarray , embedding_dim : int , freq_shift : float = 1 , min_timescale : float = 1 , max_timescale : float = 1.0e4 , flip_sin_to_cos : bool = False , scale : float = 1.0 , ):
    """simple docstring"""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
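# Shape sanity check for the helper above (values are illustrative only): four
# integer timesteps and an even embedding width yield a (4, 8) array whose two
# halves are the sine and cosine channels, in the order chosen by
# `flip_sin_to_cos`.
#   emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=8)
#   assert emb.shape == (4, 8)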
class FlaxTimestepEmbedding( nn.Module ):
    '''simple docstring'''
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32
    @nn.compact
    def __call__( self , temb ):
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(temb )
        temb = nn.silu(temb )
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(temb )
        return temb
class FlaxTimesteps( nn.Module ):
    '''simple docstring'''
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1
    @nn.compact
    def __call__( self , timesteps ):
        return get_sinusoidal_embeddings(
            timesteps , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 272 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''luke'''
    def __init__( self , vocab_size=50_267 , entity_vocab_size=500_000 , hidden_size=768 , entity_emb_size=256 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
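# Illustrative instantiation (not part of the original module): the defaults
# mirror the studio-ousia/luke-base checkpoint, and any field can be
# overridden by keyword.
#   config = LukeConfig(num_hidden_layers=6, entity_emb_size=128)
#   assert config.vocab_size == 50_267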
| 14 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = MgpstrTokenizer
_lowercase = False
_lowercase = {}
_lowercase = False
def snake_case_ ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
__UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def snake_case_ ( self: Dict,**A_: Tuple ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ )
def snake_case_ ( self: List[Any],A_: Optional[Any] ):
'''simple docstring'''
        input_text = 'tester'
        output_text = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
__UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ )
self.assertEqual(len(A_ ),1 )
__UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ )
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertNotEqual(len(A_ ),0 )
__UpperCamelCase = tokenizer.decode(A_ )
self.assertIsInstance(A_,A_ )
self.assertEqual(text_a.replace(' ','' ),A_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
| 310 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=4_00 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , lowercase=1 / 2_55 , lowercase=True , ) -> Any:
'''simple docstring'''
a__: Optional[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a__: str = parent
a__: Optional[int] = batch_size
a__: Union[str, Any] = num_channels
a__: str = min_resolution
a__: str = max_resolution
a__: Optional[int] = do_resize
a__: Dict = size
a__: str = do_normalize
a__: int = image_mean
a__: Any = image_std
a__: str = do_rescale
a__: str = rescale_factor
a__: int = do_pad
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCamelCase_ ( self , lowercase , lowercase=False) -> Any:
'''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values , key=lambda item: item[0])[0]
            expected_width = max(expected_values , key=lambda item: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class __snake_case ( a__ , unittest.TestCase ):
a__ = DetaImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Any = DetaImageProcessingTester(self)
@property
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase__ , 'image_mean'))
self.assertTrue(hasattr(lowerCAmelCase__ , 'image_std'))
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_normalize'))
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_resize'))
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_rescale'))
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_pad'))
self.assertTrue(hasattr(lowerCAmelCase__ , 'size'))
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33})
self.assertEqual(image_processor.do_pad , lowerCAmelCase__)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
pass
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a__: Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image)
# Test not batched input
a__: str = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a__: Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__: Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
a__: Tuple = image_processing(lowerCAmelCase__ , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Dict = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a__: Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray)
# Test not batched input
a__: Dict = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a__: List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__: Tuple = image_processing(lowerCAmelCase__ , return_tensors='pt').pixel_values
a__: Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a__: List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor)
# Test not batched input
a__: Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a__: Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__: str = image_processing(lowerCAmelCase__ , return_tensors='pt').pixel_values
a__: Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f:
a__: int = json.loads(f.read())
a__: List[str] = {"image_id": 3_97_69, "annotations": target}
# encode them
a__: Dict = DetaImageProcessor()
a__: Optional[Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors='pt')
# verify pixel values
a__: Dict = torch.Size([1, 3, 8_00, 10_66])
self.assertEqual(encoding['pixel_values'].shape , lowerCAmelCase__)
a__: Any = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4))
# verify area
a__: Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCAmelCase__))
# verify boxes
a__: Optional[int] = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCAmelCase__)
a__: List[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCAmelCase__ , atol=1e-3))
# verify image_id
a__: Union[str, Any] = torch.tensor([3_97_69])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCAmelCase__))
# verify is_crowd
a__: Any = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCAmelCase__))
# verify class_labels
a__: str = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCAmelCase__))
# verify orig_size
a__: int = torch.tensor([4_80, 6_40])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCAmelCase__))
# verify size
a__: List[str] = torch.tensor([8_00, 10_66])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCAmelCase__))
@slow
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f:
a__: Dict = json.loads(f.read())
a__: Optional[int] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a__: Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
a__: List[Any] = DetaImageProcessor(format='coco_panoptic')
a__: Optional[Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors='pt')
# verify pixel values
a__: Optional[int] = torch.Size([1, 3, 8_00, 10_66])
self.assertEqual(encoding['pixel_values'].shape , lowerCAmelCase__)
a__: Any = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4))
# verify area
a__: str = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCAmelCase__))
# verify boxes
a__: Tuple = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCAmelCase__)
a__: List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCAmelCase__ , atol=1e-3))
# verify image_id
a__: Optional[int] = torch.tensor([3_97_69])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCAmelCase__))
# verify is_crowd
a__: Tuple = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCAmelCase__))
# verify class_labels
a__: Tuple = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCAmelCase__))
# verify masks
a__: Union[str, Any] = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowerCAmelCase__)
# verify orig_size
a__: Union[str, Any] = torch.tensor([4_80, 6_40])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCAmelCase__))
# verify size
a__: Optional[Any] = torch.tensor([8_00, 10_66])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCAmelCase__))
| 363 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
def __init__( self , lowercase , lowercase=3 , lowercase=32 , lowercase=3 , lowercase=10 , lowercase=[10, 20, 30, 40] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ) -> Any:
'''simple docstring'''
a__: Union[str, Any] = parent
a__: int = batch_size
a__: List[Any] = image_size
a__: Any = num_channels
a__: Dict = embeddings_size
a__: str = hidden_sizes
a__: List[Any] = depths
a__: Optional[int] = is_training
a__: Optional[int] = use_labels
a__: Tuple = hidden_act
a__: Any = num_labels
a__: Union[str, Any] = scope
a__: Tuple = len(lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__: str = None
if self.use_labels:
a__: Dict = ids_tensor([self.batch_size] , self.num_labels)
a__: int = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Any:
'''simple docstring'''
a__: Dict = RegNetModel(config=lowercase)
model.to(lowercase)
model.eval()
a__: List[str] = model(lowercase)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
a__: Tuple = self.num_labels
a__: Tuple = RegNetForImageClassification(lowercase)
model.to(lowercase)
model.eval()
a__: Any = model(lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
a__ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
a__ = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Dict = RegNetModelTester(self)
a__: Dict = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return
@unittest.skip(reason='RegNet does not use inputs_embeds')
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='RegNet does not support input and output embeddings')
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__ , a__: str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__: int = model_class(lowercase)
a__: str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__: int = [*signature.parameters.keys()]
a__: List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__ , a__: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__: int = model_class(config=lowercase)
for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
def check_hidden_states_output(lowercase , lowercase , lowercase):
a__: int = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__: int = model(**self._prepare_for_class(lowercase , lowercase))
a__: Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a__: List[str] = self.model_tester.num_stages
self.assertEqual(len(lowercase) , expected_num_stages + 1)
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
a__ , a__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a__: Dict = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
a__: Dict = layer_type
a__: Dict = True
check_hidden_states_output(lowercase , lowercase , lowercase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__: str = True
check_hidden_states_output(lowercase , lowercase , lowercase)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase)
@slow
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: Tuple = RegNetModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
def __a ( ) ->Dict:
a__: str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Tuple = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(lowercase)
a__: Tuple = self.default_image_processor
a__: str = prepare_img()
a__: Optional[int] = image_processor(images=lowercase , return_tensors='pt').to(lowercase)
# forward pass
with torch.no_grad():
a__: List[Any] = model(**lowercase)
# verify the logits
a__: Tuple = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , lowercase)
a__: Optional[Any] = torch.tensor([-0.4180, -1.5051, -3.4836]).to(lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4))
| 203 | 0 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker ( seq : Iterable[str] , size : int ):
    it = iter(seq )
    while True:
        chunk = tuple(itertools.islice(it , size ) )
        if not chunk:
            return
        yield chunk
def prepare_input ( dirty : str ):
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters] )
    clean = ""
    if len(dirty ) < 2:
        return dirty
    for i in range(len(dirty ) - 1 ):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean ) & 1:
        clean += "X"
    return clean
def generate_table ( key : str ):
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char )
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char )
    return table
def encode ( plaintext : str , key : str ):
    table = generate_table(key )
    plaintext = prepare_input(plaintext )
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode ( ciphertext : str , key : str ):
    table = generate_table(key )
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
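# Illustrative round trip for the two functions above; the key and message are
# made-up values. `prepare_input` pads repeated letters with X, so the decoded
# text is the padded upper-case form of the original message.
if __name__ == "__main__":
    secret = encode("Hide the gold in the tree stump", "playfair example")
    print(secret)
    print(decode(secret, "playfair example"))  # HIDETHEGOLDINTHETREXESTUMP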
| 55 |
'''simple docstring'''
class a__ :
    def __init__( self , _UpperCamelCase ):
        """simple docstring"""
        self.n = _UpperCamelCase
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
def __len__( self ):
"""simple docstring"""
return self.size
    def is_empty( self ):
        """simple docstring"""
        return self.size == 0
    def first( self ):
        """simple docstring"""
        return False if self.is_empty() else self.array[self.front]
    def enqueue( self , _UpperCamelCase ):
        """simple docstring"""
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL" )
        self.array[self.rear] = _UpperCamelCase
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue( self ):
        """simple docstring"""
        if self.size == 0:
            raise Exception("UNDERFLOW" )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
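# Minimal usage sketch for the circular queue above; the capacity and the
# enqueued values are arbitrary illustrations.
if __name__ == "__main__":
    q = a__(3)                 # queue with three slots
    q.enqueue(10).enqueue(20)  # enqueue returns self, so calls chain
    print(len(q), q.first())   # 2 10
    print(q.dequeue())         # 10 -- front advances circularly
    print(len(q))              # 1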
| 250 | 0 |
'''simple docstring'''
g = 9.80665  # standard gravity, in m/s^2
def _a ( fluid_density : float , volume : float , gravity : float = g ):
'''simple docstring'''
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
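# Worked example of the formula above (F = density * gravity * volume); the
# numbers are illustrative: 0.5 m^3 of fresh water at 998 kg/m^3.
#   _a(998, 0.5)  ->  998 * 9.80665 * 0.5 ≈ 4893.5 N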
if __name__ == "__main__":
import doctest
# run doctest
    doctest.testmod()
| 367 |
'''simple docstring'''
def solution ( _lowercase : int = 600851475143 ):
    '''simple docstring'''
    try:
        n = int(_lowercase )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans )
if __name__ == "__main__":
print(f"""{solution() = }""") | 240 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = "arrow" , **UpperCAmelCase , ):
"""simple docstring"""
super().__init__(
split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , **UpperCAmelCase , )
_UpperCAmelCase = load_from_cache_file
_UpperCAmelCase = file_format
_UpperCAmelCase = Spark(
df=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , working_dir=UpperCAmelCase , **UpperCAmelCase , )
def UpperCamelCase ( self ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_UpperCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 39 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset( )-> tuple[list[int], int]:
    """simple docstring"""
    arr = [randint(-1_000 , 1_000 ) for i in range(10 )]
    r = randint(-5_000 , 5_000 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1( arr , target )-> tuple[int, ...]:
    """simple docstring"""
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2( arr , target )-> tuple[int, int, int]:
    """simple docstring"""
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times( )-> tuple[float, float]:
    """simple docstring"""
    setup_code = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
    test_code1 = '\ntriplet_sum1(*dataset)\n'
    test_code2 = '\ntriplet_sum2(*dataset)\n'
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=10_000 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=10_000 )
    return (min(times1 ), min(times2 ))
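# For intuition, the brute-force and two-pointer versions agree on small,
# hand-checkable inputs (the values below are illustrative, not the benchmark
# dataset):
#   triplet_sum1([13, 29, 7, 23, 5], 35)  ->  (5, 7, 23)
#   triplet_sum2([13, 29, 7, 23, 5], 35)  ->  (5, 7, 23)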
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
| 39 | 1 |
import functools
from typing import Any
def word_break ( string , words ):
    # Validation
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError("the string should be not empty string" )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError("the words should be a list of non-empty strings" )
    # Build trie
    trie = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
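# Quick sanity checks for the trie + memoised DP above; the strings are
# arbitrary examples:
#   word_break("applepenapple", ["apple", "pen"])                   ->  True
#   word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])  ->  False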
if __name__ == "__main__":
import doctest
doctest.testmod()
| 224 |
def exchange_sort ( numbers ):
    n = len(numbers )
    for i in range(n ):
        for j in range(i + 1 , n ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
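# e.g. exchange_sort([5, 4, 3, 2, 1]) returns [1, 2, 3, 4, 5]; the sort works
# in place, so the list passed in is mutated and then returned.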
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 224 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = '▁'
UpperCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
UpperCAmelCase_ = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
UpperCAmelCase_ = {
'facebook/xglm-564M': 2_0_4_8,
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : str="</s>" , _UpperCAmelCase : Tuple="</s>" , _UpperCAmelCase : Optional[Any]="<s>" , _UpperCAmelCase : Union[str, Any]="<unk>" , _UpperCAmelCase : int="<pad>" , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : Optional[Any] , ):
"""simple docstring"""
UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCAmelCase__ = 7
UpperCAmelCase__ = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
UpperCAmelCase__ = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
UpperCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase__ = 1
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
UpperCAmelCase__ = len(self.sp_model )
UpperCAmelCase__ = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_UpperCAmelCase )
UpperCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.__dict__.copy()
UpperCAmelCase__ = None
UpperCAmelCase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[int] , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCAmelCase__ = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase ))
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase ))
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : str ):
"""simple docstring"""
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase__ = self.sp_model.PieceToId(_UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Any ):
"""simple docstring"""
UpperCAmelCase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , """wb""" ) as fi:
UpperCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
| 346 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=False ):
"""simple docstring"""
UpperCAmelCase__ = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 346 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ :str = logging.get_logger(__name__)
A_ :str = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class __A ( a ):
"""simple docstring"""
UpperCamelCase__ : List[str] ="""mra"""
def __init__( self , lowerCamelCase__=50265 , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3072 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=1 , lowerCamelCase__=0.02 , lowerCamelCase__=1E-5 , lowerCamelCase__="absolute" , lowerCamelCase__=4 , lowerCamelCase__="full" , lowerCamelCase__=0 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=2 , **lowerCamelCase__ , ):
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
__UpperCamelCase : Dict =vocab_size
__UpperCamelCase : Any =max_position_embeddings
__UpperCamelCase : Optional[int] =hidden_size
__UpperCamelCase : List[Any] =num_hidden_layers
__UpperCamelCase : Dict =num_attention_heads
__UpperCamelCase : List[Any] =intermediate_size
__UpperCamelCase : int =hidden_act
__UpperCamelCase : Optional[int] =hidden_dropout_prob
__UpperCamelCase : str =attention_probs_dropout_prob
__UpperCamelCase : Optional[Any] =initializer_range
__UpperCamelCase : List[str] =type_vocab_size
__UpperCamelCase : str =layer_norm_eps
__UpperCamelCase : Optional[Any] =position_embedding_type
__UpperCamelCase : Optional[Any] =block_per_row
__UpperCamelCase : str =approx_mode
__UpperCamelCase : int =initial_prior_first_n_blocks
__UpperCamelCase : Optional[int] =initial_prior_diagonal_n_blocks
| 245 |
import math
import tensorflow as tf
from packaging import version
def _gelu ( x ):
    x = tf.convert_to_tensor(x )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def _gelu_new ( x ):
    x = tf.convert_to_tensor(x )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def mish ( x ):
    x = tf.convert_to_tensor(x )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast ( x ):
    x = tf.convert_to_tensor(x )
    coeff_1 = tf.cast(0.044715 , x.dtype )
    coeff_2 = tf.cast(0.7978845608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_2 * (1.0 + coeff_1 * x * x) ))
def quick_gelu ( x ):
    x = tf.convert_to_tensor(x )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_10 ( x ):
    return tf.clip_by_value(_gelu(x ) , -10 , 10 )
def glu ( x , axis=-1 ):
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
    def approximate_gelu_wrap ( x ):
        return tf.keras.activations.gelu(x , approximate=True )
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACT2FN = {
    '''gelu''': gelu,
    '''gelu_10''': gelu_10,
    '''gelu_fast''': gelu_fast,
    '''gelu_new''': gelu_new,
    '''glu''': glu,
    '''mish''': mish,
    '''quick_gelu''': quick_gelu,
    '''relu''': tf.keras.activations.relu,
    '''sigmoid''': tf.keras.activations.sigmoid,
    '''silu''': tf.keras.activations.swish,
    '''swish''': tf.keras.activations.swish,
    '''tanh''': tf.keras.activations.tanh,
}
def get_tf_activation ( activation_string ):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys() )}' )
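# Minimal usage sketch for the lookup above; the probe tensor is arbitrary.
if __name__ == "__main__":
    act = get_tf_activation('''gelu''')
    print(act(tf.constant([-1.0, 0.0, 1.0])))  # ~ [-0.1587, 0.0, 0.8413]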
| 245 | 1 |
"""simple docstring"""
def is_pentagonal ( n ):
    """simple docstring"""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution ( limit = 5_000 ):
    """simple docstring"""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
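# Note: whenever solution() returns something other than -1, the returned
# difference is pentagonal by construction, so is_pentagonal(solution())
# holds; that makes a cheap sanity check for the search above.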
if __name__ == "__main__":
print(F'''{solution() = }''')
| 289 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fd, abs_path = mkstemp()
            line_found = False
            with fdopen(fd, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
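# A standalone sketch (added for illustration, not part of the CLI) of the
# temp-file rewrite pattern that `replace` above relies on: stream the source,
# inject lines below a marker, then swap files while preserving permissions.
def insert_below_marker(path, marker, new_lines):
    from os import fdopen, remove
    from shutil import copymode, move
    from tempfile import mkstemp

    fd, tmp_path = mkstemp()
    with fdopen(fd, "w") as out_file, open(path) as in_file:
        for line in in_file:
            out_file.write(line)
            if marker in line:
                out_file.writelines(new_lines)
    copymode(path, tmp_path)  # keep the original file's mode bits
    remove(path)
    move(tmp_path, path)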
| 120 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
UpperCamelCase__ = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg: str, hint=None):
    require_version(deps[pkg], hint)
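# A standalone sketch (added for illustration) of the same runtime check using
# only importlib.metadata and packaging, independent of the deps table above:
def check_installed_version(pkg: str, minimum: str):
    from importlib.metadata import PackageNotFoundError, version as installed_version

    from packaging.version import parse

    try:
        found = installed_version(pkg)
    except PackageNotFoundError as err:
        raise ImportError(f"{pkg} is required but not installed") from err
    if parse(found) < parse(minimum):
        raise ImportError(f"{pkg}>={minimum} is required, but {found} is installed")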
| 87 |
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: returns the longest palindromic substring of
    input_string in O(n) time.
    """
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_input_string for range(0, length - 1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]
    # we will store the start and end of the previous furthest-ending palindromic substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_input_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string, dropping the "|" separators again
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
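# Quick illustration (added): the "|" interleaving gives every palindrome, even
# or odd, a single center in the expanded string, which keeps the scan linear.
def _demo_palindromic_string() -> None:
    assert palindromic_string("abbbaba") == "abbba"
    assert palindromic_string("ababa") == "ababa"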
| 87 | 1 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Returns True if number is prime, checking divisors up to its square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    Project Euler problem 131: count the primes p below max_prime for which
    there is an integer n > 0 such that n**3 + n**2 * p is a perfect cube.
    Such primes are exactly the differences of consecutive cubes,
    p = 3 * k**2 + 3 * k + 1, hence the step of 6 * cube_index below.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 121 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
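# Minimal round-trip sketch (added for illustration; summarizes what the tests
# above assert about write_to_directory / from_directory symmetry):
def _demo_dataset_info_roundtrip(tmp_dir: str) -> None:
    info = DatasetInfo(description="demo", dataset_size=42)
    info.write_to_directory(tmp_dir)
    assert DatasetInfo.from_directory(tmp_dir) == info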
| 121 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
 | 363 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
        class DummyScheduler:
            init_noise_sigma = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
 | 307 | 0 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
def UpperCamelCase ( self ):
A__ = self.tokenizer_class(self.vocab_file )
A__ = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__lowerCamelCase,['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ),[9, 6, 7, 12, 10, 11] )
def UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = '''UNwant\u00E9d,running'''
A__ = tokenizer.tokenize(__lowerCamelCase )
A__ = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
A__ = tokenizer.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase )
A__ = rust_tokenizer.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(__lowerCamelCase )
A__ = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
# With lower casing
A__ = self.get_tokenizer(do_lower_case=__lowerCamelCase )
A__ = self.get_rust_tokenizer(do_lower_case=__lowerCamelCase )
A__ = '''UNwant\u00E9d,running'''
A__ = tokenizer.tokenize(__lowerCamelCase )
A__ = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
A__ = tokenizer.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase )
A__ = rust_tokenizer.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(__lowerCamelCase )
A__ = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ),['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCamelCase ( self ):
A__ = BasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ),['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ),['''hello'''] )
def UpperCamelCase ( self ):
A__ = BasicTokenizer(do_lower_case=__lowerCamelCase,strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ),['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ),['''h\u00E9llo'''] )
def UpperCamelCase ( self ):
A__ = BasicTokenizer(do_lower_case=__lowerCamelCase,strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ),['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ),['''hello'''] )
def UpperCamelCase ( self ):
A__ = BasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ),['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ),['''hello'''] )
def UpperCamelCase ( self ):
A__ = BasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ),['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCamelCase ( self ):
A__ = BasicTokenizer(do_lower_case=__lowerCamelCase,strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ),['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCamelCase ( self ):
A__ = BasicTokenizer(do_lower_case=__lowerCamelCase,strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ),['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCamelCase ( self ):
A__ = BasicTokenizer(do_lower_case=__lowerCamelCase,never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ),['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCamelCase ( self ):
A__ = BasicTokenizer()
A__ = '''a\n\'ll !!to?\'d of, can\'t.'''
A__ = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(__lowerCamelCase ),__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
A__ = {}
for i, token in enumerate(__lowerCamelCase ):
A__ = i
A__ = WordpieceTokenizer(vocab=__lowerCamelCase,unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ),[] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ),['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ),['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCamelCase ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCamelCase ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCamelCase ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCamelCase ( self ):
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']],[['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']],[['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def UpperCamelCase ( self ):
A__ = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
A__ = tokenizer.encode('''sequence builders''',add_special_tokens=__lowerCamelCase )
A__ = tokenizer.encode('''multi-sequence build''',add_special_tokens=__lowerCamelCase )
A__ = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
A__ = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase,__lowerCamelCase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase,**__lowerCamelCase )
A__ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
A__ = tokenizer_r.encode_plus(
__lowerCamelCase,return_attention_mask=__lowerCamelCase,return_token_type_ids=__lowerCamelCase,return_offsets_mapping=__lowerCamelCase,add_special_tokens=__lowerCamelCase,)
A__ = tokenizer_r.do_lower_case if hasattr(__lowerCamelCase,'''do_lower_case''' ) else False
A__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results],tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results],tokens['''offset_mapping'''] )
def UpperCamelCase ( self ):
A__ = ['''的''', '''人''', '''有''']
A__ = ''''''.join(__lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ = True
A__ = self.tokenizer_class.from_pretrained(__lowerCamelCase,**__lowerCamelCase )
A__ = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase,**__lowerCamelCase )
A__ = tokenizer_p.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase )
A__ = tokenizer_r.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase )
A__ = tokenizer_r.convert_ids_to_tokens(__lowerCamelCase )
A__ = tokenizer_p.convert_ids_to_tokens(__lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
A__ = False
A__ = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase,**__lowerCamelCase )
A__ = self.tokenizer_class.from_pretrained(__lowerCamelCase,**__lowerCamelCase )
A__ = tokenizer_r.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase )
A__ = tokenizer_p.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase )
A__ = tokenizer_r.convert_ids_to_tokens(__lowerCamelCase )
A__ = tokenizer_p.convert_ids_to_tokens(__lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
A__ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(__lowerCamelCase )
]
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
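# Greedy longest-match-first WordPiece, as a compact sketch (added for
# illustration; mirrors the behaviour the WordpieceTokenizer tests above assert):
def greedy_wordpiece(word, vocab, unk_token="[UNK]"):
    tokens = []
    start = 0
    while start < len(word):
        end = len(word)
        cur_substr = None
        while start < end:
            substr = ("##" if start > 0 else "") + word[start:end]
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:
            return [unk_token]  # any unmatchable span maps the whole word to [UNK]
        tokens.append(cur_substr)
        start = end
    return tokens


# e.g. greedy_wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]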
| 193 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_wrong_framework(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
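# Usage sketch (added for illustration; downloads a checkpoint, so it is kept
# out of the test class): jax.jit compiles the first call, later calls reuse it.
def _demo_jitted_forward(model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = FlaxBertModel.from_pretrained(model_name)
    tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

    @jax.jit
    def forward(**kwargs):
        return model(**kwargs)

    return forward(**tokens).block_until_ready()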
| 193 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
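# The same shim pattern as a generic sketch (added for illustration): subclass
# the replacement class, warn at construction time, delegate everything else.
class DeprecatedAlias(dict):  # dict stands in for any concrete replacement class
    def __init__(self, *args, **kwargs):
        warnings.warn("DeprecatedAlias is deprecated; use the replacement class.", FutureWarning)
        super().__init__(*args, **kwargs)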
| 363 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
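# A minimal standalone sketch (added for illustration; not the real _LazyModule)
# of the idea above: defer the submodule import until an attribute is accessed.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        mapping = self.__dict__.get("_attr_to_submodule", {})
        if attr not in mapping:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + mapping[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs only once per name
        return value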
| 156 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
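# Usage sketch (added for illustration; assumes the base BenchmarkArguments
# dataclass supplies defaults for the remaining fields):
def _demo_benchmark_args():
    args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
    # no_cuda is a deprecated flag translated to cuda=False in __init__,
    # so _setup_devices resolves to the CPU device with n_gpu == 0.
    return args.device, args.n_gpu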
 | 44 |
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
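# Behavioural note (added for illustration): importing the dummy class always
# succeeds even without the onnx backend installed; only using it raises, e.g.
#   OnnxRuntimeModel()  # -> ImportError from requires_backends explaining
#                       #    how to install the missing "onnx" dependency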
| 197 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
lowerCAmelCase__ : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase__ : List[Any] =parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
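For reference, once the conversion has been saved (or pushed), the checkpoint loads back through the standard `from_pretrained` API. A minimal sketch; the Hub name `microsoft/focalnet-tiny` is an assumption about where the published conversion lives, so substitute your own `--pytorch_dump_folder_path` if loading locally:

```python
import requests
import torch
from PIL import Image
from transformers import BitImageProcessor, FocalNetForImageClassification

# Assumed Hub location of the converted focalnet-tiny weights.
checkpoint = "microsoft/focalnet-tiny"
processor = BitImageProcessor.from_pretrained(checkpoint)
model = FocalNetForImageClassification.from_pretrained(checkpoint)
model.eval()

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
```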
| 118 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowerCAmelCase__ : Dict =random.Random()
if is_torch_available():
import torch
def __lowercase ( a__ , a__=1.0 , a__=None , a__=None ) -> Any:
if rng is None:
__SCREAMING_SNAKE_CASE = global_rng
__SCREAMING_SNAKE_CASE = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _A , _A=7 , _A=400 , _A=2_000 , _A=1 , _A=0.0 , _A=16_000 , _A=True , _A=True , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = min_seq_length
__SCREAMING_SNAKE_CASE = max_seq_length
__SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__SCREAMING_SNAKE_CASE = feature_size
__SCREAMING_SNAKE_CASE = padding_value
__SCREAMING_SNAKE_CASE = sampling_rate
__SCREAMING_SNAKE_CASE = return_attention_mask
__SCREAMING_SNAKE_CASE = do_normalize
def _A ( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _A ( self , _A=False , _A=False ):
'''simple docstring'''
def _flatten(_A ):
return list(itertools.chain(*_A ) )
if equal_length:
__SCREAMING_SNAKE_CASE = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__SCREAMING_SNAKE_CASE = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__SCREAMING_SNAKE_CASE = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ASTFeatureExtractor
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ASTFeatureExtractionTester(self )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__SCREAMING_SNAKE_CASE = [np.asarray(_A ) for speech_input in speech_inputs]
# Test not batched input
__SCREAMING_SNAKE_CASE = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
__SCREAMING_SNAKE_CASE = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test batched
__SCREAMING_SNAKE_CASE = feat_extract(_A , padding=_A , return_tensors='np' ).input_values
__SCREAMING_SNAKE_CASE = feat_extract(_A , padding=_A , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__SCREAMING_SNAKE_CASE = np.asarray(_A )
__SCREAMING_SNAKE_CASE = feat_extract(_A , return_tensors='np' ).input_values
__SCREAMING_SNAKE_CASE = feat_extract(_A , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
@require_torch
def _A ( self ):
'''simple docstring'''
import torch
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__SCREAMING_SNAKE_CASE = np.random.rand(100 ).astype(np.floataa )
__SCREAMING_SNAKE_CASE = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__SCREAMING_SNAKE_CASE = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__SCREAMING_SNAKE_CASE = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _A ( self , _A ):
'''simple docstring'''
from datasets import load_dataset
__SCREAMING_SNAKE_CASE = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__SCREAMING_SNAKE_CASE = ds.sort('id' ).select(range(_A ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
__SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
__SCREAMING_SNAKE_CASE = ASTFeatureExtractor()
__SCREAMING_SNAKE_CASE = feature_extractor(_A , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 1_024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _A , atol=1e-4 ) )
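Outside the test harness, the same extractor can be called directly. A minimal standalone sketch (needs `torch` and `torchaudio` installed, as the decorators above suggest); the silent waveform is purely illustrative:

```python
import numpy as np
from transformers import ASTFeatureExtractor

# One second of silence at the extractor's default 16 kHz sampling rate.
waveform = np.zeros(16_000, dtype=np.float32)

feature_extractor = ASTFeatureExtractor()
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np")
# AST consumes fixed-size log-mel spectrograms: (batch, max_length, num_mel_bins).
print(inputs["input_values"].shape)  # (1, 1024, 128) with the defaults, matching the test above
```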
| 118 | 1 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple:
A: int = [0] * no_of_processes
A: Optional[Any] = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(SCREAMING_SNAKE_CASE__ ):
A: str = burst_time[i]
A: Optional[int] = 0
A: Optional[Any] = 0
A: str = 9_9_9_9_9_9_9_9_9
A: Union[str, Any] = 0
A: Any = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(SCREAMING_SNAKE_CASE__ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
A: Optional[int] = remaining_time[j]
A: Any = j
A: Any = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
A: List[Any] = remaining_time[short]
if minm == 0:
A: Union[str, Any] = 9_9_9_9_9_9_9_9_9
if remaining_time[short] == 0:
complete += 1
A: int = False
# Find finish time of current process
A: str = increment_time + 1
# Calculate waiting time
A: Any = finish_time - arrival_time[short]
A: List[Any] = finar - burst_time[short]
if waiting_time[short] < 0:
A: List[str] = 0
# Increment time
increment_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> List[Any]:
A: List[Any] = [0] * no_of_processes
for i in range(SCREAMING_SNAKE_CASE__ ):
A: Tuple = burst_time[i] + waiting_time[i]
return turn_around_time
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
A: Optional[Any] = 0
A: Tuple = 0
for i in range(SCREAMING_SNAKE_CASE__ ):
A: Optional[int] = total_waiting_time + waiting_time[i]
A: Dict = total_turn_around_time + turn_around_time[i]
print(F"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print('''Average turn around time =''' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
    print('''Enter how many processes you want to analyze''')
UpperCamelCase = int(input())
UpperCamelCase = [0] * no_of_processes
UpperCamelCase = [0] * no_of_processes
UpperCamelCase = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
UpperCamelCase , UpperCamelCase = map(int, input().split())
UpperCamelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
UpperCamelCase = burst_time
UpperCamelCase = no_of_processes
UpperCamelCase = waiting_time
UpperCamelCase = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
UpperCamelCase = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
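Because the `__main__` block above is interactive and the helper names are mangled, here is a non-interactive sketch of the same shortest-remaining-time-first computation; every name in it is illustrative rather than taken from the original:

```python
def srtf_waiting_times(arrival, burst):
    """At each time unit, run the ready process with the least remaining
    burst time; return the per-process waiting times."""
    n = len(burst)
    remaining = list(burst)
    waiting = [0] * n
    done, t = 0, 0
    while done < n:
        ready = [i for i in range(n) if arrival[i] <= t and remaining[i] > 0]
        if not ready:
            t += 1
            continue
        short = min(ready, key=lambda i: remaining[i])
        remaining[short] -= 1
        t += 1
        if remaining[short] == 0:
            done += 1
            # waiting = finish - arrival - burst, clamped at zero as above
            waiting[short] = max(0, t - arrival[short] - burst[short])
    return waiting


if __name__ == "__main__":
    print(srtf_waiting_times([0, 1, 2], [3, 1, 2]))  # [1, 0, 2]
```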
| 319 |
import sys
def _A ( SCREAMING_SNAKE_CASE__ : List[str] ):
UpperCamelCase :Any = len(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Any = [[0 for x in range(SCREAMING_SNAKE_CASE__ )] for x in range(SCREAMING_SNAKE_CASE__ )]
UpperCamelCase :List[Any] = [[0 for x in range(SCREAMING_SNAKE_CASE__ )] for x in range(SCREAMING_SNAKE_CASE__ )]
for chain_length in range(2 , SCREAMING_SNAKE_CASE__ ):
for a in range(1 , n - chain_length + 1 ):
UpperCamelCase :Optional[Any] = a + chain_length - 1
UpperCamelCase :int = sys.maxsize
for c in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Any = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCamelCase :int = cost
UpperCamelCase :List[str] = c
return matrix, sol
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
if i == j:
print('''A''' + str(SCREAMING_SNAKE_CASE__ ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
        print_optimal_solution(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , optimal_solution[i][j] )
        print_optimal_solution(SCREAMING_SNAKE_CASE__ , optimal_solution[i][j] + 1 , SCREAMING_SNAKE_CASE__ )
print(''')''' , end=''' ''' )
def _A ( ):
UpperCamelCase :Optional[int] = [30, 35, 15, 5, 10, 20, 25]
UpperCamelCase :Optional[Any] = len(SCREAMING_SNAKE_CASE__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCamelCase , UpperCamelCase :Dict = matrix_chain_order(SCREAMING_SNAKE_CASE__ )
print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
    print_optimal_solution(SCREAMING_SNAKE_CASE__ , 1 , n - 1 )
if __name__ == "__main__":
main()
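For orientation, the table being filled satisfies cost[a][b] = min over c in [a, b) of cost[a][c] + cost[c+1][b] + dims[a-1] * dims[c] * dims[b]. A self-contained sketch with descriptive names (the names are illustrative), using the classic CLRS dimension list whose optimal cost is 15125:

```python
import sys


def matrix_chain_order(dims):
    """dims[i-1] x dims[i] is the shape of matrix A_i; returns the
    minimum-cost and split-point tables."""
    n = len(dims)
    cost = [[0] * n for _ in range(n)]
    split = [[0] * n for _ in range(n)]
    for length in range(2, n):
        for a in range(1, n - length + 1):
            b = a + length - 1
            cost[a][b] = sys.maxsize
            for c in range(a, b):
                q = cost[a][c] + cost[c + 1][b] + dims[a - 1] * dims[c] * dims[b]
                if q < cost[a][b]:
                    cost[a][b] = q
                    split[a][b] = c
    return cost, split


dims = [30, 35, 15, 5, 10, 20, 25]
cost, _ = matrix_chain_order(dims)
print(cost[1][len(dims) - 1])  # 15125
```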
| 259 | 0 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def __lowerCamelCase ( UpperCAmelCase_ : List[str] ):
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def __lowerCamelCase ( ):
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def __lowerCamelCase ( ):
"""simple docstring"""
a :Optional[int] = '''mock-s3-bucket'''
a :List[Any] = F'''s3://{mock_bucket}'''
a :int = extract_path_from_uri(UpperCAmelCase_ )
assert dataset_path.startswith('''s3://''' ) is False
a :Dict = '''./local/path'''
a :Optional[int] = extract_path_from_uri(UpperCAmelCase_ )
assert dataset_path == new_dataset_path
def __lowerCamelCase ( UpperCAmelCase_ : int ):
"""simple docstring"""
a :Optional[Any] = is_remote_filesystem(UpperCAmelCase_ )
assert is_remote is True
a :str = fsspec.filesystem('''file''' )
a :Any = is_remote_filesystem(UpperCAmelCase_ )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , UpperCAmelCase_ )
def __lowerCamelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
a :Any = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
a :Union[str, Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
a :Optional[Any] = F'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(UpperCAmelCase_ )
a :Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=UpperCAmelCase_ )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
a :Union[str, Any] = os.path.basename(UpperCAmelCase_ )
a :int = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(UpperCAmelCase_ , '''r''' , encoding='''utf-8''' ) as f, open(UpperCAmelCase_ , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def __lowerCamelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ):
"""simple docstring"""
a :str = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
a :Optional[int] = compressed_file_paths[protocol]
a :Any = '''dataset.jsonl'''
a :int = F'''{protocol}://{member_file_path}::{compressed_file_path}'''
a , *a :Tuple = fsspec.get_fs_token_paths(UpperCAmelCase_ )
assert fs.isfile(UpperCAmelCase_ )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def __lowerCamelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] ):
"""simple docstring"""
a :List[Any] = hf_api.dataset_info(UpperCAmelCase_ , token=UpperCAmelCase_ )
a :List[str] = HfFileSystem(repo_info=UpperCAmelCase_ , token=UpperCAmelCase_ )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(UpperCAmelCase_ ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def __lowerCamelCase ( ):
"""simple docstring"""
a :int = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(UpperCAmelCase_ , UpperCAmelCase_ , clobber=UpperCAmelCase_ )
with pytest.warns(UpperCAmelCase_ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(UpperCAmelCase_ ) == 1
assert (
str(warning_info[0].message )
== F'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
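The chained-URL test above uses fsspec protocol chaining, where `zip://member::archive` addresses a file inside an archive. A minimal sketch that writes its own archive so it runs standalone; the file names are illustrative:

```python
import zipfile

import fsspec

# Build a tiny zip archive in the working directory.
with zipfile.ZipFile("data.zip", "w") as zf:
    zf.writestr("dataset.jsonl", '{"text": "hello"}\n')

fs, *_ = fsspec.get_fs_token_paths("zip://dataset.jsonl::data.zip")
assert fs.isfile("dataset.jsonl")
with fs.open("dataset.jsonl", "r", encoding="utf-8") as f:
    print(f.read())
```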
| 281 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = MgpstrTokenizer
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = False
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
# fmt: off
a :int = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
a :List[str] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
a :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :str = '''tester'''
a :Union[str, Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a :Any = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
a :str = tokenizer.encode([special_token] , add_special_tokens=_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) , 1 )
a :Tuple = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
self.assertTrue(special_token not in decoded )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a , a :Tuple = self.get_input_output_texts(_lowerCamelCase )
a :Tuple = tokenizer.tokenize(_lowerCamelCase )
a :int = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
a :Optional[int] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
a :Any = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertNotEqual(len(_lowerCamelCase ) , 0 )
a :str = tokenizer.decode(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _lowerCamelCase )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
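Outside the test mixin, the round trip exercised above looks like this sketch; the checkpoint name `alibaba-damo/mgp-str-base` is assumed to host the reference character vocabulary on the Hub:

```python
from transformers import MgpstrTokenizer

tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")

ids = tokenizer.encode("tester", add_special_tokens=False)
print(ids)                                   # one id per character
print(tokenizer.convert_ids_to_tokens(ids))  # ['t', 'e', 's', 't', 'e', 'r']
```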
| 281 | 1 |
from math import sqrt
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> bool:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
__lowerCamelCase = True
# 0 and 1 are none primes.
if number <= 1:
__lowerCamelCase = False
for divisor in range(2 , int(round(sqrt(UpperCamelCase__ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__lowerCamelCase = False
break
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'status' must been from type bool"
return status
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Tuple:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__lowerCamelCase = list(range(2 , n + 1 ) )
__lowerCamelCase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(UpperCamelCase__ ) ):
for j in range(i + 1 , len(UpperCamelCase__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__lowerCamelCase = 0
# filters actual prime numbers.
__lowerCamelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[str]:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n > 2), "'N' must been an int and > 2"
__lowerCamelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(UpperCamelCase__ ):
ans.append(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> int:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and number >= 0, "'number' must been an int and >= 0"
__lowerCamelCase = [] # this list will be returns of the function.
# potential prime number factors.
__lowerCamelCase = 2
__lowerCamelCase = number
if number == 0 or number == 1:
ans.append(UpperCamelCase__ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(UpperCamelCase__ ):
while quotient != 1:
if is_prime(UpperCamelCase__ ) and (quotient % factor == 0):
ans.append(UpperCamelCase__ )
quotient /= factor
else:
factor += 1
else:
ans.append(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowerCamelCase = 0
# prime factorization of 'number'
__lowerCamelCase = prime_factorization(UpperCamelCase__ )
__lowerCamelCase = max(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type int"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowerCamelCase = 0
# prime factorization of 'number'
__lowerCamelCase = prime_factorization(UpperCamelCase__ )
__lowerCamelCase = min(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type int"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , UpperCamelCase__ ), "compare must been from type bool"
return number % 2 == 0
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> str:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , UpperCamelCase__ ), "compare must been from type bool"
return number % 2 != 0
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (number > 2) and is_even(UpperCamelCase__ )
), "'number' must been an int, even and > 2"
__lowerCamelCase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__lowerCamelCase = get_prime_numbers(UpperCamelCase__ )
__lowerCamelCase = len(UpperCamelCase__ )
# run variable for while-loops.
__lowerCamelCase = 0
__lowerCamelCase = None
# exit variable. for break up the loops
__lowerCamelCase = True
while i < len_pn and loop:
__lowerCamelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__lowerCamelCase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (len(UpperCamelCase__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__lowerCamelCase = 0
while numbera != 0:
__lowerCamelCase = numbera % numbera
__lowerCamelCase = numbera
__lowerCamelCase = rest
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> int:
"""simple docstring"""
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__lowerCamelCase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__lowerCamelCase = prime_factorization(UpperCamelCase__ )
__lowerCamelCase = prime_factorization(UpperCamelCase__ )
elif numbera == 1 or numbera == 1:
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = max(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__lowerCamelCase = prime_fac_a.count(UpperCamelCase__ )
__lowerCamelCase = prime_fac_a.count(UpperCamelCase__ )
for _ in range(max(UpperCamelCase__ , UpperCamelCase__ ) ):
ans *= n
else:
__lowerCamelCase = prime_fac_a.count(UpperCamelCase__ )
for _ in range(UpperCamelCase__ ):
ans *= n
done.append(UpperCamelCase__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__lowerCamelCase = prime_fac_a.count(UpperCamelCase__ )
for _ in range(UpperCamelCase__ ):
ans *= n
done.append(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> str:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'number' must been a positive int"
__lowerCamelCase = 0
__lowerCamelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(UpperCamelCase__ ):
ans += 1
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and is_prime(
UpperCamelCase__ ), "'ans' must been a prime number and from type int"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Tuple ) -> Tuple:
"""simple docstring"""
assert (
is_prime(UpperCamelCase__ ) and is_prime(UpperCamelCase__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__lowerCamelCase = p_number_a + 1 # jump to the next number
__lowerCamelCase = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(UpperCamelCase__ ):
number += 1
while number < p_number_a:
ans.append(UpperCamelCase__ )
number += 1
# fetch the next prime number.
while not is_prime(UpperCamelCase__ ):
number += 1
# precondition
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and ans[0] != p_number_a
and ans[len(UpperCamelCase__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 1), "'n' must been int and >= 1"
__lowerCamelCase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(UpperCamelCase__ )
# precondition
    assert ans[0] == 1 and ans[len(UpperCamelCase__ ) - 1] == n, "Error in function get_divisors(...)"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> Optional[int]:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
number > 1
), "'number' must been an int and >= 1"
__lowerCamelCase = get_divisors(UpperCamelCase__ )
# precondition
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (divisors[0] == 1)
and (divisors[len(UpperCamelCase__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ) -> List[Any]:
"""simple docstring"""
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__lowerCamelCase = gcd(abs(UpperCamelCase__ ) , abs(UpperCamelCase__ ) )
# precondition
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'n' must been a int and >= 0"
__lowerCamelCase = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'n' must been an int and >= 0"
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 1 # this will be return
for _ in range(n - 1 ):
__lowerCamelCase = ans
ans += fiba
__lowerCamelCase = tmp
return ans
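As a compact illustration of how the helpers above compose, here is a self-contained Goldbach decomposition built on a plain trial-division primality test; the names are descriptive stand-ins, not the mangled ones used above:

```python
from math import sqrt


def is_prime(n: int) -> bool:
    """Trial division up to sqrt(n)."""
    if n <= 1:
        return False
    return all(n % d for d in range(2, int(sqrt(n)) + 1))


def goldbach(number: int) -> tuple:
    """Return one pair of primes summing to an even number > 2."""
    for p in range(2, number):
        if is_prime(p) and is_prime(number - p):
            return p, number - p
    raise ValueError("no decomposition found")


print(goldbach(100))  # (3, 97)
```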
| 90 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
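The `_LazyModule` indirection above defers the heavy torch/TF imports until a symbol is first accessed. A simplified sketch of that pattern; this illustrates the idea and is not the actual `_LazyModule` implementation:

```python
import importlib
from types import ModuleType


class LazyModule(ModuleType):
    """Attribute access triggers the import of the submodule that
    defines the requested symbol, keeping the top-level import cheap."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module(f"{self.__name__}.{self._class_to_module[attr]}")
        return getattr(submodule, attr)


# Demo against the stdlib: json.decoder is imported only on first access.
lazy_json = LazyModule("json", {"decoder": ["JSONDecoder"]})
print(lazy_json.JSONDecoder)
```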
| 40 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Union[str, Any] =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> str:
UpperCamelCase__ : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
UpperCamelCase__ : Optional[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Union[str, Any]:
for i in range(config.num_hidden_layers ):
if base_model:
UpperCamelCase__ : Tuple = ""
else:
UpperCamelCase__ : Union[str, Any] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
UpperCamelCase__ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
UpperCamelCase__ : int = in_proj_bias[: config.hidden_size]
UpperCamelCase__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ : Union[str, Any] = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
UpperCamelCase__ : List[Any] = dct.pop(__lowerCAmelCase )
UpperCamelCase__ : Any = val
def SCREAMING_SNAKE_CASE ( ) -> Any:
UpperCamelCase__ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase__ : Tuple = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
UpperCamelCase__ : List[Any] = DeiTConfig()
# all deit models have fine-tuned heads
UpperCamelCase__ : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCamelCase__ : List[Any] = 1000
UpperCamelCase__ : int = "huggingface/label-files"
UpperCamelCase__ : List[str] = "imagenet-1k-id2label.json"
UpperCamelCase__ : List[str] = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" ) , "r" ) )
UpperCamelCase__ : Optional[int] = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
UpperCamelCase__ : str = idalabel
UpperCamelCase__ : Dict = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : Optional[Any] = int(deit_name[-6:-4] )
UpperCamelCase__ : Dict = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
UpperCamelCase__ : Any = 192
UpperCamelCase__ : Optional[Any] = 768
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Any = 3
elif deit_name[9:].startswith("small" ):
UpperCamelCase__ : Union[str, Any] = 384
UpperCamelCase__ : Optional[int] = 1536
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Any = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
UpperCamelCase__ : Optional[Any] = 1024
UpperCamelCase__ : int = 4096
UpperCamelCase__ : int = 24
UpperCamelCase__ : List[Any] = 16
# load original model from timm
UpperCamelCase__ : List[Any] = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCamelCase__ : int = timm_model.state_dict()
UpperCamelCase__ : Optional[int] = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
UpperCamelCase__ : Tuple = DeiTForImageClassificationWithTeacher(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
UpperCamelCase__ : Any = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
UpperCamelCase__ : int = DeiTImageProcessor(size=__lowerCAmelCase , crop_size=config.image_size )
UpperCamelCase__ : Any = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCamelCase__ : Optional[Any] = encoding["pixel_values"]
UpperCamelCase__ : str = model(__lowerCAmelCase )
UpperCamelCase__ : Optional[int] = timm_model(__lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCAmelCase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCamelCase : List[str] =parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
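After conversion, the model reloads like any other checkpoint. A minimal sketch; `facebook/deit-base-distilled-patch16-224` is the published checkpoint corresponding to the default `--deit_name` above:

```python
import requests
import torch
from PIL import Image
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

name = "facebook/deit-base-distilled-patch16-224"
processor = DeiTImageProcessor.from_pretrained(name)
model = DeiTForImageClassificationWithTeacher.from_pretrained(name)
model.eval()

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```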
| 352 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCamelCase : str =logging.get_logger(__name__)
@add_end_docstrings(A__ )
class __a ( A__ ):
def __init__( self : List[str] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Dict , SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = {}
if "candidate_labels" in kwargs:
UpperCamelCase__ : Optional[Any] = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
UpperCamelCase__ : int = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : Optional[int]="This is a photo of {}." ):
'''simple docstring'''
UpperCamelCase__ : Dict = load_image(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = self.image_processor(images=[image] , return_tensors=self.framework )
UpperCamelCase__ : Any = candidate_labels
UpperCamelCase__ : Dict = [hypothesis_template.format(SCREAMING_SNAKE_CASE ) for x in candidate_labels]
UpperCamelCase__ : Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = [text_inputs]
return inputs
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : Tuple = model_inputs.pop("candidate_labels" )
UpperCamelCase__ : List[str] = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Dict = text_inputs[0]
else:
# Batching case.
UpperCamelCase__ : Union[str, Any] = text_inputs[0][0]
UpperCamelCase__ : Any = self.model(**SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = model_outputs.pop("candidate_labels" )
UpperCamelCase__ : int = model_outputs["logits"][0]
if self.framework == "pt":
UpperCamelCase__ : Dict = logits.softmax(dim=-1 ).squeeze(-1 )
UpperCamelCase__ : Optional[Any] = probs.tolist()
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : List[Any] = [scores]
elif self.framework == "tf":
UpperCamelCase__ : Optional[Any] = stable_softmax(SCREAMING_SNAKE_CASE , axis=-1 )
UpperCamelCase__ : Optional[int] = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
UpperCamelCase__ : Optional[int] = [
{"score": score, "label": candidate_label}
for score, candidate_label in sorted(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , key=lambda SCREAMING_SNAKE_CASE : -x[0] )
]
        return result
| 196 | 0 |
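In practice the pipeline above is reached through the `pipeline(...)` factory. A minimal sketch; the CLIP checkpoint name is an assumption about a commonly used zero-shot backbone:

```python
from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
)
result = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
)
print(result[0])  # highest-scoring {"score": ..., "label": ...} pair
```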
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
_snake_case = True
except (ImportError, ModuleNotFoundError):
_snake_case = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
re.sub("""<n>""" , """""" , UpperCamelCase__ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase__ ) )
| 294 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : int = '''mvp'''
UpperCamelCase : Union[str, Any] = ['''past_key_values''']
UpperCamelCase : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCAmelCase__ : List[str]=50267 , UpperCAmelCase__ : Optional[Any]=1024 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Optional[Any]=4096 , UpperCAmelCase__ : int=16 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : int=4096 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=1024 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Tuple=0.0_2 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=100 , UpperCAmelCase__ : Union[str, Any]=800 , **UpperCAmelCase__ : Dict , ) -> List[Any]:
_a : Any = vocab_size
_a : Any = max_position_embeddings
_a : Union[str, Any] = d_model
_a : List[str] = encoder_ffn_dim
_a : List[Any] = encoder_layers
_a : Dict = encoder_attention_heads
_a : Tuple = decoder_ffn_dim
_a : List[Any] = decoder_layers
_a : Optional[Any] = decoder_attention_heads
_a : Optional[Any] = dropout
_a : str = attention_dropout
_a : Dict = activation_dropout
_a : Any = activation_function
_a : Tuple = init_std
_a : Dict = encoder_layerdrop
_a : Optional[int] = decoder_layerdrop
_a : Optional[Any] = classifier_dropout
_a : List[Any] = use_cache
_a : Dict = encoder_layers
_a : str = scale_embedding # scale factor will be sqrt(d_model) if True
_a : int = use_prompt
_a : Dict = prompt_length
_a : Dict = prompt_mid_dim
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase__ ):
_a : List[str] = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
| 294 | 1 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
SCREAMING_SNAKE_CASE : Optional[Any] = "sshleifer/mar_enro_6_3_student"
class _lowerCamelCase( _a ):
def UpperCamelCase ( self) -> str:
"""simple docstring"""
super().setUp()
_lowercase : int = cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz', extract_compressed_file=lowerCamelCase, )
_lowercase : Any = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
MarianMTModel.from_pretrained(lowerCamelCase)
@slow
@require_torch_gpu
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : str = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowercase : Optional[int] = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py')[1].strip()
_lowercase : List[Any] = bash_script.replace('\\\n', '').strip().replace('"$@"', '')
for k, v in env_vars_to_replace.items():
_lowercase : str = bash_script.replace(lowerCamelCase, str(lowerCamelCase))
_lowercase : Optional[Any] = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowercase : Tuple = F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowercase : int = ['finetune.py'] + bash_script.split() + args
with patch.object(lowerCamelCase, 'argv', lowerCamelCase):
_lowercase : Optional[int] = argparse.ArgumentParser()
_lowercase : str = pl.Trainer.add_argparse_args(lowerCamelCase)
_lowercase : List[str] = SummarizationModule.add_model_specific_args(lowerCamelCase, os.getcwd())
_lowercase : List[Any] = parser.parse_args()
_lowercase : Union[str, Any] = main(lowerCamelCase)
# Check metrics
_lowercase : Tuple = load_json(model.metrics_save_path)
_lowercase : Dict = metrics['val'][0]
_lowercase : int = metrics['val'][-1]
self.assertEqual(len(metrics['val']), (args.max_epochs / args.val_check_interval))
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''], lowerCamelCase)
self.assertGreater(last_step_stats['val_avg_gen_time'], 0.0_1)
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'], 1.0)
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'], 2)
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'], 17)
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu']), 1.1)
# check lightning ckpt can be loaded and has a reasonable statedict
_lowercase : List[Any] = os.listdir(lowerCamelCase)
_lowercase : Optional[Any] = [x for x in contents if x.endswith('.ckpt')][0]
_lowercase : List[str] = os.path.join(args.output_dir, lowerCamelCase)
_lowercase : List[Any] = torch.load(lowerCamelCase, map_location='cpu')
_lowercase : str = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowercase : int = {os.path.basename(lowerCamelCase) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test']) == 1
class _lowerCamelCase( _a ):
@timeout_decorator.timeout(6_00)
@slow
@require_torch_gpu
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[Any] = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowercase : Optional[Any] = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 1_28,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowercase : Optional[int] = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py')[1].strip()
)
_lowercase : Any = bash_script.replace('\\\n', '').strip().replace('"$@"', '')
_lowercase : List[str] = bash_script.replace('--fp16 ', ' ')
for k, v in env_vars_to_replace.items():
_lowercase : Optional[int] = bash_script.replace(lowerCamelCase, str(lowerCamelCase))
_lowercase : Any = self.get_auto_remove_tmp_dir()
_lowercase : str = bash_script.replace('--fp16', '')
_lowercase : Dict = 6
_lowercase : Tuple = (
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(lowerCamelCase, 'argv', lowerCamelCase):
_lowercase : Dict = argparse.ArgumentParser()
_lowercase : int = pl.Trainer.add_argparse_args(lowerCamelCase)
_lowercase : Tuple = SummarizationDistiller.add_model_specific_args(lowerCamelCase, os.getcwd())
_lowercase : Optional[int] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowercase : Tuple = distill_main(lowerCamelCase)
# Check metrics
_lowercase : Tuple = load_json(model.metrics_save_path)
_lowercase : Any = metrics['val'][0]
_lowercase : int = metrics['val'][-1]
assert len(metrics['val']) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''], lowerCamelCase)
# check lightning ckpt can be loaded and has a reasonable statedict
_lowercase : List[str] = os.listdir(lowerCamelCase)
_lowercase : List[Any] = [x for x in contents if x.endswith('.ckpt')][0]
_lowercase : List[str] = os.path.join(args.output_dir, lowerCamelCase)
_lowercase : Tuple = torch.load(lowerCamelCase, map_location='cpu')
_lowercase : Dict = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowercase : List[Any] = {os.path.basename(lowerCamelCase) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test']) == 1
| 84 |
def UpperCamelCase_( lowerCamelCase_ = 1000 ) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , lowerCamelCase_ + 1 ) )


if __name__ == "__main__":
    print(UpperCamelCase_())
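The one-liner above appears to be the closed form r_max(a) = 2a * floor((a - 1) / 2) for the maximum of (a - 1)^n + (a + 1)^n mod a^2 (Project Euler 120); treat that identification as an inference from the formula. A brute-force cross-check for small a:

```python
def r_max_bruteforce(a: int) -> int:
    # (parity of n, n mod a) repeats with period 2a, so n in [1, 2a] suffices.
    return max(((a - 1) ** n + (a + 1) ** n) % (a * a) for n in range(1, 2 * a + 1))


assert all(r_max_bruteforce(a) == 2 * a * ((a - 1) // 2) for a in range(3, 50))
print("closed form verified for 3 <= a < 50")
```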
| 84 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase__ : int = random.Random()
if is_torch_available():
import torch
def _lowerCAmelCase ( __snake_case : Optional[int] , __snake_case : Optional[int]=1.0 , __snake_case : Union[str, Any]=None , __snake_case : Optional[int]=None ) -> Any:
if rng is None:
__A : str = global_rng
__A : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=400 , _UpperCAmelCase=2000 , _UpperCAmelCase=1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1_6000 , _UpperCAmelCase=True , _UpperCAmelCase=True , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : str = batch_size
__A : Union[str, Any] = min_seq_length
__A : int = max_seq_length
__A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__A : str = feature_size
__A : Dict = padding_value
__A : List[str] = sampling_rate
__A : Dict = return_attention_mask
__A : Optional[int] = do_normalize
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors='pt').input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 190 |
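For reference, a minimal usage sketch of the extractor exercised above (assumes 16 kHz mono audio; the waveform here is random noise):

import numpy as np
from transformers import ASTFeatureExtractor

extractor = ASTFeatureExtractor()
waveform = np.random.randn(16_000).astype(np.float32)  # one second of fake audio
inputs = extractor(waveform, sampling_rate=16_000, return_tensors="np")
print(inputs.input_values.shape)  # (1, 1024, 128): time frames x mel bins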
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    '''simple docstring'''
    url = f"""https://www.amazon.in/laptop/s?k={product}"""
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
        try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span', attrs={'class': 'a-offscreen'}).text
            try:
                product_rating = item.find('span', attrs={'class': 'a-icon-alt'}).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span', attrs={'class': 'a-price a-text-price'}).text.split('₹')[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹').replace(',', ''))
                            - float(product_price.strip('₹').replace(',', ''))
                        )
                        / float(product_mrp.strip('₹').replace(',', ''))
                    )
                    * 100
                )
            except ValueError:
                discount = float('nan')
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
lowerCamelCase_ = ' '
lowerCamelCase_ = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 204 | 0 |
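The discount arithmetic buried in the nested try blocks above, isolated for clarity; the helper name and the price strings are illustrative:

def percent_discount(mrp: str, price: str) -> float:
    # strip the rupee sign and thousands separators before converting
    mrp_value = float(mrp.strip('₹').replace(',', ''))
    price_value = float(price.strip('₹').replace(',', ''))
    return (mrp_value - price_value) / mrp_value * 100

print(percent_discount('₹1,000', '₹750'))  # 25.0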
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'''The elements inside the sequence must contain only {colors} values'''
            raise ValueError(msg)
    return sequence
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by commas:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
print(f"{dutch_national_flag_sort(unsorted)}")
| 367 |
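A quick non-interactive check of the sort above on a small mixed sequence:

example = [2, 0, 1, 2, 1, 0, 0, 2, 1]
print(dutch_national_flag_sort(example))  # [0, 0, 0, 1, 1, 1, 2, 2, 2]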
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    """simple docstring"""

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
def _lowercase ( self : List[str] , __A : str , __A : int , __A : int ):
# initialize GPU on separate process
snake_case__ : str = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
snake_case__ : Dict = self._prepare_inference_func(__A , __A , __A )
return self._measure_speed(_inference )
def _lowercase ( self : Tuple , __A : str , __A : int , __A : int ):
snake_case__ : Optional[int] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
snake_case__ : Any = self._prepare_train_func(__A , __A , __A )
return self._measure_speed(_train )
def _lowercase ( self : List[Any] , __A : str , __A : int , __A : int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __A )
snake_case__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
snake_case__ : Optional[Any] = self._prepare_inference_func(__A , __A , __A )
return self._measure_memory(_inference )
def _lowercase ( self : str , __A : str , __A : int , __A : int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __A )
snake_case__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
snake_case__ : int = self._prepare_train_func(__A , __A , __A )
return self._measure_memory(_train )
def _lowercase ( self : Union[str, Any] , __A : str , __A : int , __A : int ):
snake_case__ : int = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
snake_case__ : Tuple = (
hasattr(__A , "architectures" )
and isinstance(config.architectures , __A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
snake_case__ : Dict = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
snake_case__ : Union[str, Any] = __import__("transformers" , fromlist=[model_class] )
snake_case__ : Any = getattr(__A , __A )
snake_case__ : Dict = model_cls(__A )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
snake_case__ : Dict = TF_MODEL_MAPPING[config.__class__](__A )
# encoder-decoder has vocab size saved differently
snake_case__ : Optional[int] = config.vocab_size if hasattr(__A , "vocab_size" ) else config.encoder.vocab_size
snake_case__ : List[Any] = random_input_ids(__A , __A , __A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__A , decoder_input_ids=__A , training=__A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__A , training=__A )
snake_case__ : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _lowercase ( self : List[str] , __A : str , __A : int , __A : int ):
snake_case__ : Optional[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
snake_case__ : Any = (
hasattr(__A , "architectures" )
and isinstance(config.architectures , __A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
snake_case__ : Dict = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
snake_case__ : List[Any] = __import__("transformers" , fromlist=[model_class] )
snake_case__ : Optional[int] = getattr(__A , __A )
snake_case__ : str = model_cls(__A )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
snake_case__ : Union[str, Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__A )
# encoder-decoder has vocab size saved differently
snake_case__ : Union[str, Any] = config.vocab_size if hasattr(__A , "vocab_size" ) else config.encoder.vocab_size
snake_case__ : List[str] = random_input_ids(__A , __A , __A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
snake_case__ : str = model(__A , decoder_input_ids=__A , labels=__A , training=__A )[0]
snake_case__ : Dict = tf.gradients(__A , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
snake_case__ : Optional[Any] = model(__A , labels=__A , training=__A )[0]
snake_case__ : Dict = tf.gradients(__A , model.trainable_variables )
return gradients
snake_case__ : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _lowercase ( self : int , __A : List[Any] ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 additional times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__A , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
snake_case__ : Optional[Any] = timeit.repeat(
__A , repeat=self.args.repeat , number=1_0 , )
return min(__A ) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def _lowercase ( self : str , __A : Callable[[], None] ):
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
snake_case__ : Optional[int] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
snake_case__ : List[str] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
snake_case__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
snake_case__ : Optional[Any] = nvml.nvmlDeviceGetMemoryInfo(__A )
snake_case__ : Optional[int] = meminfo.used
snake_case__ : Any = Memory(__A )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
snake_case__ : int = None
else:
snake_case__ : Any = measure_peak_memory_cpu(__A )
snake_case__ : Tuple = Memory(__A ) if isinstance(__A , __A ) else memory_bytes
if self.args.trace_memory_line_by_line:
snake_case__ : Optional[int] = stop_memory_tracing(__A )
if memory is None:
snake_case__ : Dict = summary.total
else:
snake_case__ : List[str] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 286 | 0 |
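The eager-versus-graph switch that the benchmark wraps around every model call can be seen in isolation. A stripped-down sketch, assuming TensorFlow 2.x; the wrapped function and the input tensor are illustrative:

import tensorflow as tf

def compiled_matmul(use_xla: bool):
    # mirrors run_with_tf_optimizations: the same body, optionally XLA-compiled
    @tf.function(experimental_compile=use_xla)
    def run(x):
        return tf.matmul(x, x)
    return run

x = tf.random.uniform((128, 128))
print(compiled_matmul(use_xla=False)(x).shape)  # (128, 128)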
'''simple docstring'''
class EditDistance:
    '''simple docstring'''

    def __init__(self):
        self.word1 = ''
        self.word2 = ''
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()

    print("""****************** Testing Edit Distance DP Algorithm ******************""")
    print()

    S1 = input("""Enter the first string: """).strip()
    S2 = input("""Enter the second string: """).strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 166 |
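Both implementations above agree on the classic textbook pair, which needs five edits:

solver = EditDistance()
assert solver.min_dist_top_down("intention", "execution") == 5
assert solver.min_dist_bottom_up("intention", "execution") == 5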
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class MgpstrConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = """mgp-str"""

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 166 | 1 |
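A quick instantiation check for the config defined above, using the declared defaults:

config = MgpstrConfig()
print(config.model_type)        # "mgp-str"
print(config.max_token_length)  # 27
print(config.hidden_size)       # 768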
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu:
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '
    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(F''' {self.arrow_char} ''')
            self.write_choice(index)
else:
forceWrite(F''' {self.choices[index]}''' )
reset_cursor()
    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
self.print_choice(self.position )
    @input.mark(KEYMAP['up'])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['down'])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['newline'])
    def select(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position

    @input.mark(KEYMAP['interrupt'])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
else:
return
else:
return
    def run(self, default_choice: int = 0):
if self.prompt:
linebreak()
forceWrite(self.prompt , '\n' )
if in_colab:
forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
else:
forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
forceWrite('\n' )
move_cursor(len(self.choices ) - self.position , 'UP' )
with cursor.hide():
while True:
if in_colab:
try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , 'UP' )
clear_line()
                    self.write_choice(choice, '\n')
return choice
| 200 |
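Typical driving code for the menu above; this is interactive and needs a real terminal (or Colab, where it falls back to numeric input):

menu = BulletMenu("Which framework do you want to use?", ["pytorch", "tensorflow", "jax"])
choice_index = menu.run(default_choice=0)
print(menu.choices[choice_index])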
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
BLIP_TEST_FILE = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
        EXPECTED_BLIP_MAPPING = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 200 | 1 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __A :
"""simple docstring"""
def __init__( self , __A = "cpu" , __A = "openai/clip-vit-large-patch14" ) -> None:
a =device
a =CLIPTokenizerFast.from_pretrained(__A )
a =[0.48_145_466, 0.4_578_275, 0.40_821_073]
a =[0.26_862_954, 0.26_130_258, 0.27_577_711]
a =torchvision.transforms.Normalize(self.image_mean , self.image_std )
a =torchvision.transforms.Resize(224 )
a =torchvision.transforms.CenterCrop(224 )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Any:
a =self.resize(__A )
a =self.center_crop(__A )
a =self.normalize(__A )
return images
def __call__( self , __A=None , __A=None , **__A ) -> Optional[Any]:
a =self.tokenizer(text=__A , **__A )
a =self.preprocess_img(__A )
a ={key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class __A ( nn.Module ):
"""simple docstring"""
def __init__( self , __A=10 , __A=0.01 , __A=None , __A=None , __A=None , __A=None , __A=None , __A=None , __A=False , __A=True , __A="image" , __A=True , __A=False , __A=False , __A=False , ) -> None:
super().__init__()
a =None
a =device if device else get_device()
if vqgan:
a =vqgan
else:
a =load_vqgan(self.device , conf_path=__A , ckpt_path=__A )
self.vqgan.eval()
if clip:
a =clip
else:
a =CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
a =ProcessorGradientFlow(device=self.device )
a =iterations
a =lr
a =log
a =make_grid
a =return_val
a =quantize
a =self.vqgan.decoder.z_shape
def SCREAMING_SNAKE_CASE ( self , __A=None , __A=None , __A=5 , __A=True ) -> List[str]:
a =[]
if output_path is None:
a ='''./animation.gif'''
if input_path is None:
a =self.save_path
a =sorted(glob(input_path + '''/*''' ) )
if not len(__A ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(__A ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
a =total_duration / len(__A )
a =[frame_duration] * len(__A )
if extend_frames:
a =1.5
a =3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(__A ) )
imageio.mimsave(__A , __A , duration=__A )
print(f'''gif saved to {output_path}''' )
def SCREAMING_SNAKE_CASE ( self , __A=None , __A=None ) -> Any:
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
a =preprocess(Image.open(__A ) , target_image_size=256 ).to(self.device )
a =preprocess_vqgan(__A )
a , *a =self.vqgan.encode(__A )
return z
def SCREAMING_SNAKE_CASE ( self , __A ) -> Union[str, Any]:
a =self.latent.detach().requires_grad_()
a =base_latent + transform_vector
if self.quantize:
a , *a =self.vqgan.quantize(__A )
else:
a =trans_latent
return self.vqgan.decode(__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A=None ) -> List[str]:
a =self.clip_preprocessor(text=__A , images=__A , return_tensors='''pt''' , padding=__A )
a =self.clip(**__A )
a =clip_outputs.logits_per_image
if weights is not None:
a =similarity_logits * weights
return similarity_logits.sum()
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> Any:
a =self._get_clip_similarity(pos_prompts['''prompts'''] , __A , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
a =self._get_clip_similarity(neg_prompts['''prompts'''] , __A , weights=neg_prompts['''weights'''] )
else:
a =torch.tensor([1] , device=self.device )
a =-torch.log(__A ) + torch.log(__A )
return loss
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> Dict:
a =torch.randn_like(self.latent , requires_grad=__A , device=self.device )
a =torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
a =self._add_vector(__A )
a =loop_post_process(__A )
a =self._get_CLIP_loss(__A , __A , __A )
print('''CLIP loss''' , __A )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=__A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> str:
wandb.init(reinit=__A , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
a =Image.open(__A )
a =image.resize((256, 256) )
wandb.log('''Original Image''' , wandb.Image(__A ) )
def SCREAMING_SNAKE_CASE ( self , __A ) -> int:
if not prompts:
return []
a =[]
a =[]
if isinstance(__A , __A ):
a =[prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(__A , (tuple, list) ):
a =prompt[0]
a =float(prompt[1] )
elif ":" in prompt:
a , a =prompt.split(''':''' )
a =float(__A )
else:
a =prompt
a =1.0
processed_prompts.append(__A )
weights.append(__A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__A , device=self.device ),
}
def SCREAMING_SNAKE_CASE ( self , __A , __A=None , __A=None , __A=True , __A=False , __A=True , __A=True , __A=None , ) -> int:
if image_path:
a =self._get_latent(__A )
else:
a =torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__A , __A , __A )
assert pos_prompts, "You must provide at least one positive prompt."
a =self.process_prompts(__A )
a =self.process_prompts(__A )
if save_final and save_path is None:
a =os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(__A ):
os.makedirs(__A )
else:
a =save_path + '''_''' + get_timestamp()
os.makedirs(__A )
a =save_path
a =self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(__A ) )
a =loop_post_process(__A )
for iter, transformed_img in enumerate(self._optimize_CLIP(__A , __A , __A ) ):
if show_intermediate:
show_pil(__A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(__A )} )
if show_final:
show_pil(__A )
if save_final:
            transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
| 81 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ])
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    @unittest.skip("""Esm does not support embedding resizing""")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("""Esm does not support embedding resizing""")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 306 | 0 |
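The padding-aware position-id trick asserted in the embedding tests above can be written in a few lines. A minimal sketch of the same cumsum-and-mask idea; the function name is illustrative:

import torch

def position_ids_from_input_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    # non-pad tokens get sequential positions offset past padding_idx; pads keep padding_idx
    return torch.cumsum(mask, dim=1) * mask + padding_idx

ids = torch.tensor([[12, 31, 13, 1]])  # 1 plays the role of the pad token here
print(position_ids_from_input_ids(ids, padding_idx=1))  # tensor([[2, 3, 4, 1]])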
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 144 |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    '''simple docstring'''
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 144 | 1 |
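Because the script is exposed through fire.Fire, positional arguments map onto the function parameters. An illustrative invocation (the config name, output directory, and override are examples only):

# python save_randomly_initialized_model.py t5-small ./t5-small-random --d_model=128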
import torch
from diffusers import DiffusionPipeline
class CustomLocalPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 159 |
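Wiring the pipeline above with tiny components shows the expected output shape. A sketch under the assumption that diffusers' stock UNet2DModel and DDPMScheduler are acceptable stand-ins:

from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
scheduler = DDPMScheduler()
pipe = CustomLocalPipeline(unet=unet, scheduler=scheduler)
print(pipe().shape)  # torch.Size([1, 3, 8, 8]), an all-ones tensor by construction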
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = '''bart'''
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    '''simple docstring'''
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0")
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    '''simple docstring'''
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    '''simple docstring'''
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    '''simple docstring'''
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    '''simple docstring'''
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results, )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    })
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    '''simple docstring'''
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1_024, device="cuda:0", )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox('''Demo options''')
if demo_options:
    action_st = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
    show_passages = show_type == '''Show full text of passages'''
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    retriever_info = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    index_type = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    wiki_source = '''wiki40b'''
    index_type = '''dense'''

sampled = '''beam'''
n_beams = 2
min_len = 64
max_len = 2_56
top_p = None
temp = None
generate_options = st.sidebar.checkbox('''Generation options''')
if generate_options:
    generate_info = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    min_len = st.sidebar.slider(
        '''Minimum generation length''', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        '''Maximum generation length''', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
    )
if sampled == "beam":
        n_beams = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
            '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
SCREAMING_SNAKE_CASE :Optional[Any] = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE :List[Any] = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE :str = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :int = make_support(question, source=wiki_source, method='''dense''', n_results=10)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :Optional[int] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
SCREAMING_SNAKE_CASE :Optional[Any] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE :Union[str, Any] = support_list[:10]
SCREAMING_SNAKE_CASE :int = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :str = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :Any = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE :Dict = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE :Tuple = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE :Tuple = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE :Union[str, Any] = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE :Optional[int] = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE :List[Any] = find_nearest_training(question)
SCREAMING_SNAKE_CASE :List[Any] = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
SCREAMING_SNAKE_CASE :Any = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
SCREAMING_SNAKE_CASE :Optional[int] = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
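# --- Illustrative sketch, not part of the app above: the "dense" retriever
# described in the sidebar text ranks passages by the inner product between a
# question embedding and precomputed passage embeddings. A minimal numpy version
# (assumption: the array arguments stand in for the app's real encoder outputs
# and index; the app itself does this inside make_support):
import numpy as np

def dense_retrieve_sketch(question_embedding, passage_embeddings, n_results=10):
    # score every passage against the question; highest inner product first
    scores = passage_embeddings @ question_embedding
    top = np.argsort(-scores)[:n_results]
    return top, scores[top]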
| 159 | 1 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowercase_ = logging.getLogger(__name__)
@dataclass
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = field(
default=0.0 , metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
__snake_case = field(default=__UpperCamelCase , metadata={"""help""": """Whether to use SortishSampler or not."""} )
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
__snake_case = field(default=__UpperCamelCase , metadata={"""help""": """Whether to use Adafactor."""} )
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
__snake_case = field(default=__UpperCamelCase , metadata={"""help""": """Dropout probability. Goes into model.config."""} )
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
__snake_case = field(
default="""linear""" , metadata={"""help""": f'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
| 362 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = """owlvit_text_model"""
def __init__( self: Optional[int] , a: Dict=4_9408 , a: Optional[Any]=512 , a: Dict=2048 , a: Optional[Any]=12 , a: Tuple=8 , a: Union[str, Any]=16 , a: str="quick_gelu" , a: List[Any]=1e-5 , a: Dict=0.0 , a: Optional[int]=0.0_2 , a: Dict=1.0 , a: Any=0 , a: Union[str, Any]=4_9406 , a: Any=4_9407 , **a: Dict , ):
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
__lowerCamelCase : List[Any] = vocab_size
__lowerCamelCase : int = hidden_size
__lowerCamelCase : Optional[Any] = intermediate_size
__lowerCamelCase : List[Any] = num_hidden_layers
__lowerCamelCase : Any = num_attention_heads
__lowerCamelCase : Union[str, Any] = max_position_embeddings
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : List[str] = layer_norm_eps
__lowerCamelCase : Tuple = attention_dropout
__lowerCamelCase : Optional[int] = initializer_range
__lowerCamelCase : Tuple = initializer_factor
@classmethod
def _snake_case ( cls: Dict , a: Union[str, os.PathLike] , **a: Optional[int] ):
cls._set_token_in_kwargs(a )
__lowerCamelCase , __lowerCamelCase : Dict = cls.get_config_dict(a , **a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
__lowerCamelCase : Optional[Any] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a , **a )
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = """owlvit_vision_model"""
def __init__( self: int , a: Tuple=768 , a: int=3072 , a: List[str]=12 , a: Optional[Any]=12 , a: Optional[int]=3 , a: Optional[int]=768 , a: Optional[Any]=32 , a: Optional[int]="quick_gelu" , a: Union[str, Any]=1e-5 , a: Union[str, Any]=0.0 , a: Union[str, Any]=0.0_2 , a: int=1.0 , **a: Union[str, Any] , ):
super().__init__(**a )
__lowerCamelCase : str = hidden_size
__lowerCamelCase : Tuple = intermediate_size
__lowerCamelCase : Dict = num_hidden_layers
__lowerCamelCase : Optional[Any] = num_attention_heads
__lowerCamelCase : int = num_channels
__lowerCamelCase : Optional[Any] = image_size
__lowerCamelCase : Tuple = patch_size
__lowerCamelCase : List[str] = hidden_act
__lowerCamelCase : Tuple = layer_norm_eps
__lowerCamelCase : List[Any] = attention_dropout
__lowerCamelCase : Tuple = initializer_range
__lowerCamelCase : List[Any] = initializer_factor
@classmethod
def _snake_case ( cls: Optional[int] , a: Union[str, os.PathLike] , **a: int ):
cls._set_token_in_kwargs(a )
__lowerCamelCase , __lowerCamelCase : Dict = cls.get_config_dict(a , **a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
__lowerCamelCase : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a , **a )
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = """owlvit"""
__snake_case = True
def __init__( self: Dict , a: int=None , a: str=None , a: Tuple=512 , a: Tuple=2.6_5_9_2 , a: int=True , **a: int , ):
super().__init__(**a )
if text_config is None:
__lowerCamelCase : List[str] = {}
logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
if vision_config is None:
__lowerCamelCase : str = {}
logger.info('vision_config is None. Initializing the OwlViTVisionConfig with default values.' )
__lowerCamelCase : List[Any] = OwlViTTextConfig(**a )
__lowerCamelCase : str = OwlViTVisionConfig(**a )
__lowerCamelCase : Union[str, Any] = projection_dim
__lowerCamelCase : Tuple = logit_scale_init_value
__lowerCamelCase : Dict = return_dict
__lowerCamelCase : Tuple = 1.0
@classmethod
def _snake_case ( cls: str , a: Union[str, os.PathLike] , **a: List[Any] ):
cls._set_token_in_kwargs(a )
__lowerCamelCase , __lowerCamelCase : List[Any] = cls.get_config_dict(a , **a )
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a , **a )
@classmethod
def _snake_case ( cls: Tuple , a: Dict , a: Dict , **a: str ):
__lowerCamelCase : List[str] = {}
__lowerCamelCase : List[str] = text_config
__lowerCamelCase : Optional[int] = vision_config
return cls.from_dict(a , **a )
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Optional[Any] = copy.deepcopy(self.__dict__ )
__lowerCamelCase : List[Any] = self.text_config.to_dict()
__lowerCamelCase : List[str] = self.vision_config.to_dict()
__lowerCamelCase : Optional[Any] = self.__class__.model_type
return output
class A_ ( __UpperCamelCase ):
'''simple docstring'''
@property
def _snake_case ( self: str ):
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
@property
def _snake_case ( self: Dict ):
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
@property
def _snake_case ( self: int ):
return 1e-4
def _snake_case ( self: Any , a: "ProcessorMixin" , a: int = -1 , a: int = -1 , a: Optional["TensorType"] = None , ):
__lowerCamelCase : List[str] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=a , seq_length=a , framework=a )
__lowerCamelCase : int = super().generate_dummy_inputs(
processor.image_processor , batch_size=a , framework=a )
return {**text_input_dict, **image_input_dict}
@property
def _snake_case ( self: int ):
return 14
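# --- Illustrative usage sketch (assumption: the classes above correspond to the
# public OwlViTConfig family in transformers). Composing the composite config
# from its two sub-config dicts; guarded so it does not run on import:
if __name__ == "__main__":
    from transformers import OwlViTConfig
    demo_config = OwlViTConfig(
        text_config={"vocab_size": 49408, "hidden_size": 512},
        vision_config={"hidden_size": 768, "patch_size": 32},
    )
    assert demo_config.text_config.vocab_size == 49408
    assert demo_config.to_dict()["model_type"] == "owlvit"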
| 194 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = """laion/clap-htsat-unfused"""
_SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
def snake_case_( self , **A ) -> int:
return RobertaTokenizer.from_pretrained(self.checkpoint , **A )
def snake_case_( self , **A ) -> Optional[int]:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **A )
def snake_case_( self ) -> Dict:
shutil.rmtree(self.tmpdirname )
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=A , feature_extractor=A )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A )
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_SCREAMING_SNAKE_CASE = self.get_feature_extractor(do_normalize=A , padding_value=1.0 )
_SCREAMING_SNAKE_CASE = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=A , feature_extractor=A )
_SCREAMING_SNAKE_CASE = floats_list((3, 1000) )
_SCREAMING_SNAKE_CASE = feature_extractor(A , return_tensors="""np""" )
_SCREAMING_SNAKE_CASE = processor(audios=A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=A , feature_extractor=A )
_SCREAMING_SNAKE_CASE = """This is a test string"""
_SCREAMING_SNAKE_CASE = processor(text=A )
_SCREAMING_SNAKE_CASE = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=A , feature_extractor=A )
_SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE = processor.batch_decode(A )
_SCREAMING_SNAKE_CASE = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def snake_case_( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=A , feature_extractor=A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
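# --- Note on the final assertion above (assumption: ClapProcessor concatenates
# the tokenizer's model_input_names with the feature extractor's):
#     tokenizer:         ['input_ids', 'attention_mask']
#     feature_extractor: ['input_features', 'is_longer']
#     processor:         ['input_ids', 'attention_mask', 'input_features', 'is_longer']
# so the [2:] slice compares only the audio-side input names.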
| 58 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
_a = """http://www.mocksite.com/file1.txt"""
_a = """\"text\": [\"foo\", \"foo\"]"""
_a = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _UpperCAmelCase:
lowercase__ = 2_00
lowercase__ = {'Content-Length': '100'}
lowercase__ = {}
def UpperCAmelCase ( self , **__a) -> Optional[int]:
'''simple docstring'''
return [bytes(__a , '''utf-8''')]
def lowerCamelCase__ ( *__snake_case, **__snake_case ) -> int:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize('''urls_type''', [str, list, dict] )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
import requests
monkeypatch.setattr(__snake_case, '''request''', __snake_case )
_UpperCamelCase = URL
if issubclass(__snake_case, __snake_case ):
_UpperCamelCase = url
elif issubclass(__snake_case, __snake_case ):
_UpperCamelCase = [url]
elif issubclass(__snake_case, __snake_case ):
_UpperCamelCase = {'''train''': url}
_UpperCamelCase = '''dummy'''
_UpperCamelCase = '''downloads'''
_UpperCamelCase = tmp_path
_UpperCamelCase = DownloadConfig(
cache_dir=os.path.join(__snake_case, __snake_case ), use_etag=__snake_case, )
_UpperCamelCase = DownloadManager(dataset_name=__snake_case, download_config=__snake_case )
_UpperCamelCase = dl_manager.download(__snake_case )
_UpperCamelCase = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__snake_case, __snake_case ):
_UpperCamelCase = [downloaded_paths]
_UpperCamelCase = [urls]
elif isinstance(__snake_case, __snake_case ):
assert "train" in downloaded_paths.keys()
_UpperCamelCase = downloaded_paths.values()
_UpperCamelCase = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__snake_case, __snake_case ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_UpperCamelCase = Path(__snake_case )
_UpperCamelCase = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_UpperCamelCase = downloaded_path.read_text()
assert content == CONTENT
_UpperCamelCase = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
_UpperCamelCase = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''', [str, list, dict] )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = str(__snake_case )
if issubclass(__snake_case, __snake_case ):
_UpperCamelCase = filename
elif issubclass(__snake_case, __snake_case ):
_UpperCamelCase = [filename]
elif issubclass(__snake_case, __snake_case ):
_UpperCamelCase = {'''train''': filename}
_UpperCamelCase = '''dummy'''
_UpperCamelCase = xz_file.parent
_UpperCamelCase = '''extracted'''
_UpperCamelCase = DownloadConfig(
cache_dir=__snake_case, use_etag=__snake_case, )
_UpperCamelCase = DownloadManager(dataset_name=__snake_case, download_config=__snake_case )
_UpperCamelCase = dl_manager.extract(__snake_case )
_UpperCamelCase = paths
for extracted_paths in [extracted_paths]:
if isinstance(__snake_case, __snake_case ):
_UpperCamelCase = [extracted_paths]
_UpperCamelCase = [paths]
elif isinstance(__snake_case, __snake_case ):
assert "train" in extracted_paths.keys()
_UpperCamelCase = extracted_paths.values()
_UpperCamelCase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__snake_case, __snake_case ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_UpperCamelCase = Path(__snake_case )
_UpperCamelCase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__snake_case, etag=__snake_case )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_UpperCamelCase = extracted_path.read_text()
_UpperCamelCase = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[Any]:
"""simple docstring"""
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(__snake_case, start=1 ):
_UpperCamelCase = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''', ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = request.getfixturevalue(__snake_case )
_UpperCamelCase = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__snake_case ), start=1 ):
_test_jsonl(__snake_case, __snake_case )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''', ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = request.getfixturevalue(__snake_case )
_UpperCamelCase = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__snake_case ), start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__snake_case ), start=1 ):
_test_jsonl(__snake_case, __snake_case )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase__ ( __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__snake_case ), start=1 ):
assert os.path.basename(__snake_case ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 194 | 0 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
_snake_case = random.Random()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__=1.0 , UpperCamelCase__=None , UpperCamelCase__=None ):
'''simple docstring'''
if rng is None:
_a : Union[str, Any] = global_rng
_a : Optional[int] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCamelCase ( unittest.TestCase ):
def __init__( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Tuple=400 , UpperCAmelCase__ : List[str]=2000 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : int=16000 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : str=True , ) -> Optional[int]:
_a : Optional[int] = parent
_a : Optional[int] = batch_size
_a : str = min_seq_length
_a : Union[str, Any] = max_seq_length
_a : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : Dict = feature_size
_a : int = padding_value
_a : Any = sampling_rate
_a : str = return_attention_mask
_a : Any = do_normalize
def _lowercase ( self : Tuple ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowercase ( self : List[str] , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : Union[str, Any]=False ) -> str:
def _flatten(UpperCAmelCase__ : str ):
return list(itertools.chain(*UpperCAmelCase__ ) )
if equal_length:
_a : str = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_a : List[str] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : Tuple = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
return speech_inputs
class UpperCamelCase ( snake_case_ , unittest.TestCase ):
UpperCamelCase : Tuple = WavaVecaFeatureExtractor
def _lowercase ( self : Dict ) -> List[str]:
_a : Tuple = WavaVecaFeatureExtractionTester(self )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Optional[int] ) -> Optional[int]:
self.assertTrue(np.all(np.mean(UpperCAmelCase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase__ , axis=0 ) - 1 ) < 1E-3 ) )
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
# Tests that all calls wrap to encode_plus and batch_encode_plus
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_a : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_a : List[Any] = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
_a : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
_a : str = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test batched
_a : int = feat_extract(UpperCAmelCase__ , return_tensors="""np""" ).input_values
_a : Optional[int] = feat_extract(UpperCAmelCase__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_a : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_a : int = np.asarray(UpperCAmelCase__ )
_a : Tuple = feat_extract(UpperCAmelCase__ , return_tensors="""np""" ).input_values
_a : List[Any] = feat_extract(UpperCAmelCase__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def _lowercase ( self : Tuple ) -> Tuple:
_a : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_a : Optional[Any] = ["""longest""", """max_length""", """do_not_pad"""]
_a : Dict = [None, 1600, None]
for max_length, padding in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : Any = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors="""np""" )
_a : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _lowercase ( self : List[str] ) -> str:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a : List[str] = range(800 , 1400 , 200 )
_a : int = [floats_list((1, x) )[0] for x in lengths]
_a : str = ["""longest""", """max_length""", """do_not_pad"""]
_a : Optional[int] = [None, 1600, None]
for max_length, padding in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : List[Any] = feat_extract(UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding=UpperCAmelCase__ )
_a : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _lowercase ( self : int ) -> Dict:
_a : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_a : Optional[Any] = feat_extract(
UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
_a : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_a : Tuple = feat_extract(
UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
_a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
_a : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_a : Optional[int] = feat_extract(
UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
_a : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def _lowercase ( self : str ) -> Optional[int]:
import torch
_a : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a : str = np.random.rand(100 ).astype(np.floataa )
_a : Optional[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_a : Tuple = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_a : Tuple = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_a : Optional[Any] = WavaVecaConfig.from_pretrained(UpperCAmelCase__ )
_a : List[str] = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase__ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
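# --- Illustrative sketch of the invariant _check_zero_mean_unit_variance verifies
# above: with do_normalize=True each (unpadded) waveform is rescaled to zero mean
# and unit variance. A minimal version using the numpy import at the top of this
# file (assumption: this mirrors the feature extractor's normalization; padding
# and attention-mask handling are omitted):
def zero_mean_unit_var_sketch(waveform, eps=1e-7):
    return (waveform - waveform.mean()) / np.sqrt(waveform.var() + eps)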
| 365 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 324 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = FunnelTokenizer
__UpperCAmelCase : Optional[int] = FunnelTokenizerFast
__UpperCAmelCase : str = True
__UpperCAmelCase : str = True
def _lowercase ( self : str ):
super().setUp()
__lowercase = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__lowercase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _lowercase ( self : List[Any], **UpperCAmelCase__ : int ):
return FunnelTokenizer.from_pretrained(self.tmpdirname, **UpperCAmelCase__ )
def _lowercase ( self : Optional[Any], **UpperCAmelCase__ : Union[str, Any] ):
return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **UpperCAmelCase__ )
def _lowercase ( self : Any, UpperCAmelCase__ : Dict ):
__lowercase = "UNwant\u00E9d,running"
__lowercase = "unwanted, running"
return input_text, output_text
def _lowercase ( self : str ):
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(UpperCAmelCase__, ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ), [7, 4, 5, 1_0, 8, 9] )
def _lowercase ( self : Optional[int] ):
__lowercase = self.get_tokenizers(do_lower_case=UpperCAmelCase__ )
for tokenizer in tokenizers:
__lowercase = tokenizer("UNwant\u00E9d,running" )
__lowercase = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len )
__lowercase = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len )
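# --- Note on the token_type_ids assertions above (assumption: FunnelTokenizer,
# unlike BERT, gives the <cls> token its own type id of 2): a single sequence is
# typed [2, 0, ..., 0] and a pair [2, 0, ..., 0, 1, ..., 1].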
| 17 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __snake_case ( ):
"""simple docstring"""
A_ = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
A_ = Dataset.from_dict(__UpperCamelCase )
return dataset
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Union[str, Any] ):
A_ = get_dataset()
A_ = make_duplicate_clusters(UpperCAmelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def __A ( self : List[Any] ):
A_ = get_dataset()
A_ , A_ = deduplicate_dataset(UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , 2 )
print(UpperCAmelCase )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , UpperCAmelCase )
| 312 | 0 |
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class a__ ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : str, lowerCAmelCase : Dict="", lowerCAmelCase : List[Any]="train" ) -> List[Any]:
assert os.path.isdir(lowerCAmelCase )
lowercase : Any = []
lowercase : int = os.listdir(lowerCAmelCase )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowercase : Optional[Any] = os.path.join(lowerCAmelCase, lowerCAmelCase )
if not os.path.isfile(lowerCAmelCase ):
continue
self.documents.append(lowerCAmelCase )
def __len__( self : List[str] ) -> Union[str, Any]:
return len(self.documents )
def __getitem__( self : Optional[Any], lowerCAmelCase : Any ) -> List[str]:
lowercase : List[str] = self.documents[idx]
lowercase : str = document_path.split('/' )[-1]
with open(lowerCAmelCase, encoding='utf-8' ) as source:
lowercase : List[str] = source.read()
lowercase : List[Any] = process_story(lowerCAmelCase )
return document_name, story_lines, summary_lines
def lowercase__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple = list(filter(lambda _UpperCAmelCase : len(_UpperCAmelCase ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
lowercase : List[Any] = [_add_missing_period(_UpperCAmelCase ) for line in nonempty_lines]
# gather article lines
lowercase : List[str] = []
lowercase : Dict = deque(_UpperCAmelCase )
while True:
try:
lowercase : Union[str, Any] = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(_UpperCAmelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
lowercase : Tuple = list(filter(lambda _UpperCAmelCase : not t.startswith('@highlight' ) , _UpperCAmelCase ) )
return story_lines, summary_lines
def lowercase__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase : Optional[int] = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u201d', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if len(_UpperCAmelCase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_UpperCAmelCase )) )
return sequence
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase : str = torch.ones_like(_UpperCAmelCase )
lowercase : List[Any] = sequence == pad_token_id
lowercase : List[Any] = 0
return mask
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict = [tokenizer.encode(_UpperCAmelCase ) for line in story_lines]
lowercase : Tuple = [token for sentence in story_lines_token_ids for token in sentence]
lowercase : Optional[int] = [tokenizer.encode(_UpperCAmelCase ) for line in summary_lines]
lowercase : List[str] = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] = []
for sequence in batch:
lowercase : Dict = -1
lowercase : Optional[Any] = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_UpperCAmelCase )
return torch.tensor(_UpperCAmelCase )
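# --- Illustrative usage sketch (assumption: raw CNN/DailyMail story files
# interleave the article body with "@highlight"-prefixed summary bullets;
# process_story is the original name of the first module function above, renamed
# in this dump); guarded so it does not run on import:
if __name__ == "__main__":
    raw_story = "First article line.\nSecond line\n@highlight\nthe summary bullet"
    story, summary = process_story(raw_story)
    assert story == ["First article line.", "Second line."]  # missing period added
    assert summary == ["the summary bullet."]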
| 356 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCamelCase: Dict = logging.get_logger(__name__)
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
'''simple docstring'''
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None ) -> str:
'''simple docstring'''
lowercase : str = tesseract_config if tesseract_config is not None else ''
# apply OCR
lowercase : Tuple = to_pil_image(_UpperCAmelCase )
lowercase , lowercase : Union[str, Any] = pil_image.size
lowercase : Union[str, Any] = pytesseract.image_to_data(_UpperCAmelCase , lang=_UpperCAmelCase , output_type='dict' , config=_UpperCAmelCase )
lowercase , lowercase , lowercase , lowercase , lowercase : str = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
lowercase : str = [idx for idx, word in enumerate(_UpperCAmelCase ) if not word.strip()]
lowercase : Dict = [word for idx, word in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
lowercase : Union[str, Any] = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
lowercase : str = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
lowercase : Any = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
lowercase : Dict = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase : int = []
for x, y, w, h in zip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase : Any = [x, y, x + w, y + h]
actual_boxes.append(_UpperCAmelCase )
# finally, normalize the bounding boxes
lowercase : List[str] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = ['pixel_values']
def __init__( self : str, lowerCAmelCase : bool = True, lowerCAmelCase : Dict[str, int] = None, lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR, lowerCAmelCase : bool = True, lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = "", **lowerCAmelCase : List[Any], ) -> None:
super().__init__(**lowerCAmelCase )
lowercase : Optional[Any] = size if size is not None else {'height': 224, 'width': 224}
lowercase : List[Any] = get_size_dict(lowerCAmelCase )
lowercase : str = do_resize
lowercase : List[str] = size
lowercase : int = resample
lowercase : List[str] = apply_ocr
lowercase : str = ocr_lang
lowercase : Union[str, Any] = tesseract_config
def lowercase ( self : Optional[Any], lowerCAmelCase : np.ndarray, lowerCAmelCase : Dict[str, int], lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR, lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCAmelCase : int, ) -> np.ndarray:
lowercase : Optional[Any] = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowercase : Optional[int] = (size['height'], size['width'])
return resize(lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase ( self : Any, lowerCAmelCase : ImageInput, lowerCAmelCase : bool = None, lowerCAmelCase : Dict[str, int] = None, lowerCAmelCase : PILImageResampling = None, lowerCAmelCase : bool = None, lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[Union[str, TensorType]] = None, lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST, **lowerCAmelCase : List[Any], ) -> PIL.Image.Image:
lowercase : Any = do_resize if do_resize is not None else self.do_resize
lowercase : Union[str, Any] = size if size is not None else self.size
lowercase : Dict = get_size_dict(lowerCAmelCase )
lowercase : List[str] = resample if resample is not None else self.resample
lowercase : str = apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase : Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase : List[str] = tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase : Optional[int] = make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
# All transformations expect numpy arrays.
lowercase : int = [to_numpy_array(lowerCAmelCase ) for image in images]
if apply_ocr:
requires_backends(self, 'pytesseract' )
lowercase : str = []
lowercase : Dict = []
for image in images:
lowercase , lowercase : List[str] = apply_tesseract(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
words_batch.append(lowerCAmelCase )
boxes_batch.append(lowerCAmelCase )
if do_resize:
lowercase : str = [self.resize(image=lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
lowercase : Any = [flip_channel_order(lowerCAmelCase ) for image in images]
lowercase : Dict = [to_channel_dimension_format(lowerCAmelCase, lowerCAmelCase ) for image in images]
lowercase : Tuple = BatchFeature(data={'pixel_values': images}, tensor_type=lowerCAmelCase )
if apply_ocr:
lowercase : List[Any] = words_batch
lowercase : Tuple = boxes_batch
return data
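# --- Worked example for the box-normalization helper at the top of this file
# (called normalize_box at its use site): OCR boxes are rescaled to a
# resolution-independent 0-1000 grid. For a (left, top, right, bottom) box of
# (50, 100, 150, 200) on a 500x1000 page:
#     normalize_box([50, 100, 150, 200], 500, 1000) == [100, 100, 300, 200]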
| 53 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
# Initialise PyTorch model
lowercase__ : List[Any] = RemBertConfig.from_json_file(__lowerCamelCase )
print('''Building PyTorch model from configuration: {}'''.format(str(__lowerCamelCase ) ) )
lowercase__ : Dict = RemBertModel(__lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(__lowerCamelCase ) )
torch.save(model.state_dict() , __lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
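# Example invocation (all paths below are placeholders, not real files):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/ckpt \
#       --rembert_config_file /path/to/rembert/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin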
| 16 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_ = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = "tapas"
def __init__( self : List[Any] ,_snake_case : Dict=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : Union[str, Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : List[Any]="gelu" ,_snake_case : Optional[int]=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=1_024 ,_snake_case : Any=[3, 256, 256, 2, 256, 256, 10] ,_snake_case : List[Any]=0.02 ,_snake_case : Union[str, Any]=1e-12 ,_snake_case : str=0 ,_snake_case : Any=10.0 ,_snake_case : int=0 ,_snake_case : Optional[Any]=1.0 ,_snake_case : List[str]=None ,_snake_case : Tuple=1.0 ,_snake_case : Tuple=False ,_snake_case : List[Any]=None ,_snake_case : int=1.0 ,_snake_case : List[Any]=1.0 ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]="ratio" ,_snake_case : Any=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=64 ,_snake_case : Optional[Any]=32 ,_snake_case : Optional[Any]=False ,_snake_case : Optional[int]=True ,_snake_case : Dict=False ,_snake_case : Tuple=False ,_snake_case : int=True ,_snake_case : List[str]=False ,_snake_case : Dict=None ,_snake_case : Optional[int]=None ,**_snake_case : int ,) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_snake_case ,**_snake_case )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowercase__ : Optional[int] = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Optional[int] = hidden_act
lowercase__ : List[Any] = intermediate_size
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Dict = attention_probs_dropout_prob
lowercase__ : str = max_position_embeddings
lowercase__ : Dict = type_vocab_sizes
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Dict = layer_norm_eps
# Fine-tuning task hyperparameters
lowercase__ : Any = positive_label_weight
lowercase__ : int = num_aggregation_labels
lowercase__ : List[str] = aggregation_loss_weight
lowercase__ : Optional[int] = use_answer_as_supervision
lowercase__ : Optional[Any] = answer_loss_importance
lowercase__ : Union[str, Any] = use_normalized_answer_loss
lowercase__ : str = huber_loss_delta
lowercase__ : str = temperature
lowercase__ : int = aggregation_temperature
lowercase__ : List[Any] = use_gumbel_for_cells
lowercase__ : Tuple = use_gumbel_for_aggregation
lowercase__ : Union[str, Any] = average_approximation_function
lowercase__ : Union[str, Any] = cell_selection_preference
lowercase__ : Any = answer_loss_cutoff
lowercase__ : List[Any] = max_num_rows
lowercase__ : str = max_num_columns
lowercase__ : int = average_logits_per_cell
lowercase__ : str = select_one_column
lowercase__ : str = allow_empty_column_selection
lowercase__ : Any = init_cell_selection_weights_to_zero
lowercase__ : Optional[int] = reset_position_index_per_cell
lowercase__ : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
lowercase__ : Optional[Any] = aggregation_labels
lowercase__ : List[Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels ,_snake_case ):
lowercase__ : Union[str, Any] = {int(_snake_case ): v for k, v in aggregation_labels.items()}
| 16 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Optional[Any] =["speech"]
def __init__( self : Dict , *a : Optional[Any] , **a : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ['''speech'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : str =["speech"]
def __init__( self : Any , *a : Tuple , **a : Any ):
"""simple docstring"""
requires_backends(self , ['''speech'''] )
| 237 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Dict =(DPMSolverSDEScheduler,)
lowerCamelCase : List[str] =1_0
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , **a : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**a )
return config
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=a , beta_end=a )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCamelCase = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
__lowerCamelCase = scheduler.scale_model_input(a , a )
__lowerCamelCase = model(a , a )
__lowerCamelCase = scheduler.step(a , a , a )
__lowerCamelCase = output.prev_sample
__lowerCamelCase = torch.sum(torch.abs(a ) )
__lowerCamelCase = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1e-2
assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
__lowerCamelCase = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCamelCase = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
__lowerCamelCase = scheduler.scale_model_input(a , a )
__lowerCamelCase = model(a , a )
__lowerCamelCase = scheduler.step(a , a , a )
__lowerCamelCase = output.prev_sample
__lowerCamelCase = torch.sum(torch.abs(a ) )
__lowerCamelCase = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1e-2
assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1e-2
assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97 ) < 1e-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1e-2
assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__lowerCamelCase = scheduler.scale_model_input(a , a )
__lowerCamelCase = model(a , a )
__lowerCamelCase = scheduler.step(a , a , a )
__lowerCamelCase = output.prev_sample
__lowerCamelCase = torch.sum(torch.abs(a ) )
__lowerCamelCase = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1e-2
assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**a , use_karras_sigmas=a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
__lowerCamelCase = sample.to(a )
for t in scheduler.timesteps:
__lowerCamelCase = scheduler.scale_model_input(a , a )
__lowerCamelCase = model(a , a )
__lowerCamelCase = scheduler.step(a , a , a )
__lowerCamelCase = output.prev_sample
__lowerCamelCase = torch.sum(torch.abs(a ) )
__lowerCamelCase = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
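
# The test above enables `use_karras_sigmas`. As a minimal, self-contained
# sketch, this is the rho-spaced noise schedule from Karras et al. (2022)
# that the flag refers to; `sigma_min`, `sigma_max`, and `rho` below are
# illustrative values, not the scheduler's actual configuration.
import numpy as np

def karras_sigmas(n, sigma_min=0.1, sigma_max=10.0, rho=7.0):
    # interpolate in sigma**(1/rho) space, then map back (eq. 5 in the paper)
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

print(karras_sigmas(5))  # monotonically decreasing noise levels, sigma_max -> sigma_min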
| 237 | 1 |
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
A__ : Union[str, Any] = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000  # maximum number of environment steps
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 103 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : int = logging.get_logger(__name__)
A__ : Optional[int] = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class __snake_case ( UpperCamelCase_ ):
_a = '''data2vec-vision'''
def __init__( self : Tuple , A_ : List[Any]=7_6_8 , A_ : Union[str, Any]=1_2 , A_ : Dict=1_2 , A_ : List[Any]=3_0_7_2 , A_ : Dict="gelu" , A_ : Tuple=0.0 , A_ : Dict=0.0 , A_ : List[str]=0.02 , A_ : List[str]=1e-12 , A_ : Tuple=2_2_4 , A_ : Dict=1_6 , A_ : Optional[int]=3 , A_ : Optional[int]=False , A_ : Any=False , A_ : Tuple=False , A_ : Optional[int]=False , A_ : int=0.1 , A_ : Union[str, Any]=0.1 , A_ : List[Any]=True , A_ : List[Any]=[3, 5, 7, 1_1] , A_ : Union[str, Any]=[1, 2, 3, 6] , A_ : Optional[int]=True , A_ : Any=0.4 , A_ : str=2_5_6 , A_ : Optional[int]=1 , A_ : str=False , A_ : Optional[int]=2_5_5 , **A_ : Optional[int] , ):
super().__init__(**A_)
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : List[str] = num_hidden_layers
lowerCAmelCase_ : Optional[Any] = num_attention_heads
lowerCAmelCase_ : int = intermediate_size
lowerCAmelCase_ : Union[str, Any] = hidden_act
lowerCAmelCase_ : List[Any] = hidden_dropout_prob
lowerCAmelCase_ : Tuple = attention_probs_dropout_prob
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : Tuple = layer_norm_eps
lowerCAmelCase_ : List[Any] = image_size
lowerCAmelCase_ : List[Any] = patch_size
lowerCAmelCase_ : Any = num_channels
lowerCAmelCase_ : Any = use_mask_token
lowerCAmelCase_ : Optional[int] = use_absolute_position_embeddings
lowerCAmelCase_ : str = use_relative_position_bias
lowerCAmelCase_ : Optional[Any] = use_shared_relative_position_bias
lowerCAmelCase_ : Dict = layer_scale_init_value
lowerCAmelCase_ : Tuple = drop_path_rate
lowerCAmelCase_ : Optional[int] = use_mean_pooling
# decode head attributes (semantic segmentation)
lowerCAmelCase_ : Any = out_indices
lowerCAmelCase_ : int = pool_scales
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase_ : Dict = use_auxiliary_head
lowerCAmelCase_ : str = auxiliary_loss_weight
lowerCAmelCase_ : Optional[Any] = auxiliary_channels
lowerCAmelCase_ : str = auxiliary_num_convs
lowerCAmelCase_ : str = auxiliary_concat_input
lowerCAmelCase_ : str = semantic_loss_ignore_index
class __snake_case ( UpperCamelCase_ ):
_a = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self : Tuple):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def UpperCAmelCase__ ( self : Dict):
return 1e-4
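
# Worked example of the geometry the config above encodes, using the defaults
# from its signature (image_size=224, patch_size=16); this is arithmetic on
# declared values, not a claim about any particular checkpoint:
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2
print(num_patches)  # 196 patch tokens per image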
| 103 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
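
# A minimal sketch of the optional-dependency guard used above, applied to a
# hypothetical module of your own; `importlib.util.find_spec` is the stdlib
# way to probe availability without importing:
import importlib.util

def _is_available(name):
    return importlib.util.find_spec(name) is not None

if _is_available("torch") and _is_available("transformers"):
    pass  # safe to import the torch-backed pipelines here
else:
    pass  # expose dummy objects that raise a helpful error when used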
| 360 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def lowerCAmelCase_ ( __A=None ) -> str:
'''simple docstring'''
UpperCAmelCase__ = argparse.ArgumentParser(add_help=__A, allow_abbrev=__A )
# The main config parser
UpperCAmelCase__ = config_command_parser(__A )
# The subparser to add commands to
UpperCAmelCase__ = config_parser.add_subparsers(title="subcommands", dest="subcommand" )
# Then add other parsers with the parent parser
default_command_parser(__A, parents=[parent_parser] )
update_command_parser(__A, parents=[parent_parser] )
return config_parser
def lowerCAmelCase_ ( ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = get_config_parser()
UpperCAmelCase__ = config_parser.parse_args()
if not hasattr(__A, "func" ):
config_parser.print_help()
exit(1 )
# Run
args.func(__A )
if __name__ == "__main__":
main()
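
# A self-contained sketch of the parser layout built above: one top-level
# parser, per-command subparsers, and dispatch through a `func` default
# (the "default" subcommand and handler below are illustrative):
import argparse

def _run_default(args):
    print("default subcommand invoked")

sketch_parser = argparse.ArgumentParser(allow_abbrev=False)
sketch_subparsers = sketch_parser.add_subparsers(title="subcommands", dest="subcommand")
sketch_default = sketch_subparsers.add_parser("default")
sketch_default.set_defaults(func=_run_default)

sketch_args = sketch_parser.parse_args(["default"])
if hasattr(sketch_args, "func"):
    sketch_args.func(sketch_args)  # mirrors the dispatch in main() above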
| 143 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class A ( unittest.TestCase ):
__UpperCAmelCase : List[str] = StableDiffusionLDMaDPipeline
__UpperCAmelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS
__UpperCAmelCase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCAmelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase_ (self : Any ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
UpperCAmelCase__ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase__ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
UpperCAmelCase__ = CLIPTextModel(__UpperCAmelCase )
UpperCAmelCase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowercase_ (self : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict=0 ) -> Optional[Any]:
"""simple docstring"""
if str(__UpperCAmelCase ).startswith("mps" ):
UpperCAmelCase__ = torch.manual_seed(__UpperCAmelCase )
else:
UpperCAmelCase__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
UpperCAmelCase__ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowercase_ (self : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = StableDiffusionLDMaDPipeline(**__UpperCAmelCase )
UpperCAmelCase__ = ldmad_pipe.to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase )
UpperCAmelCase__ = ldmad_pipe(**__UpperCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = output.rgb, output.depth
UpperCAmelCase__ = rgb[0, -3:, -3:, -1]
UpperCAmelCase__ = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
UpperCAmelCase__ = np.array(
[0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] )
UpperCAmelCase__ = np.array([103.46727, 85.812004, 87.849236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def lowercase_ (self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = StableDiffusionLDMaDPipeline(**__UpperCAmelCase )
UpperCAmelCase__ = ldmad_pipe.to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase )
UpperCAmelCase__ = 3 * [inputs["prompt"]]
# forward
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice_a = rgb[0, -3:, -3:, -1]
        depth_slice_a = depth[0, -3:, -1]
UpperCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase )
UpperCAmelCase__ = 3 * [inputs.pop("prompt" )]
UpperCAmelCase__ = ldmad_pipe.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors="pt" , )
UpperCAmelCase__ = text_inputs["input_ids"].to(__UpperCAmelCase )
UpperCAmelCase__ = ldmad_pipe.text_encoder(__UpperCAmelCase )[0]
UpperCAmelCase__ = prompt_embeds
# forward
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice_b = rgb[0, -3:, -3:, -1]
        depth_slice_b = depth[0, -3:, -1]
        # the plain-prompt and prompt-embeds paths must produce matching slices
        assert np.abs(rgb_slice_a.flatten() - rgb_slice_b.flatten()).max() < 1E-4
        assert np.abs(depth_slice_a.flatten() - depth_slice_b.flatten()).max() < 1E-4
def lowercase_ (self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
UpperCAmelCase__ = StableDiffusionLDMaDPipeline(**__UpperCAmelCase )
UpperCAmelCase__ = ldmad_pipe.to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase )
UpperCAmelCase__ = "french fries"
UpperCAmelCase__ = ldmad_pipe(**__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = output.rgb, output.depth
UpperCAmelCase__ = rgb[0, -3:, -3:, -1]
UpperCAmelCase__ = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
UpperCAmelCase__ = np.array(
[0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] )
UpperCAmelCase__ = np.array([107.84738, 84.62802, 89.962135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def lowercase_ (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ (self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple="cpu" , __UpperCAmelCase : Tuple=torch.floataa , __UpperCAmelCase : Optional[int]=0 ) -> int:
"""simple docstring"""
UpperCAmelCase__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
UpperCAmelCase__ = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 6_4, 6_4) )
UpperCAmelCase__ = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
UpperCAmelCase__ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowercase_ (self : Tuple ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
UpperCAmelCase__ = ldmad_pipe.to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase__ = self.get_inputs(__UpperCAmelCase )
UpperCAmelCase__ = ldmad_pipe(**__UpperCAmelCase )
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()  # sliced from the depth map, not rgb
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2)
UpperCAmelCase__ = np.array(
[0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] )
UpperCAmelCase__ = np.array(
[0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def lowercase_ (self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ (self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any]="cpu" , __UpperCAmelCase : Optional[int]=torch.floataa , __UpperCAmelCase : Optional[int]=0 ) -> str:
"""simple docstring"""
UpperCAmelCase__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
UpperCAmelCase__ = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 6_4, 6_4) )
UpperCAmelCase__ = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
UpperCAmelCase__ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 5_0,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowercase_ (self : Any ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase__ = self.get_inputs(__UpperCAmelCase )
UpperCAmelCase__ = ldmad_pipe(**__UpperCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = output.rgb, output.depth
UpperCAmelCase__ = 0.495586
UpperCAmelCase__ = 0.33795515
UpperCAmelCase__ = 112.48518
UpperCAmelCase__ = 98.489746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def lowercase_ (self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase__ = self.get_inputs(__UpperCAmelCase )
UpperCAmelCase__ = ldmad_pipe(**__UpperCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = output.rgb, output.depth
UpperCAmelCase__ = 0.4194127
UpperCAmelCase__ = 0.35375586
UpperCAmelCase__ = 0.5638502
UpperCAmelCase__ = 0.34686103
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
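
# A minimal end-user sketch of the pipeline exercised above. It assumes
# network access to download the "Intel/ldm3d" checkpoint used in these
# tests, and may be slow without a GPU; the prompt is illustrative.
from diffusers import StableDiffusionLDMaDPipeline

pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
out = pipe("a photograph of an astronaut riding a horse", num_inference_steps=50)
rgb, depth = out.rgb, out.depth  # paired RGB image and depth map, as asserted above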
| 65 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase_ = 1_6
lowercase_ = 3_2
def a ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ) -> Optional[int]:
"""simple docstring"""
_lowercase =AutoTokenizer.from_pretrained(A__ )
_lowercase =load_dataset('glue' , 'mrpc' )
def tokenize_function(A__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
_lowercase =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowercase =datasets.map(
A__ , batched=A__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowercase =tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(A__ : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(A__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
_lowercase =DataLoader(
tokenized_datasets['train'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
_lowercase =DataLoader(
tokenized_datasets['validation'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def a ( A__ : Optional[Any] , A__ : Optional[int] , A__ : List[str] , A__ : Dict ) -> Dict:
"""simple docstring"""
model.eval()
_lowercase =0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowercase =model(**A__ )
_lowercase =outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_lowercase , _lowercase =accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
_lowercase =predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowercase =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
_lowercase =metric.compute()
return eval_metric["accuracy"]
def a ( A__ : str , A__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_lowercase =Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowercase =config['lr']
_lowercase =int(config['num_epochs'] )
_lowercase =int(config['seed'] )
_lowercase =int(config['batch_size'] )
_lowercase =args.model_name_or_path
set_seed(A__ )
_lowercase , _lowercase =get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowercase =AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
_lowercase =(
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowercase =optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
_lowercase =accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
_lowercase =1
_lowercase =(len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowercase =get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
_lowercase =DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
_lowercase =0
# We also need to keep track of the stating epoch so files are named properly
_lowercase =0
_lowercase =evaluate.load('glue' , 'mrpc' )
_lowercase =num_epochs
if args.partial_train_epoch is not None:
_lowercase =args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_lowercase =args.resume_from_checkpoint.split('epoch_' )[1]
_lowercase =''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_lowercase =int(A__ ) + 1
_lowercase =evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print('resumed checkpoint performance:' , A__ )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , 'r' ) as f:
_lowercase =json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_lowercase ={}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
_lowercase =model(**A__ )
_lowercase =outputs.loss
_lowercase =loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_lowercase =F'''epoch_{epoch}'''
_lowercase =os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
_lowercase =evaluation_loop(A__ , A__ , A__ , A__ )
_lowercase =accuracy
_lowercase =lr_scheduler.get_lr()[0]
_lowercase =optimizer.param_groups[0]['lr']
_lowercase =epoch
_lowercase =overall_step
accelerator.print(F'''epoch {epoch}:''' , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , 'w' ) as f:
json.dump(A__ , A__ )
def a ( ) -> Tuple:
"""simple docstring"""
_lowercase =argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=A__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=A__ , )
parser.add_argument(
'--output_dir' , type=A__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=A__ , default=A__ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=A__ , default=A__ , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=A__ , default=2 , help='Number of train epochs.' , )
_lowercase =parser.parse_args()
_lowercase ={'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
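
# The resume path above recovers the epoch number from a checkpoint folder
# named like "epoch_3" by scanning leading digits. A compact equivalent of
# that loop (folder names below are illustrative):
import itertools

def epoch_from_checkpoint(path):
    suffix = path.split("epoch_")[1]
    return int("".join(itertools.takewhile(str.isdigit, suffix)))

print(epoch_from_checkpoint("outputs/epoch_3"))     # 3
print(epoch_from_checkpoint("outputs/epoch_12_x"))  # 12 -- scanning stops at the first non-digit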
| 205 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ = logging.get_logger(__name__)
A_ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "instructblip_vision_model"
def __init__( self: List[Any], a_: str=1_408, a_: int=6_144, a_: int=39, a_: int=16, a_: Optional[int]=224, a_: Any=14, a_: List[Any]="gelu", a_: List[str]=1E-6, a_: int=0.0, a_: Any=1E-10, a_: str=True, **a_: int, ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : Optional[Any] = hidden_size
_snake_case : List[str] = intermediate_size
_snake_case : List[Any] = num_hidden_layers
_snake_case : Tuple = num_attention_heads
_snake_case : int = patch_size
_snake_case : List[str] = image_size
_snake_case : int = initializer_range
_snake_case : List[Any] = attention_dropout
_snake_case : List[Any] = layer_norm_eps
_snake_case : Tuple = hidden_act
_snake_case : Tuple = qkv_bias
@classmethod
def UpperCamelCase_ ( cls: Tuple, a_: Union[str, os.PathLike], **a_: Union[str, Any] ):
'''simple docstring'''
cls._set_token_in_kwargs(a_ )
_snake_case , _snake_case : Dict = cls.get_config_dict(a_, **a_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_snake_case : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "instructblip_qformer"
def __init__( self: Dict, a_: Optional[int]=30_522, a_: Optional[Any]=768, a_: Optional[int]=12, a_: List[str]=12, a_: Optional[Any]=3_072, a_: int="gelu", a_: List[str]=0.1, a_: Union[str, Any]=0.1, a_: List[Any]=512, a_: int=0.02, a_: Tuple=1E-12, a_: Tuple=0, a_: Dict="absolute", a_: str=2, a_: str=1_408, **a_: int, ):
'''simple docstring'''
super().__init__(pad_token_id=a_, **a_ )
_snake_case : Dict = vocab_size
_snake_case : Dict = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : int = num_attention_heads
_snake_case : str = hidden_act
_snake_case : Dict = intermediate_size
_snake_case : Dict = hidden_dropout_prob
_snake_case : Any = attention_probs_dropout_prob
_snake_case : Tuple = max_position_embeddings
_snake_case : List[Any] = initializer_range
_snake_case : Union[str, Any] = layer_norm_eps
_snake_case : List[str] = position_embedding_type
_snake_case : Union[str, Any] = cross_attention_frequency
_snake_case : Optional[Any] = encoder_hidden_size
@classmethod
def UpperCamelCase_ ( cls: int, a_: Union[str, os.PathLike], **a_: Optional[Any] ):
'''simple docstring'''
cls._set_token_in_kwargs(a_ )
_snake_case , _snake_case : int = cls.get_config_dict(a_, **a_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_snake_case : List[Any] = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "instructblip"
lowercase__ = True
def __init__( self: Optional[Any], a_: List[Any]=None, a_: List[Any]=None, a_: Any=None, a_: Dict=32, **a_: Dict ):
'''simple docstring'''
super().__init__(**a_ )
if vision_config is None:
_snake_case : int = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_snake_case : Optional[Any] = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_snake_case : Union[str, Any] = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_snake_case : List[str] = InstructBlipVisionConfig(**a_ )
_snake_case : Tuple = InstructBlipQFormerConfig(**a_ )
_snake_case : List[Any] = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_snake_case : Any = CONFIG_MAPPING[text_model_type](**a_ )
_snake_case : Dict = self.text_config.tie_word_embeddings
_snake_case : List[Any] = self.text_config.is_encoder_decoder
_snake_case : Dict = num_query_tokens
_snake_case : Dict = self.vision_config.hidden_size
_snake_case : int = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_snake_case : Any = 1.0
_snake_case : Union[str, Any] = 0.02
@classmethod
def UpperCamelCase_ ( cls: int, a_: InstructBlipVisionConfig, a_: InstructBlipQFormerConfig, a_: PretrainedConfig, **a_: List[Any], ):
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **a_, )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : List[Any] = copy.deepcopy(self.__dict__ )
_snake_case : Tuple = self.vision_config.to_dict()
_snake_case : Any = self.qformer_config.to_dict()
_snake_case : Union[str, Any] = self.text_config.to_dict()
_snake_case : int = self.__class__.model_type
return output
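
# A minimal composition sketch mirroring `from_vision_qformer_text_configs`
# above; the public class names come from this file's own call sites, and
# OPTConfig is the default text backbone named in the __init__ log message.
# Requires `transformers` to be importable.
from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)

composed = InstructBlipConfig.from_vision_qformer_text_configs(
    vision_config=InstructBlipVisionConfig(),
    qformer_config=InstructBlipQFormerConfig(),
    text_config=OPTConfig(),
    num_query_tokens=32,
)
print(composed.num_query_tokens)  # 32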
| 132 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ = logging.get_logger(__name__)
A_ = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase( __a , __a ):
'''simple docstring'''
lowercase__ = "swin"
lowercase__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self: Any, a_: List[str]=224, a_: List[Any]=4, a_: List[Any]=3, a_: Dict=96, a_: List[str]=[2, 2, 6, 2], a_: int=[3, 6, 12, 24], a_: int=7, a_: str=4.0, a_: Optional[Any]=True, a_: Dict=0.0, a_: List[Any]=0.0, a_: List[str]=0.1, a_: Union[str, Any]="gelu", a_: Dict=False, a_: Union[str, Any]=0.02, a_: Optional[int]=1E-5, a_: Optional[int]=32, a_: Tuple=None, a_: Union[str, Any]=None, **a_: Any, ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : Any = image_size
_snake_case : List[Any] = patch_size
_snake_case : Tuple = num_channels
_snake_case : str = embed_dim
_snake_case : Union[str, Any] = depths
_snake_case : int = len(a_ )
_snake_case : Union[str, Any] = num_heads
_snake_case : List[str] = window_size
_snake_case : str = mlp_ratio
_snake_case : Union[str, Any] = qkv_bias
_snake_case : Dict = hidden_dropout_prob
_snake_case : str = attention_probs_dropout_prob
_snake_case : Union[str, Any] = drop_path_rate
_snake_case : Optional[int] = hidden_act
_snake_case : str = use_absolute_embeddings
_snake_case : Tuple = layer_norm_eps
_snake_case : List[Any] = initializer_range
_snake_case : Optional[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_snake_case : Any = int(embed_dim * 2 ** (len(a_ ) - 1) )
_snake_case : Any = ["""stem"""] + [f"stage{idx}" for idx in range(1, len(a_ ) + 1 )]
_snake_case , _snake_case : List[str] = get_aligned_output_features_output_indices(
out_features=a_, out_indices=a_, stage_names=self.stage_names )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = version.parse("1.11" )
@property
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return 1E-4
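
# Worked example of the channel arithmetic in `__init__` above: the hidden
# size doubles once per stage, so with the signature defaults embed_dim=96
# and depths=[2, 2, 6, 2]:
embed_dim, depths = 96, [2, 2, 6, 2]
print(int(embed_dim * 2 ** (len(depths) - 1)))                          # 768
print(["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)])  # ['stem', 'stage1', ..., 'stage4']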
| 132 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowercase ( unittest.TestCase ):
@slow
def a ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = TFAutoModel.from_pretrained(snake_case , from_pt=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = AutoModel.from_pretrained(snake_case , from_tf=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def a ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = TFAutoModelForPreTraining.from_pretrained(snake_case , from_pt=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = AutoModelForPreTraining.from_pretrained(snake_case , from_tf=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def a ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = TFAutoModelForCausalLM.from_pretrained(snake_case , from_pt=snake_case )
snake_case_ , snake_case_ = TFAutoModelForCausalLM.from_pretrained(
snake_case , output_loading_info=snake_case , from_pt=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = AutoModelForCausalLM.from_pretrained(snake_case , from_tf=snake_case )
snake_case_ , snake_case_ = AutoModelForCausalLM.from_pretrained(
snake_case , output_loading_info=snake_case , from_tf=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def a ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = TFAutoModelWithLMHead.from_pretrained(snake_case , from_pt=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = AutoModelWithLMHead.from_pretrained(snake_case , from_tf=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def a ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = TFAutoModelForMaskedLM.from_pretrained(snake_case , from_pt=snake_case )
snake_case_ , snake_case_ = TFAutoModelForMaskedLM.from_pretrained(
snake_case , output_loading_info=snake_case , from_pt=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = AutoModelForMaskedLM.from_pretrained(snake_case , from_tf=snake_case )
snake_case_ , snake_case_ = AutoModelForMaskedLM.from_pretrained(
snake_case , output_loading_info=snake_case , from_tf=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def a ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case , from_pt=snake_case )
snake_case_ , snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained(
snake_case , output_loading_info=snake_case , from_pt=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = AutoModelForSeqaSeqLM.from_pretrained(snake_case , from_tf=snake_case )
snake_case_ , snake_case_ = AutoModelForSeqaSeqLM.from_pretrained(
snake_case , output_loading_info=snake_case , from_tf=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def a ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = TFAutoModelForSequenceClassification.from_pretrained(snake_case , from_pt=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = AutoModelForSequenceClassification.from_pretrained(snake_case , from_tf=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def a ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = TFAutoModelForQuestionAnswering.from_pretrained(snake_case , from_pt=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
snake_case_ = AutoModelForQuestionAnswering.from_pretrained(snake_case , from_tf=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
    def test_from_pretrained_identifier( self ):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True )
        self.assertIsInstance(model, TFBertForMaskedLM )
        self.assertEqual(model.num_parameters(), 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=True ), 1_4410 )

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True )
        self.assertIsInstance(model, BertForMaskedLM )
        self.assertEqual(model.num_parameters(), 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=True ), 1_4410 )
    def test_from_identifier_from_model_type( self ):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True )
        self.assertIsInstance(model, TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters(), 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=True ), 1_4410 )

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True )
        self.assertIsInstance(model, RobertaForMaskedLM )
        self.assertEqual(model.num_parameters(), 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=True ), 1_4410 )
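
# A minimal sketch of the PT<->TF round trip the tests above assert, using
# the tiny checkpoint identifier imported at the top of the file. It assumes
# both torch and tensorflow are installed and that the hub repo ships both
# weight formats (as these tests do).
from transformers import AutoModel, TFAutoModel
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER

tf_model = TFAutoModel.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)  # PT weights -> TF model
pt_model = AutoModel.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)    # TF weights -> PT model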
| 285 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase ( unittest.TestCase ):
def a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def a ( self ):
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting'
snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case )
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = 50
snake_case_ = jax.device_count()
snake_case_ = num_samples * [prompt]
snake_case_ = num_samples * [init_image]
snake_case_ = num_samples * [mask_image]
snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case )
# shard inputs and rng
snake_case_ = replicate(snake_case )
snake_case_ = jax.random.split(snake_case , jax.device_count() )
snake_case_ = shard(snake_case )
snake_case_ = shard(snake_case )
snake_case_ = shard(snake_case )
snake_case_ = pipeline(
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case )
        images = output.images.reshape(num_samples, 512, 512, 3 )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
[0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
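
# A self-contained sketch of the replicate/shard pattern used above: params
# are broadcast to every local device, while batch leaves are reshaped so
# axis 0 equals the device count before a pmapped call.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.device_count()
batch = jnp.zeros((n * 2, 3))            # illustrative host-level batch
sharded = shard(batch)                   # -> shape (n, 2, 3), one leading slice per device
params = replicate({"w": jnp.ones(3)})   # every leaf gains a leading device axis
print(sharded.shape, jax.tree_util.tree_map(jnp.shape, params))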
| 285 | 1 |
"""simple docstring"""
def __lowercase ( snake_case_ : int ) ->str:
'''simple docstring'''
if number > 0:
raise ValueError('''input must be a negative integer''' )
__A : int = len(bin(snake_case_ )[3:] )
__A : int = bin(abs(snake_case_ ) - (1 << binary_number_length) )[3:]
__A : int = (
(
'''1'''
+ '''0''' * (binary_number_length - len(snake_case_ ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
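    # Worked examples of the conversion above:
    print(__lowercase(-5))   # 0b1011   (two's complement of -5 in 4 bits)
    print(__lowercase(-17))  # 0b101111 (two's complement of -17 in 6 bits)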
| 291 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = """sew-d"""
def __init__( self , __lowerCamelCase=32 , __lowerCamelCase=768 , __lowerCamelCase=12 , __lowerCamelCase=12 , __lowerCamelCase=3072 , __lowerCamelCase=2 , __lowerCamelCase=512 , __lowerCamelCase=256 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=("p2c", "c2p") , __lowerCamelCase="layer_norm" , __lowerCamelCase="gelu_python" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-7 , __lowerCamelCase=1e-5 , __lowerCamelCase="group" , __lowerCamelCase="gelu" , __lowerCamelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __lowerCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __lowerCamelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __lowerCamelCase=False , __lowerCamelCase=128 , __lowerCamelCase=16 , __lowerCamelCase=True , __lowerCamelCase=0.0_5 , __lowerCamelCase=10 , __lowerCamelCase=2 , __lowerCamelCase=0.0 , __lowerCamelCase=10 , __lowerCamelCase=0 , __lowerCamelCase="mean" , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=256 , __lowerCamelCase=0 , __lowerCamelCase=1 , __lowerCamelCase=2 , **__lowerCamelCase , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase )
__A : str = hidden_size
__A : List[Any] = feat_extract_norm
__A : Tuple = feat_extract_activation
__A : Dict = list(__lowerCamelCase )
__A : int = list(__lowerCamelCase )
__A : List[Any] = list(__lowerCamelCase )
__A : Any = conv_bias
__A : List[Any] = num_conv_pos_embeddings
__A : Any = num_conv_pos_embedding_groups
__A : Optional[Any] = len(self.conv_dim )
__A : int = num_hidden_layers
__A : Union[str, Any] = intermediate_size
__A : Union[str, Any] = squeeze_factor
__A : int = max_position_embeddings
__A : Tuple = position_buckets
__A : Tuple = share_att_key
__A : List[str] = relative_attention
__A : Optional[Any] = norm_rel_ebd
__A : Dict = list(__lowerCamelCase )
__A : str = hidden_act
__A : List[str] = num_attention_heads
__A : Union[str, Any] = hidden_dropout
__A : Optional[int] = attention_dropout
__A : Optional[Any] = activation_dropout
__A : List[str] = feat_proj_dropout
__A : str = final_dropout
__A : Tuple = layer_norm_eps
__A : int = feature_layer_norm_eps
__A : Optional[int] = initializer_range
__A : Dict = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : int = apply_spec_augment
__A : Any = mask_time_prob
__A : Optional[int] = mask_time_length
__A : Any = mask_time_min_masks
__A : int = mask_feature_prob
__A : Tuple = mask_feature_length
__A : Dict = mask_feature_min_masks
# ctc loss
__A : Tuple = ctc_loss_reduction
__A : Union[str, Any] = ctc_zero_infinity
# sequence classification
__A : Tuple = use_weighted_layer_sum
__A : List[str] = classifier_proj_size
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
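
# Worked example of the property above: the model's overall input-to-frame
# stride is the product of the per-layer conv strides. With the signature
# default conv_stride:
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 input samples per output frame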
| 291 | 1 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Any=[1, 2, 1] , UpperCamelCase__ : int=[2, 2, 4] , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[int]=2.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Union[str, Any]=1E-5 , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=10 , UpperCamelCase__ : Dict=8 , UpperCamelCase__ : Tuple=["stage1", "stage2", "stage3"] , UpperCamelCase__ : Tuple=[1, 2, 3] , ) -> Dict:
"""simple docstring"""
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = image_size
__magic_name__ = patch_size
__magic_name__ = num_channels
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
__magic_name__ = window_size
__magic_name__ = mlp_ratio
__magic_name__ = qkv_bias
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = drop_path_rate
__magic_name__ = hidden_act
__magic_name__ = use_absolute_embeddings
__magic_name__ = patch_norm
__magic_name__ = layer_norm_eps
__magic_name__ = initializer_range
__magic_name__ = is_training
__magic_name__ = scope
__magic_name__ = use_labels
__magic_name__ = type_sequence_label_size
__magic_name__ = encoder_stride
__magic_name__ = out_features
__magic_name__ = out_indices
def _lowercase ( self : str ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Tuple ) -> str:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[str]:
"""simple docstring"""
__magic_name__ = MaskFormerSwinModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ )
__magic_name__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__magic_name__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowercase ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ) -> Tuple:
"""simple docstring"""
__magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(UpperCamelCase__ ):
__magic_name__ = ["""stem"""]
__magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ )
def _lowercase ( self : Any ) -> Any:
"""simple docstring"""
__magic_name__ = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs
__magic_name__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
'''simple docstring'''
a__ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
a__ = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def _lowercase ( self : Any ) -> List[str]:
"""simple docstring"""
__magic_name__ = MaskFormerSwinModelTester(self )
__magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _lowercase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
def _lowercase ( self : str ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return
def _lowercase ( self : str ) -> str:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
@unittest.skip("""Swin does not use inputs_embeds""" )
def _lowercase ( self : Any ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _lowercase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
def _lowercase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _lowercase ( self : Tuple ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _lowercase ( self : List[str] ) -> Dict:
"""simple docstring"""
pass
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__magic_name__ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__magic_name__ = outputs.hidden_states
__magic_name__ = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# Swin has a different seq_length
__magic_name__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = 3
__magic_name__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__magic_name__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__magic_name__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _lowercase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
def _lowercase ( self : Dict ) -> Any:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t ):
    # NaN != NaN, so zero out NaNs in place before comparing with torch.allclose
    t[t != t] = 0
    return t
def check_equivalence(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int={} ):
with torch.no_grad():
__magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ )
__magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple()
def recursive_check(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ):
if isinstance(UpperCamelCase__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ):
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object ).any()}. Dict has'''
F''' `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object ).any()}.'''
) , )
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _A ):
'''simple docstring'''
a__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
a__ = MaskFormerSwinConfig
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__magic_name__ = MaskFormerSwinModelTester(self )
def _lowercase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__magic_name__ = backbone_class(UpperCamelCase__ )
backbone.to(UpperCamelCase__ )
backbone.eval()
__magic_name__ = backbone(**UpperCamelCase__ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , UpperCamelCase__ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__magic_name__ = backbone(**UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) == len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__magic_name__ , __magic_name__ , __magic_name__ = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__magic_name__ = backbone(**UpperCamelCase__ , output_attentions=UpperCamelCase__ )
self.assertIsNotNone(outputs.attentions )
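# A minimal standalone sketch of the tuple-vs-dict output equivalence check
# exercised above, assuming plain torch tensors and nested tuples (illustrative
# only, not the exact test-mixin helper):
def outputs_equivalent(tuple_obj, dict_obj, atol=1e-5):
    if isinstance(tuple_obj, (list, tuple)):
        return all(outputs_equivalent(t, d, atol) for t, d in zip(tuple_obj, dict_obj))
    if tuple_obj is None:
        return dict_obj is None
    # NaN != NaN, so map NaNs to zero before comparing
    return torch.allclose(torch.nan_to_num(tuple_obj), torch.nan_to_num(dict_obj), atol=atol)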
| 88 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Dict =['pixel_values']
def __init__( self : Optional[int] , __lowercase : bool = True , __lowercase : Optional[Dict[str, int]] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 255 , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Dict , ):
'''simple docstring'''
super().__init__(**__lowercase )
__a = size if size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase )
__a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase , default_to_square=__lowercase , param_name="""crop_size""" )
__a = do_resize
__a = do_rescale
__a = do_normalize
__a = do_center_crop
__a = crop_size
__a = size
__a = resample
__a = rescale_factor
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[Any] , ):
'''simple docstring'''
__a = get_size_dict(__lowercase )
if "shortest_edge" in size:
__a = get_resize_output_image_size(__lowercase , size=size["""shortest_edge"""] , default_to_square=__lowercase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__a = (size["""height"""], size["""width"""])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : str , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ):
'''simple docstring'''
__a = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : float , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str ):
'''simple docstring'''
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ):
'''simple docstring'''
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Tuple , __lowercase : ImageInput , __lowercase : Optional[bool] = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : int = None , __lowercase : Optional[bool] = None , __lowercase : Optional[float] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowercase : List[Any] , ):
'''simple docstring'''
__a = do_resize if do_resize is not None else self.do_resize
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(__lowercase , param_name="""crop_size""" , default_to_square=__lowercase )
__a = resample if resample is not None else self.resample
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(__lowercase )
if not is_batched(__lowercase ):
__a = [images]
if not valid_images(__lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__a = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
__a = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images]
if do_rescale:
__a = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
__a = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
__a = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__a = {"""pixel_values""": images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
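# A hedged usage sketch of the image processor above; the class name is
# obfuscated in this file, so `ImageProcessor` below is an assumed stand-in:
# >>> from PIL import Image
# >>> processor = ImageProcessor(do_resize=True, size={"height": 224, "width": 224})
# >>> batch = processor(images=Image.open("cat.png"), return_tensors="pt")
# >>> batch["pixel_values"].shape
# torch.Size([1, 3, 224, 224])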
| 302 | 0 |
"""simple docstring"""
def count_divisors(n):
    # d(n) = product over prime factors of (multiplicity + 1)
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    # walk the triangle numbers t_num = 1 + 2 + ... + i until one has more
    # than 500 divisors
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 5_00:
            break
    return t_num
if __name__ == "__main__":
print(solution())
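# The divisor count relies on the multiplicative formula d(n) = prod(e_i + 1)
# over the prime factorization n = prod(p_i ** e_i). Quick checks (28 has the
# six divisors 1, 2, 4, 7, 14, 28; 76576500 is the commonly cited answer):
# >>> count_divisors(28)
# 6
# >>> solution()
# 76576500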
| 74 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__UpperCamelCase : List[Any] = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BlenderbotSmallTokenizer
def __init__( self : Dict ,lowercase_ : Dict=None ,lowercase_ : Union[str, Any]=None ,lowercase_ : Any="<|endoftext|>" ,lowercase_ : Optional[Any]="<|endoftext|>" ,lowercase_ : Dict="<|endoftext|>" ,lowercase_ : Optional[int]=False ,lowercase_ : Union[str, Any]=True ,**lowercase_ : Union[str, Any] ,):
super().__init__(
ByteLevelBPETokenizer(
vocab=lowercase_ ,merges=lowercase_ ,add_prefix_space=lowercase_ ,trim_offsets=lowercase_ ,) ,bos_token=lowercase_ ,eos_token=lowercase_ ,unk_token=lowercase_ ,**lowercase_ ,)
lowerCAmelCase__ : Tuple = add_prefix_space
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Optional[int] ,lowercase_ : int=None ):
lowerCAmelCase__ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : List[int] ,lowercase_ : Optional[List[int]] = None ):
lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
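# A hedged usage sketch (upstream this class is BlenderbotSmallTokenizerFast;
# loading from the Hub assumes network access):
# >>> tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
# >>> tok("sample text")["input_ids"]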
| 74 | 1 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ = "pt"
elif is_tf_available():
A_ = "tf"
else:
A_ = "jax"
class _snake_case ( A_ , unittest.TestCase ):
_A : Dict = PerceiverTokenizer
_A : Any = False
def __UpperCamelCase ( self : Union[str, Any] ):
super().setUp()
SCREAMING_SNAKE_CASE:List[str] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCamelCase ( self : Dict ):
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
def __UpperCamelCase ( self : int ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ):
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def __UpperCamelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : List[str]=False ,SCREAMING_SNAKE_CASE__ : Optional[int]=20 ,SCREAMING_SNAKE_CASE__ : List[str]=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
SCREAMING_SNAKE_CASE:Any = []
for i in range(len(_lowerCamelCase ) ):
try:
SCREAMING_SNAKE_CASE:Dict = tokenizer.decode([i] ,clean_up_tokenization_spaces=_lowerCamelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE:Tuple = list(filter(lambda SCREAMING_SNAKE_CASE__ : re.match(R"^[ a-zA-Z]+$" ,t[1] ) ,_lowerCamelCase ) )
SCREAMING_SNAKE_CASE:Optional[Any] = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] ,add_special_tokens=_lowerCamelCase ) ,_lowerCamelCase ) )
if max_length is not None and len(_lowerCamelCase ) > max_length:
SCREAMING_SNAKE_CASE:List[str] = toks[:max_length]
if min_length is not None and len(_lowerCamelCase ) < min_length and len(_lowerCamelCase ) > 0:
while len(_lowerCamelCase ) < min_length:
SCREAMING_SNAKE_CASE:str = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE:str = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE:Any = tokenizer.decode(_lowerCamelCase ,clean_up_tokenization_spaces=_lowerCamelCase )
if " " not in output_txt and len(_lowerCamelCase ) > 1:
SCREAMING_SNAKE_CASE:str = (
tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=_lowerCamelCase )
+ " "
+ tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=_lowerCamelCase )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE:Any = " " + output_txt
SCREAMING_SNAKE_CASE:List[str] = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
return output_txt, output_ids
def __UpperCamelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE:Dict = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE:Any = "Unicode €."
SCREAMING_SNAKE_CASE:Optional[Any] = tokenizer(_lowerCamelCase )
SCREAMING_SNAKE_CASE:Any = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"] ,_lowerCamelCase )
# decoding
SCREAMING_SNAKE_CASE:List[str] = tokenizer.decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,"[CLS]Unicode €.[SEP]" )
SCREAMING_SNAKE_CASE:Any = tokenizer("e è é ê ë" )
SCREAMING_SNAKE_CASE:Tuple = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"] ,_lowerCamelCase )
# decoding
SCREAMING_SNAKE_CASE:Union[str, Any] = tokenizer.decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,"[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) ,"[CLS]e è é ê ë[SEP]" )
def __UpperCamelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE:Optional[int] = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE:Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
SCREAMING_SNAKE_CASE:Tuple = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
SCREAMING_SNAKE_CASE:Union[str, Any] = tokenizer(_lowerCamelCase ,padding=_lowerCamelCase ,return_tensors=_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE:Any = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE:str = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertEqual((2, 38) ,batch.input_ids.shape )
self.assertEqual((2, 38) ,batch.attention_mask.shape )
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:List[str] = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE:Any = ["A long paragraph for summarization.", "Another paragraph for summarization."]
SCREAMING_SNAKE_CASE:str = tokenizer(_lowerCamelCase ,padding=_lowerCamelCase ,return_tensors=_lowerCamelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" ,_lowerCamelCase )
self.assertIn("attention_mask" ,_lowerCamelCase )
self.assertNotIn("decoder_input_ids" ,_lowerCamelCase )
self.assertNotIn("decoder_attention_mask" ,_lowerCamelCase )
def __UpperCamelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE:List[str] = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE:Union[str, Any] = [
"Summary of the text.",
"Another summary.",
]
SCREAMING_SNAKE_CASE:Any = tokenizer(
text_target=_lowerCamelCase ,max_length=32 ,padding="max_length" ,truncation=_lowerCamelCase ,return_tensors=_lowerCamelCase )
self.assertEqual(32 ,targets["input_ids"].shape[1] )
def __UpperCamelCase ( self : List[Any] ):
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE:str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
SCREAMING_SNAKE_CASE:Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE:Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE:str = " He is very happy, UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE:List[Any] = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE:Any = tokenizer.__class__.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE:Any = after_tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
shutil.rmtree(_lowerCamelCase )
SCREAMING_SNAKE_CASE:Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE:Union[str, Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE:int = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
SCREAMING_SNAKE_CASE:Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
SCREAMING_SNAKE_CASE:Union[str, Any] = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE:int = tokenizer.__class__.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE:List[str] = after_tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertIn("new_additional_special_token" ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
SCREAMING_SNAKE_CASE:Dict = tokenizer.__class__.from_pretrained(_lowerCamelCase ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(_lowerCamelCase )
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,"special_tokens_map.json" ) ,encoding="utf-8" ) as json_file:
SCREAMING_SNAKE_CASE:List[Any] = json.load(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,"tokenizer_config.json" ) ,encoding="utf-8" ) as json_file:
SCREAMING_SNAKE_CASE:Dict = json.load(_lowerCamelCase )
SCREAMING_SNAKE_CASE:Tuple = [F'''<extra_id_{i}>''' for i in range(125 )]
SCREAMING_SNAKE_CASE:Union[str, Any] = added_tokens_extra_ids + [
"an_additional_special_token"
]
SCREAMING_SNAKE_CASE:Optional[int] = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_lowerCamelCase ,"special_tokens_map.json" ) ,"w" ,encoding="utf-8" ) as outfile:
json.dump(_lowerCamelCase ,_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,"tokenizer_config.json" ) ,"w" ,encoding="utf-8" ) as outfile:
json.dump(_lowerCamelCase ,_lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE:List[Any] = tokenizer_class.from_pretrained(
_lowerCamelCase ,)
self.assertIn(
"an_additional_special_token" ,tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) ,)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE:Union[str, Any] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" ,lstrip=_lowerCamelCase )]
SCREAMING_SNAKE_CASE:List[str] = tokenizer_class.from_pretrained(
_lowerCamelCase ,additional_special_tokens=_lowerCamelCase ,)
self.assertIn("a_new_additional_special_token" ,tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] ,tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) ,)
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:Any = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) ,"�" )
def __UpperCamelCase ( self : List[str] ):
pass
def __UpperCamelCase ( self : Any ):
pass
def __UpperCamelCase ( self : str ):
pass
def __UpperCamelCase ( self : int ):
pass
def __UpperCamelCase ( self : Any ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
SCREAMING_SNAKE_CASE:int = self.get_tokenizers(fast=_lowerCamelCase ,do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE:List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
SCREAMING_SNAKE_CASE:Any = tokenizer.convert_tokens_to_string(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
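# Perceiver tokenizes raw UTF-8 bytes shifted past its special tokens; from the
# expected IDs above ('U' == byte 85 maps to ID 91, 'n' == 110 maps to 116),
# the offset is 6. A minimal sketch of that scheme:
def bytes_to_ids(text, offset=6):
    return [b + offset for b in text.encode("utf-8")]

assert bytes_to_ids("Un") == [91, 116]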
| 139 |
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1_00_00_00) -> int:
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            # the shortest surface path on an a x b x c cuboid, after
            # unfolding, is sqrt((a + b)**2 + c**2) with sum_shortest_sides = a + b
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"{solution() = }")
| 288 | 0 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list = []
    # for each node, push it and its adjacency list onto the queue with the
    # node's rank; heapq implements a min-heap, so -1 * len(value) turns it
    # into a max-priority queue
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem, remove argmax from elem's
            # adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 369 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 335 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
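# The block above follows the lazy-import pattern: framework-specific
# submodules are imported only on first attribute access. A simplified sketch
# of the idea (the real _LazyModule handles more cases):
import importlib
import types

class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")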
| 249 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
a_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig ):
UpperCamelCase =1_00_00
UpperCamelCase =None
UpperCamelCase =None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder ):
UpperCamelCase =ParquetConfig
def _lowerCamelCase ( self ) -> List[str]:
return datasets.DatasetInfo(features=self.config.features )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__lowercase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase_ , (str, list, tuple) ):
__lowercase : str = data_files
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowercase : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__lowercase : int = [dl_manager.iter_files(UpperCamelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__lowercase : int = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowercase : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__lowercase : Any = [dl_manager.iter_files(UpperCamelCase_ ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCamelCase_ ):
with open(UpperCamelCase_ , '''rb''' ) as f:
__lowercase : Any = datasets.Features.from_arrow_schema(pq.read_schema(UpperCamelCase_ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCamelCase_ , gen_kwargs={'''files''': files} ) )
return splits
def _lowerCamelCase ( self , UpperCamelCase_ ) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__lowercase : Tuple = table_cast(UpperCamelCase_ , self.info.features.arrow_schema )
return pa_table
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple:
__lowercase : Union[str, Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase_ ) ):
with open(UpperCamelCase_ , '''rb''' ) as f:
__lowercase : Union[str, Any] = pq.ParquetFile(UpperCamelCase_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__lowercase : Dict = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(UpperCamelCase_ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCamelCase_ )}: {e}""" )
raise
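# The builder above streams record batches rather than materializing whole
# files; the same pattern with plain pyarrow, reusing the pa/pq imports at the
# top of this file (a minimal sketch):
def iter_parquet_tables(path, batch_size=10_000, columns=None):
    parquet_file = pq.ParquetFile(path)
    for record_batch in parquet_file.iter_batches(batch_size=batch_size, columns=columns):
        yield pa.Table.from_batches([record_batch])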
| 249 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = "new-model"
if is_tf_available():
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Union[str, Any] = NewModelConfig
@require_tf
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''bert-base-cased'''
__lowercase = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
__lowercase = TFAutoModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = '''bert-base-cased'''
__lowercase = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
__lowercase = TFAutoModelForPreTraining.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
__lowercase = TFAutoModelForCausalLM.from_pretrained(_lowerCamelCase )
__lowercase , __lowercase = TFAutoModelForCausalLM.from_pretrained(_lowerCamelCase ,output_loading_info=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
__lowercase = TFAutoModelWithLMHead.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
__lowercase = TFAutoModelForMaskedLM.from_pretrained(_lowerCamelCase )
__lowercase , __lowercase = TFAutoModelForMaskedLM.from_pretrained(_lowerCamelCase ,output_loading_info=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
__lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase )
__lowercase , __lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase ,output_loading_info=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
__lowercase = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
__lowercase = TFAutoModelForSequenceClassification.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
__lowercase = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
__lowercase = TFAutoModelForQuestionAnswering.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
@slow
@require_tensorflow_probability
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__lowercase = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
__lowercase = TFAutoModelForTableQuestionAnswering.from_pretrained(_lowerCamelCase )
__lowercase , __lowercase = TFAutoModelForTableQuestionAnswering.from_pretrained(
_lowerCamelCase ,output_loading_info=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = TFAutoModelWithLMHead.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
self.assertEqual(model.num_parameters() ,14410 )
self.assertEqual(model.num_parameters(only_trainable=_lowerCamelCase ) ,14410 )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = TFAutoModelWithLMHead.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
self.assertEqual(model.num_parameters() ,14410 )
self.assertEqual(model.num_parameters(only_trainable=_lowerCamelCase ) ,14410 )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
__lowercase = copy.deepcopy(model.config )
__lowercase = ['''FunnelBaseModel''']
__lowercase = TFAutoModel.from_config(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_lowerCamelCase )
__lowercase = TFAutoModel.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
try:
AutoConfig.register('''new-model''' ,_lowerCamelCase )
__lowercase = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_lowerCamelCase ):
auto_class.register(_lowerCamelCase ,_lowerCamelCase )
auto_class.register(_lowerCamelCase ,_lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCamelCase ):
auto_class.register(_lowerCamelCase ,_lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowercase = BertModelTester(self ).get_config()
__lowercase = NewModelConfig(**tiny_config.to_dict() )
__lowercase = auto_class.from_config(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_lowerCamelCase )
__lowercase = auto_class.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
_lowerCamelCase ,'''bert-base is not a local folder and is not a valid model identifier''' ):
__lowercase = TFAutoModel.from_pretrained('''bert-base''' )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
_lowerCamelCase ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__lowercase = TFAutoModel.from_pretrained(_lowerCamelCase ,revision='''aaaaaa''' )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
_lowerCamelCase ,'''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' ,):
__lowercase = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(_lowerCamelCase ,'''Use `from_pt=True` to load this model''' ):
__lowercase = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
__lowercase = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
# With a sharded checkpoint
__lowercase = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
__lowercase = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
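# The registration flow exercised above, in brief (public Auto API; TFNewModel
# stands in for any custom TF model class):
# >>> AutoConfig.register("new-model", NewModelConfig)
# >>> TFAutoModel.register(NewModelConfig, TFNewModel)
# >>> model = TFAutoModel.from_config(NewModelConfig())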
| 217 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Union[str, Any] = "gpt_neox_japanese"
def __init__(self ,_lowerCamelCase=32000 ,_lowerCamelCase=2560 ,_lowerCamelCase=32 ,_lowerCamelCase=32 ,_lowerCamelCase=4 ,_lowerCamelCase="gelu" ,_lowerCamelCase=1.0_0 ,_lowerCamelCase=10000 ,_lowerCamelCase=2048 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-5 ,_lowerCamelCase=True ,_lowerCamelCase=31996 ,_lowerCamelCase=31999 ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.0 ,**_lowerCamelCase ,) -> Optional[int]:
'''simple docstring'''
super().__init__(bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_multiple_size
__lowercase = hidden_act
__lowercase = rotary_pct
__lowercase = rotary_emb_base
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = use_cache
__lowercase = attention_dropout
__lowercase = hidden_dropout
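# A hedged usage sketch of the config above (upstream the class is
# GPTNeoXJapaneseConfig; the directory path is illustrative):
# >>> config = GPTNeoXJapaneseConfig(hidden_size=1024, num_hidden_layers=16)
# >>> config.save_pretrained("./gpt-neox-ja-config")
# >>> reloaded = GPTNeoXJapaneseConfig.from_pretrained("./gpt-neox-ja-config")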
| 217 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a__ : Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase__ ( __lowercase : Optional[Any] ) -> List[str]:
"""simple docstring"""
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , __lowercase , )
if isinstance(__lowercase , torch.Tensor ):
return image
elif isinstance(__lowercase , PIL.Image.Image ):
__UpperCamelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
__UpperCamelCase = image[0].size
__UpperCamelCase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__UpperCamelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
__UpperCamelCase = np.concatenate(__lowercase , axis=0 )
__UpperCamelCase = np.array(__lowercase ).astype(np.floataa ) / 2_5_5.0
__UpperCamelCase = image.transpose(0 , 3 , 1 , 2 )
__UpperCamelCase = 2.0 * image - 1.0
__UpperCamelCase = torch.from_numpy(__lowercase )
elif isinstance(image[0] , torch.Tensor ):
__UpperCamelCase = torch.cat(__lowercase , dim=0 )
return image
def _preprocess_mask(mask):
    """simple docstring"""
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('L').resize((w, h), resample=PIL_INTERPOLATION['nearest']))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
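# A self-contained sketch of the normalization _preprocess_image performs:
# snap the size to a multiple of 8, scale pixel values to [0, 1], move to
# NCHW, then map to [-1, 1]. The sample image and sizes are illustrative.
def _demo_to_model_range(pil_image):
    w, h = pil_image.size
    w, h = (x - x % 8 for x in (w, h))  # snap to a multiple of 8
    arr = np.array(pil_image.resize((w, h)), dtype=np.float32)[None, :] / 255.0
    arr = arr.transpose(0, 3, 1, 2)  # NHWC -> NCHW
    return torch.from_numpy(2.0 * arr - 1.0)

_demo = _demo_to_model_range(PIL.Image.new("RGB", (37, 41), color=(128, 64, 32)))
assert tuple(_demo.shape) == (1, 3, 40, 32)
assert float(_demo.min()) >= -1.0 and float(_demo.max()) <= 1.0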
class RePaintPipeline(DiffusionPipeline):
"""simple docstring"""
    unet: UNet2DModel
    scheduler: RePaintScheduler
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 53 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
    required_optional_params = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
'''simple docstring'''
return 32
@property
    def time_input_dim(self):
'''simple docstring'''
return 32
@property
    def block_out_channels_0(self):
'''simple docstring'''
return self.time_input_dim
@property
    def time_embed_dim(self):
'''simple docstring'''
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
'''simple docstring'''
return 100
@property
    def dummy_tokenizer(self):
        '''simple docstring'''
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
    def dummy_text_encoder(self):
        '''simple docstring'''
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        '''simple docstring'''
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :16] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        '''simple docstring'''
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f'''image.shape {image.shape}''')
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        '''simple docstring'''
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
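# A small sketch of how the binary inpaint masks in these tests are built: a
# float32 array of ones with one region zeroed out. Which value marks the area
# to repaint follows the pipeline's convention; the slice here is illustrative.
_demo_mask = np.ones((768, 768), dtype=np.float32)
_demo_mask[:250, 250:-250] = 0
assert set(np.unique(_demo_mask).tolist()) == {0.0, 1.0}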
| 173 | 0 |
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
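# Cross-check of the recurrence in solution() with an explicit continued
# fraction for e, [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...] (Project Euler 65).
# All names below are illustrative.
from fractions import Fraction

def _demo_convergent(n: int) -> Fraction:
    terms, k = [2], 1
    while len(terms) < n:
        terms += [1, 2 * k, 1]
        k += 1
    frac = Fraction(terms[n - 1])
    for t in reversed(terms[: n - 1]):
        frac = t + 1 / frac
    return frac

# The 10th convergent of e is 1457/536; its numerator's digit sum is 17,
# matching sum_digits() for that convergent.
assert _demo_convergent(10) == Fraction(1457, 536)
assert sum_digits(_demo_convergent(10).numerator) == 17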
if __name__ == "__main__":
print(f'''{solution() = }''') | 370 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_84,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_28,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["validation"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
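# A tiny, hedged illustration of overflow_to_sample_mapping as used above,
# reusing the tokenizer loaded earlier in this script: one long context is
# split into several overlapping features, and the mapping records which
# input example each feature came from. Texts and sizes are illustrative.
demo_enc = tokenizer(
    ["what is this?"],
    ["context " * 300],
    truncation="only_second",
    max_length=64,
    stride=16,
    return_overflowing_tokens=True,
    return_offsets_mapping=True,
    padding="max_length",
)
assert len(demo_enc["input_ids"]) > 1  # several features from one example
assert set(demo_enc["overflow_to_sample_mapping"]) == {0}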
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage, )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
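    # Quick arithmetic check of the bytes-per-binding formula above: the volume
    # of the binding shape times the dtype's item size. For a batch of 8
    # sequences of length 384 in int32:
    assert int(np.prod((8, 384))) * np.dtype(np.int32).itemsize == 8 * 384 * 4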
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00))
logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''') | 306 | 0 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
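# A minimal stand-in for the repo-local get_duration helper imported above,
# shown for context: it is assumed to return the wall-clock seconds of one
# call to the wrapped function. The real implementation may differ.
def _demo_get_duration(func):
    import functools
    import time

    @functools.wraps(func)
    def timed(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start

    return timed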
@get_duration
def read(dataset: datasets.Dataset, length: int):
    for i in range(length):
        _ = dataset[i]
@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)}, )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 330 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
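# A simplified sketch of what _LazyModule provides, via PEP 562 module-level
# __getattr__: attributes resolve to real imports only on first access. This
# stand-in is illustrative, not the transformers implementation.
import importlib
import types

def _demo_make_lazy(name, import_structure):
    module = types.ModuleType(name)

    def __getattr__(attr):
        for submodule, exported in import_structure.items():
            if attr in exported:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

    module.__getattr__ = __getattr__
    return module

_demo_lazy = _demo_make_lazy("lazy_math", {"math": ["sqrt", "pi"]})
assert _demo_lazy.sqrt(9) == 3.0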
| 179 | 0 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """simple docstring"""
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n, )
def n_queens_solution(n: int) -> None:
    """simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
# Print all the boards
for board in boards:
for column in board:
            print(column)
print('''''' )
    print(len(boards), 'solutions were found.')
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
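# Sanity check of the search above against the known n-queens solution counts
# for small boards (OEIS A000170); the call mirrors n_queens_solution().
for _n, _expected in {1: 1, 2: 0, 3: 0, 4: 2, 5: 10, 6: 4}.items():
    _boards: list[list[str]] = []
    depth_first_search([], [], [], _boards, _n)
    assert len(_boards) == _expected, (_n, len(_boards))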
| 93 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }
    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """simple docstring"""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f'--- Global rank: {params.global_rank} - '
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def set_seed(args):
    """simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
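# Quick check of the rank bookkeeping that init_gpu_params() asserts above:
# with world_size GPUs split evenly across nodes, node_id and n_nodes follow
# by integer division. Illustrative values only.
_world_size, _n_gpu_per_node = 8, 4
_n_nodes = _world_size // _n_gpu_per_node
for _global_rank in range(_world_size):
    _node_id = _global_rank // _n_gpu_per_node
    assert 0 <= _node_id < _n_nodes
assert _world_size == _n_nodes * _n_gpu_per_node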
| 93 | 1 |
import string
def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'''Decryption using Key #{key}: {translated}''')
def main() -> None:
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
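# A single-key variant of the brute-force loop in decrypt(), useful when the
# key is already known. Names and the sample ciphertext are illustrative.
def _demo_caesar_decrypt(message: str, key: int) -> str:
    out = []
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) - key) % len(string.ascii_uppercase)
            out.append(string.ascii_uppercase[num])
        else:
            out.append(symbol)
    return "".join(out)

assert _demo_caesar_decrypt("KHOOR", 3) == "HELLO"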
| 270 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        """simple docstring"""
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False}, )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('''Does not support attention outputs''' )
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
pass
@unittest.skip
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''ESMFold only has one output format.''' )
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def UpperCAmelCase__ ( self : Any ) -> str:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
@slow
    def test_inference_protein_folding(self):
        """simple docstring"""
        model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1').float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)['positions']
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
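    # Hedged full-inference sketch for the checkpoint used above, kept as
    # comments because facebook/esmfold_v1 is very large. The tokenizer choice
    # and the add_special_tokens flag are assumptions, not taken from this test:
    #   from transformers import AutoTokenizer
    #   tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
    #   inputs = tokenizer(["MLKNV"], return_tensors="pt", add_special_tokens=False)
    #   positions = model(**inputs)["positions"]  # (8, batch, seq_len, 14, 3) per the tester above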
| 125 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {"""vocab_file""": """vocab.txt"""}
_UpperCAmelCase = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
_UpperCAmelCase = {
"""openbmb/cpm-ant-10b""": 1024,
}
def load_vocab(vocab_file):
    '''simple docstring'''
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
'''simple docstring'''
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        """simple docstring"""
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, token):
        """simple docstring"""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
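# A standalone sketch of the greedy longest-match-first loop implemented by
# tokenize() above, on a toy vocabulary (illustrative names):
def _demo_longest_match(text, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(text):
        end = len(text)
        while end > start and text[start:end] not in vocab:
            end -= 1
        if end == start:  # nothing matches: emit the unk token, advance one char
            tokens.append(unk)
            start += 1
        else:
            tokens.append(text[start:end])
            start = end
    return tokens

assert _demo_longest_match("unaffable", {"unaff", "able"}) == ["unaff", "able"]
assert _demo_longest_match("xyz", {"y"}) == ["<unk>", "y", "<unk>"]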
class CpmAntTokenizer(PreTrainedTokenizer):
'''simple docstring'''
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['''input_ids''', '''attention_mask''']
lowerCamelCase_ = False
    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        """simple docstring"""
        requires_backends(self, ['jieba'])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
@property
    def bod_token_id(self):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
    def eod_token_id(self):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
    def newline_id(self):
"""simple docstring"""
return self.encoder["\n"]
@property
    def vocab_size(self):
"""simple docstring"""
return len(self.encoder )
    def get_vocab(self):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize(self, text):
        """simple docstring"""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        """simple docstring"""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        """simple docstring"""
        return token in self.encoder
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        return "".join(tokens)
    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder['</_>'] = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder['</n>'] = self.encoder['\n']
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is not None:
return [1] + ([0] * len(a_ )) + [1] + ([0] * len(a_ ))
return [1] + ([0] * len(a_ ))
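# Usage sketch for the tokenizer above (hypothetical class name and vocab path; the
# class's own name is not shown in this excerpt, so `CpmAntTokenizer` is an assumption):
#   tok = CpmAntTokenizer("vocab.txt")
#   ids = tok.encode("今天天气真好")
#   text = tok.decode(ids)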
| 352 | import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
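# `config_common_kwargs` sets every common `PretrainedConfig` argument to a non-default
# value, so the round-trip tests below can catch any field that fails to serialize; a
# completeness check over this dict lives in `ConfigTestUtils` further down.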
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
'''simple docstring'''
@classmethod
def setUpClass(cls):
    """simple docstring"""
    cls._token = TOKEN
    HfFolder.save_token(TOKEN)
@classmethod
def tearDownClass(cls):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def test_push_to_hub(self):
    """simple docstring"""
    config = BertConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
    config.push_to_hub("test-config", use_auth_token=self._token)
    new_config = BertConfig.from_pretrained(f"{USER}/test-config")
    for k, v in config.to_dict().items():
        if k != "transformers_version":
            self.assertEqual(v, getattr(new_config, k))
    # Reset repo
    delete_repo(token=self._token, repo_id="test-config")
    # Push to hub via save_pretrained
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)
    new_config = BertConfig.from_pretrained(f"{USER}/test-config")
    for k, v in config.to_dict().items():
        if k != "transformers_version":
            self.assertEqual(v, getattr(new_config, k))
def test_push_to_hub_in_organization(self):
    """simple docstring"""
    config = BertConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
    config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)
    new_config = BertConfig.from_pretrained("valid_org/test-config-org")
    for k, v in config.to_dict().items():
        if k != "transformers_version":
            self.assertEqual(v, getattr(new_config, k))
    # Reset repo
    delete_repo(token=self._token, repo_id="valid_org/test-config-org")
    # Push to hub via save_pretrained
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(
            tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token)
    new_config = BertConfig.from_pretrained("valid_org/test-config-org")
    for k, v in config.to_dict().items():
        if k != "transformers_version":
            self.assertEqual(v, getattr(new_config, k))
def test_push_to_hub_dynamic_config(self):
    """simple docstring"""
    CustomConfig.register_for_auto_class()
    config = CustomConfig(attribute=42)
    config.push_to_hub("test-dynamic-config", use_auth_token=self._token)
    # This has added the proper auto_map field to the config
    self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})
    new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
    # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
    self.assertEqual(new_config.__class__.__name__, "CustomConfig")
    self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
'''simple docstring'''
def test_config_from_string(self):
    """simple docstring"""
    c = GPTaConfig()
    # attempt to modify each of int/float/bool/str config records and verify they were updated
    n_embd = c.n_embd + 1  # int
    resid_pdrop = c.resid_pdrop + 1.0  # float
    scale_attn_weights = not c.scale_attn_weights  # bool
    summary_type = c.summary_type + "foo"  # str
    c.update_from_string(
        f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}")
    self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
    self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
    self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
    self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
def test_config_common_kwargs_is_complete(self):
    """simple docstring"""
    base_config = PretrainedConfig()
    missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
    # If this part of the test fails, you have arguments to add in config_common_kwargs above.
    self.assertListEqual(
        missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"])
    keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
    if len(keys_with_defaults) > 0:
        raise ValueError(
            "The following keys are set with the default values in"
            " `test_configuration_common.config_common_kwargs` pick another value for them:"
            f" {', '.join(keys_with_defaults)}.")
def test_from_pretrained_subfolder(self):
    """simple docstring"""
    with self.assertRaises(OSError):
        # config is in subfolder, the following should not work without specifying the subfolder
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")
    config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
    self.assertIsNotNone(config)
def test_cached_files_are_used_when_internet_is_down(self):
    """simple docstring"""
    response_mock = mock.Mock()
    response_mock.status_code = 500
    response_mock.headers = {}
    response_mock.raise_for_status.side_effect = HTTPError
    response_mock.json.return_value = {}
    # Download this model to make sure it's in the cache.
    _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
    # Under the mock environment we get a 500 error when trying to reach the model.
    with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
        # This check we did call the fake head request
        mock_head.assert_called()
def test_legacy_load_from_url(self):
    """simple docstring"""
    _ = BertConfig.from_pretrained(
        "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json")
def test_local_versioning(self):
    """simple docstring"""
    configuration = AutoConfig.from_pretrained("bert-base-cased")
    configuration.configuration_files = ["config.4.0.0.json"]
    with tempfile.TemporaryDirectory() as tmp_dir:
        configuration.save_pretrained(tmp_dir)
        configuration.hidden_size = 2
        json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))
        # This should pick the new configuration file as the version of Transformers is > 4.0.0
        new_configuration = AutoConfig.from_pretrained(tmp_dir)
        self.assertEqual(new_configuration.hidden_size, 2)
        # Will need to be adjusted if we reach v42 and this test is still here.
        # Should pick the old configuration file as the version of Transformers is < 4.42.0
        configuration.configuration_files = ["config.42.0.0.json"]
        configuration.hidden_size = 768
        configuration.save_pretrained(tmp_dir)
        shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
        new_configuration = AutoConfig.from_pretrained(tmp_dir)
        self.assertEqual(new_configuration.hidden_size, 768)
def test_repo_versioning_before(self):
    """simple docstring"""
    repo = "hf-internal-testing/test-two-configs"
    import transformers as new_transformers
    new_transformers.configuration_utils.__version__ = "v4.0.0"
    new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
        repo, return_unused_kwargs=True)
    self.assertEqual(new_configuration.hidden_size, 2)
    # This checks `_configuration_file` is not kept in the kwargs by mistake.
    self.assertDictEqual(kwargs, {})
    # Testing an older version by monkey-patching the version in the module it's used.
    import transformers as old_transformers
    old_transformers.configuration_utils.__version__ = "v3.0.0"
    old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
    self.assertEqual(old_configuration.hidden_size, 768)
| 192 | 0 |
from __future__ import annotations
from collections.abc import Callable
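# The functions below appear to implement Project Euler problem 101 ("optimum
# polynomial"): fit polynomials of increasing degree to a generating sequence by
# Gaussian elimination, then sum the first term at which each fit diverges.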
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """simple docstring"""
    size = len(matrix)
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """simple docstring"""
    size = len(y_list)
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)
    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))
    return interpolated_func
def question_function(variable: int) -> int:
    """simple docstring"""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """simple docstring"""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f"{solution() = }")
| 306 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
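# Standard transformers test layout: the ModelTester below builds a tiny config plus
# random inputs, and the unittest.TestCase further down drives it through the shared
# ModelTesterMixin / PipelineTesterMixin checks.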
class GPTNeoXJapaneseModelTester:
'''simple docstring'''
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_multiple_size = intermediate_multiple_size
    self.hidden_act = hidden_act
    self.hidden_dropout = hidden_dropout
    self.attention_dropout = attention_dropout
    self.weight_tying = weight_tying
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.scope = scope
def prepare_config_and_inputs(self):
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
    token_labels = None
    if self.use_labels:
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
    config = self.get_config()
    return config, input_ids, input_mask, token_labels
def get_config(self):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def prepare_config_and_inputs_for_decoder(self):
    config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
    config.is_decoder = True
    return config, input_ids, input_mask, token_labels
def create_and_check_model(self, config, input_ids, input_mask):
    model = GPTNeoXJapaneseModel(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask)
    result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
    config.add_cross_attention = True
    model = GPTNeoXJapaneseModel(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
    model = GPTNeoXJapaneseForCausalLM(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
    config.is_decoder = True
    model = GPTNeoXJapaneseForCausalLM(config=config)
    model.to(torch_device)
    model.eval()
    # first forward pass
    outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
    past_key_values = outputs.past_key_values
    # create hypothetical multiple next tokens and extend to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
    next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
    # append to next input_ids and attention mask
    next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
    next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
    output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
    output_from_no_past = output_from_no_past["hidden_states"][0]
    output_from_past = model(
        next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]
    # select random slice
    random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
    output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
    output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
    self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
    # test that outputs are equal for slice
    self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
def prepare_config_and_inputs_for_common(self):
    config_and_inputs = self.prepare_config_and_inputs()
    config, input_ids, input_mask, token_labels = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)
    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)
    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        expected_outputs = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, expected_outputs)
| 178 | 0 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
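# Flax port of the Stable Diffusion safety checker: CLIP image embeddings are compared
# via cosine similarity against learned "concept" embeddings, and an image is flagged
# when any adjusted score rises above zero.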
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim))
        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))
    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__(self, config: CLIPConfig, input_shape: Optional[Tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params
    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params}, jnp.array(clip_input, dtype=jnp.float32), rngs={})
| 342 | import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
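# Same ModelTester/TestCase layout as the other model tests: tiny ConvNeXtV2 configs
# are exercised through the common model, classification-head, and backbone checks.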
class ConvNextVaModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4])
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 342 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
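# The tester below generates random images and computes the height/width the processor
# should produce under the `shortest_edge`/`longest_edge` resizing rule, so the tests
# can assert exact output shapes.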
class YolosImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = YolosImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        '''simple docstring'''
        pass
def test_call_pil( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester, equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A, Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A )
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(A, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def test_call_numpy( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A )
for image in image_inputs:
self.assertIsInstance(A, np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def test_call_pytorch( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Dict = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def test_equivalence_padding( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE : List[Any] = image_processing_a.pad(A, return_tensors='pt' )
SCREAMING_SNAKE_CASE : str = image_processing_a(A, return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) )
@slow
def test_call_pytorch_with_coco_detection_annotations( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : Any = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Union[str, Any] = {'image_id': 39_769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE : str = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(images=A, annotations=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Dict = torch.tensor([58_87.96_00, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Any = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify orig_size
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
@slow
def test_call_pytorch_with_coco_panoptic_annotations( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE : str = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
SCREAMING_SNAKE_CASE : Tuple = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE : Dict = YolosImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE : List[str] = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : Tuple = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify masks
SCREAMING_SNAKE_CASE : Optional[int] = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A )
# verify orig_size
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
| 251 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
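# Configuration class for the (deprecated) RetriBERT retrieval model; the fields mirror
# BERT's, plus `share_encoders` and `projection_dim` for the dual-encoder head.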
class RetriBertConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "retribert"
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 118 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
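# A dummy config/model pair registered under the "new-model" type, used below to check
# that the TFAuto* factories dispatch on the config class correctly.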
class NewModelConfig(BertConfig):
    model_type = "new-model"
if is_tf_available():
    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
@slow
def test_model_from_pretrained(self):
    model_name = "bert-base-cased"
    config = AutoConfig.from_pretrained(model_name)
    self.assertIsNotNone(config)
    self.assertIsInstance(config, BertConfig)
    model = TFAutoModel.from_pretrained(model_name)
    self.assertIsNotNone(model)
    self.assertIsInstance(model, TFBertModel)
@slow
def test_model_for_pretraining_from_pretrained(self):
    model_name = "bert-base-cased"
    config = AutoConfig.from_pretrained(model_name)
    self.assertIsNotNone(config)
    self.assertIsInstance(config, BertConfig)
    model = TFAutoModelForPreTraining.from_pretrained(model_name)
    self.assertIsNotNone(model)
    self.assertIsInstance(model, TFBertForPreTraining)
@slow
def test_model_for_causal_lm(self):
    for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, GPTaConfig)
        model = TFAutoModelForCausalLM.from_pretrained(model_name)
        model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFGPTaLMHeadModel)
@slow
def test_lmhead_model_from_pretrained(self):
    for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModelWithLMHead.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForMaskedLM)
@slow
def test_model_for_masked_lm(self):
    for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModelForMaskedLM.from_pretrained(model_name)
        model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForMaskedLM)
@slow
def SCREAMING_SNAKE_CASE__ (self : Any):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
            A = TFAutoModelForSeq2SeqLM.from_pretrained(__SCREAMING_SNAKE_CASE)
            A , A = TFAutoModelForSeq2SeqLM.from_pretrained(__SCREAMING_SNAKE_CASE , output_loading_info=__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
@slow
def SCREAMING_SNAKE_CASE__ (self : Any):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
A = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
A = TFAutoModelForSequenceClassification.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
@slow
def SCREAMING_SNAKE_CASE__ (self : Dict):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
A = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
A = TFAutoModelForQuestionAnswering.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
@slow
@require_tensorflow_probability
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any]):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
A = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
A = TFAutoModelForTableQuestionAnswering.from_pretrained(__SCREAMING_SNAKE_CASE)
A , A = TFAutoModelForTableQuestionAnswering.from_pretrained(
__SCREAMING_SNAKE_CASE , output_loading_info=__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : Optional[Any]):
A = TFAutoModelWithLMHead.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual(model.num_parameters() , 1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=__SCREAMING_SNAKE_CASE) , 1_4_4_1_0)
def SCREAMING_SNAKE_CASE__ (self : Any):
A = TFAutoModelWithLMHead.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual(model.num_parameters() , 1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=__SCREAMING_SNAKE_CASE) , 1_4_4_1_0)
def SCREAMING_SNAKE_CASE__ (self : Tuple):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
A = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
A = copy.deepcopy(model.config)
A = ["FunnelBaseModel"]
A = TFAutoModel.from_config(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__SCREAMING_SNAKE_CASE)
A = TFAutoModel.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : List[str]):
try:
AutoConfig.register("new-model" , __SCREAMING_SNAKE_CASE)
            auto_classes = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__):
# Wrong config class will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE):
auto_class.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
auto_class.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE):
auto_class.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
A = NewModelConfig(**tiny_config.to_dict())
A = auto_class.from_config(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__SCREAMING_SNAKE_CASE)
A = auto_class.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def SCREAMING_SNAKE_CASE__ (self : List[Any]):
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , "bert-base is not a local folder and is not a valid model identifier"):
A = TFAutoModel.from_pretrained("bert-base")
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any]):
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
A = TFAutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , revision="aaaaaa")
def SCREAMING_SNAKE_CASE__ (self : List[str]):
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
A = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")
def SCREAMING_SNAKE_CASE__ (self : Any):
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , "Use `from_pt=True` to load this model"):
A = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
def SCREAMING_SNAKE_CASE__ (self : List[str]):
# Make sure we have cached the model.
A = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with RequestCounter() as counter:
A = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
# With a sharded checkpoint
A = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
with RequestCounter() as counter:
A = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
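def _register_new_model_demo():
    # Hedged sketch (added; not part of the original test file). It mirrors the
    # registration test above using the toy NewModelConfig / TFNewModel classes
    # defined at the top of this module; re-running it after the test has
    # already registered "new-model" would raise, so treat it as illustration.
    AutoConfig.register("new-model", NewModelConfig)
    TFAutoModel.register(NewModelConfig, TFNewModel)
    # Once both registrations exist, the generic auto-API resolves the custom
    # architecture exactly like a built-in one.
    return TFAutoModel.from_config(NewModelConfig())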
| 57 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    """simple docstring"""
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location="cpu" )
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )
    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>" , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken("<ent2>" , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]} )
    config.vocab_size += 2
    print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(entity_vocab , f )
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
    model = LukeModel(config=config ).eval()
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(F"""Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids""" )
    if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
        raise ValueError(
            "Unexpected keys"
            F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task="entity_classification" )
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors="pt" )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1_024) )
        expected_slice = torch.tensor(
            [[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1_024) )
        expected_slice = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            F""" {expected_shape}""" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab( entity_vocab_path ):
    """simple docstring"""
    entity_vocab = {}
    with open(entity_vocab_path , "r" , encoding="utf-8" ) as f:
        for index, line in enumerate(f ):
            entity_name , _ = line.rstrip().split("\t" )
            entity_vocab[entity_name] = index
    return entity_vocab
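def _load_entity_vocab_demo():
    # Hedged illustration (added; not part of the conversion script):
    # load_entity_vocab expects one "<entity>\t<count>" line per entity and
    # returns {entity_name: line_index}.
    import os
    import tempfile

    tmp = tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False, encoding="utf-8")
    tmp.write("[PAD]\t0\n[UNK]\t0\n[MASK]\t0\n")
    tmp.close()
    try:
        assert load_entity_vocab(tmp.name) == {"[PAD]": 0, "[UNK]": 1, "[MASK]": 2}
    finally:
        os.unlink(tmp.name)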
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__A : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 57 | 1 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester :
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_token_type_ids=True , use_input_mask=True , use_labels=True , use_mc_token_ids=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ) -> Any:
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config( self ) -> List[str]:
return CTRLConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
    def create_and_check_ctrl_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ) -> List[Any]:
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ), config.n_layer )
    def create_and_check_lm_head_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ) -> str:
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ) -> int:
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': CTRLModel,
            'text-classification': CTRLForSequenceClassification,
            'text-generation': CTRLLMHeadModel,
            'zero-shot': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Optional[int]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp( self ) -> List[Any]:
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
    def tearDown( self ) -> List[str]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config( self ) -> Any:
self.config_tester.run_common_tests()
    def test_ctrl_model( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
    def test_ctrl_lm_head_model( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small( self ) -> Any:
pass
@slow
    def test_model_from_pretrained( self ) -> List[str]:
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility( self ) -> Optional[int]:
pass
@require_torch
class CTRLModelLanguageGenerationTest ( unittest.TestCase ):
    def tearDown( self ) -> Union[str, Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl( self ) -> Any:
        model = CTRLLMHeadModel.from_pretrained('ctrl' )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device )  # Legal the president is
        expected_output_ids = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
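def _ctrl_prompt_ids_demo():
    # Hedged sketch (added; assumes network access, not part of the original
    # tests): the hard-coded prompt ids in the generation test above correspond
    # to the text "Legal the president is"; they can be rebuilt from the
    # tokenizer directly.
    from transformers import CTRLTokenizer

    tokenizer = CTRLTokenizer.from_pretrained("ctrl")
    return tokenizer("Legal the president is", return_tensors="pt").input_ids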
| 255 |
"""simple docstring"""
def solution( ) -> str:
    '''simple docstring'''
    total = 0
    for i in range(1 , 10_01 ):
        total += i**i
    return str(total )[-10:]
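def _self_powers_check() -> None:
    # Illustration (added): for the first three terms the series is
    # 1**1 + 2**2 + 3**3 = 1 + 4 + 27 = 32, which a truncated loop reproduces.
    assert sum(i**i for i in range(1, 4)) == 32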
if __name__ == "__main__":
print(solution())
| 255 | 1 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments( parser ):
    group = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=__lowerCamelCase , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=__lowerCamelCase , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=__lowerCamelCase , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=__lowerCamelCase , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=__lowerCamelCase , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=__lowerCamelCase , type=__lowerCamelCase , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=__lowerCamelCase , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def set_default_quantizers( args ):
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('Specify --percentile when using percentile calibrator' )
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}" )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
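def _quant_setup_demo():
    # Hedged usage sketch (added): parse the flags declared by add_arguments and
    # install the default QuantDescriptors before any QuantLinear is built.
    import argparse

    parser = argparse.ArgumentParser()
    add_arguments(parser)
    args = parser.parse_args(["--aprec", "8", "--wprec", "8", "--quant-per-tensor"])
    set_default_quantizers(args)
    return args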
def configure_model( model , args , calib=False , eval=False ):
    logger.info('Configuring Model for Quantization' )
    logger.info(f"using quantization package {pytorch_quantization.__file__}" )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['embeddings'] , which='weight' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [r'layer.\d+.' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [r'layer.\d+.' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def enable_calibration( model ):
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f"{name:80}: {module}" )
def finish_calibration( model , args ):
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model )
def fuse_qkv( model , args ):
    def fusea(qq , qk , qv ):
for mod in [qq, qk, qv]:
            if not hasattr(mod , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
logger.info(f" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(f"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
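def _fuse_amax_demo():
    # Illustration (added) of the fusion rule in fusea above: the q/k/v input
    # quantizers end up sharing the largest of their three amax values, e.g.
    # (3.0, 5.0, 4.0) -> 5.0.
    q, k, v = 3.0, 5.0, 4.0
    assert max(q, k, v) == 5.0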
def clip_gelu( model , maxval ):
    for name, mod in model.named_modules():
        if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def expand_amax( model ):
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def recalibrate_weights( model ):
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ):
            if not hasattr(mod.weight_quantizer , '_amax' ):
                print(f'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
            mod._weight_quantizer._amax = amax
def print_model_summary( model , name_width=25 , line_width=180 , ignore=None ):
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , 'weight' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '_input_quantizer' , None )
        weight_q = getattr(mod , '_weight_quantizer' , None )
        if not hasattr(mod , 'weight' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(f"{name:{name_width}} {act_str}" )
            logger.info(f"{' ':{name_width}} {wgt_str}" )
def print_quant_summary( model ):
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(f"{name:80} {mod}" )
            count += 1
    print(f"{count} TensorQuantizers found in model" )
def set_quantizer( name , mod , quantizer , k , v ):
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f"{name} has no {quantizer}" )
def set_quantizers( name , mod , which="both" , **kwargs ):
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name , mod , '_input_quantizer' , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , '_weight_quantizer' , k , v )
    logger.info(s )
def set_quantizer_by_name( model , names , **kwargs ):
    for name, mod in model.named_modules():
        if hasattr(mod , '_input_quantizer' ) or hasattr(mod , '_weight_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod , k , v )
                    logger.info(s )
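def _disable_layernorm_demo(model):
    # Hedged usage sketch (added): disable every quantizer whose module name
    # matches "LayerNorm", the same path --quant-disable-keyword takes through
    # configure_model above. `model` is any quantized nn.Module.
    set_quantizer_by_name(model, ["LayerNorm"], _disabled=True)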
| 351 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition( arr ):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(arr ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
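def _odd_even_reference(arr):
    # Hedged single-process reference (added): the same odd-even transposition
    # schedule without pipes or locks, handy for checking the parallel version.
    arr = list(arr)
    n = len(arr)
    for phase in range(n):
        # even phases compare (0,1), (2,3), ...; odd phases compare (1,2), (3,4), ...
        start = 0 if phase % 2 == 0 else 1
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr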
def main( ) -> Dict:
    arr = list(range(10 , 0 , -1 ) )
    print('Initial List' )
    print(*arr )
    arr = odd_even_transposition(arr )
    print('Sorted List\n' )
    print(*arr )
if __name__ == "__main__":
main()
| 335 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _A ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple ):
UpperCamelCase :Any = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
UpperCamelCase :Tuple = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
def _A ( self : int , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ):
UpperCamelCase :Tuple = object_detector(examples[0] , threshold=0.0 )
UpperCamelCase :Optional[Any] = len(__lowerCamelCase )
self.assertGreater(__lowerCamelCase , 0 )
self.assertEqual(
__lowerCamelCase , [
{
"""score""": ANY(__lowerCamelCase ),
"""label""": ANY(__lowerCamelCase ),
"""box""": {"""xmin""": ANY(__lowerCamelCase ), """ymin""": ANY(__lowerCamelCase ), """xmax""": ANY(__lowerCamelCase ), """ymax""": ANY(__lowerCamelCase )},
}
for i in range(__lowerCamelCase )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def _A ( self : Dict ):
pass
@require_torch
def _A ( self : List[str] ):
UpperCamelCase :Tuple = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
UpperCamelCase :Tuple = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
UpperCamelCase :Any = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
def _A ( self : Optional[Any] ):
UpperCamelCase :Optional[int] = pipeline("""zero-shot-object-detection""" )
UpperCamelCase :Union[str, Any] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
UpperCamelCase :List[str] = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def _A ( self : List[Any] ):
pass
@require_torch
@slow
def _A ( self : str ):
UpperCamelCase :Dict = 0.2
UpperCamelCase :Optional[Any] = pipeline("""zero-shot-object-detection""" )
UpperCamelCase :Optional[Any] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=__lowerCamelCase , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
def _A ( self : Optional[int] ):
UpperCamelCase :int = 2
UpperCamelCase :List[str] = pipeline("""zero-shot-object-detection""" )
UpperCamelCase :Union[str, Any] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=__lowerCamelCase , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
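def _zero_shot_detection_demo():
    # Hedged usage sketch (added; assumes network access): the same call pattern
    # the slow tests above exercise -- free-form candidate labels plus a
    # confidence threshold on a COCO image.
    detector = pipeline("zero-shot-object-detection")
    return detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        threshold=0.2,
    )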
| 38 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_wavlm"""] = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
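# Hedged usage note (added): because of the _LazyModule above, importing the
# package stays cheap -- the heavy torch-backed module is only imported the
# first time an attribute such as WavLMModel is actually accessed.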
| 13 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4_0_9_6,
'''allenai/longformer-large-4096''': 4_0_9_6,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4_0_9_6,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4_0_9_6,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ):
    '''simple docstring'''
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
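def _bytes_to_unicode_demo():
    # Illustration (added): printable ASCII maps to itself, while bytes outside
    # the printable ranges are shifted above 0xFF, e.g. the space byte 0x20
    # becomes "Ġ" (chr(288)) -- the familiar GPT-2/RoBERTa space marker.
    mapping = bytes_to_unicode()
    assert mapping[ord("A")] == "A"
    assert mapping[ord(" ")] == "Ġ"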
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
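def _get_pairs_demo():
    # Illustration (added): the BPE loop below repeatedly asks for the symbol
    # pairs of a word; for ("l", "o", "w") those are ("l","o") and ("o","w").
    assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}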
class LongformerTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ) -> List[Any]:
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="""utf-8""" ) as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
    @property
    def vocab_size( self ) -> Dict:
        """simple docstring"""
        return len(self.encoder )
    def get_vocab( self ) -> Dict:
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ) -> Dict:
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ) -> List[Any]:
        """simple docstring"""
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ) -> str:
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> int:
        """simple docstring"""
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ) -> Optional[Any]:
        """simple docstring"""
        text = """""".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
def UpperCamelCase_ ( self , snake_case__ , snake_case__ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Creates token type IDs (all zeros for this tokenizer) for a sequence or a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs) -> Tuple[str, dict]:
        """Optionally prepends a space so the byte-level BPE treats the first word like any other."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 248 |
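# A minimal, self-contained sketch of the greedy BPE merge loop used in the
# tokenizer row above, run against a toy merge-rank table. The helper name
# `toy_bpe` and the example ranks are illustrative assumptions, not part of
# the tokenizer source.

def toy_bpe(word: str, ranks: dict) -> list:
    """Greedy BPE: repeatedly merge the best-ranked adjacent pair."""
    symbols = list(word)
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # no known merge applies anymore
        first, second = best
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and symbols[i] == first and symbols[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

assert toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1, ("e", "r"): 2}) == ["low", "er"]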
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args,
    output_path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights exceed the 2GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 248 | 1 |
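# The conversion row above boils down to repeated `torch.onnx.export` calls
# with per-model input names and dynamic axes. A minimal sketch of one such
# call on a tiny stand-in module; `TinyModel` and the output file name are
# hypothetical, chosen only for illustration.

import torch

class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)

    def forward(self, x):
        return self.linear(x)

model = TinyModel().eval()
dummy_input = torch.randn(1, 4)
torch.onnx.export(
    model,
    (dummy_input,),
    "tiny_model.onnx",
    input_names=["x"],
    output_names=["y"],
    dynamic_axes={"x": {0: "batch"}},  # allow a variable batch size, as the script does
    opset_version=14,
)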
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 217 |
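# `_LazyModule` above defers the heavy submodule imports until an attribute
# is first accessed. A rough standalone equivalent using module-level
# `__getattr__` (PEP 562); the mapping below is an illustrative stand-in for
# the real `_import_structure`, not the transformers implementation itself.

import importlib

_import_structure = {"math": ["sqrt"]}

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")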
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 217 | 1 |
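# The parametrized test above pins down the URL scheme `hf_hub_url` must
# produce; the same URL can be assembled by hand. A small sketch (the helper
# name is hypothetical):

from urllib.parse import quote

def build_dataset_url(repo_id, path, revision=None):
    # blanks and other unsafe characters in the file name are percent-encoded
    return f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"

assert (
    build_dataset_url("org-name/dataset-name", "filename with blanks.csv")
    == "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"
)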
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
| 368 |
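# `utterance_cmvn` above applies cepstral mean and variance normalization per
# utterance: features are standardized using statistics of the un-padded
# frames only. A numpy-only sketch of the same idea (helper name assumed):

import numpy as np

def cmvn(features, input_length):
    valid = features[:input_length]
    normalized = (features - valid.mean(axis=0)) / valid.std(axis=0)
    normalized[input_length:] = 0.0  # reset padded frames to the padding value
    return normalized.astype(np.float32)

feats = np.random.randn(10, 80).astype(np.float32)
out = cmvn(feats, input_length=8)
assert abs(float(out[:8].mean())) < 1e-4  # ~zero mean over the valid frames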
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )
        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)
        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")
        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass
        shutil.move(f"{directory}/__init__.py", f"{model_dir}/__init__.py")
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")
        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")
        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)
            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")
            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
| 47 | 0 |
def xnor_gate(input_1: int, input_2: int) -> int:
    """Returns 1 if both inputs are equal, otherwise 0 (logical XNOR)."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 15 |
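# For 0/1 inputs, XNOR is the negation of XOR, so the gate above is
# equivalent to `1 - (a ^ b)`; a quick check over the full truth table:

for a in (0, 1):
    for b in (0, 1):
        assert (1 if a == b else 0) == 1 - (a ^ b)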
def pancake_sort(arr):
    """Sorts a list using pancake sort (repeated prefix reversals)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first cur elements, sinking the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
| 95 | 0 |
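# Pancake sort is built on a single primitive: a prefix reversal ("flip").
# Each round flips the maximum of the unsorted prefix to the front, then
# flips it into its final position; the slicing above is exactly this flip:

def flip(arr: list, k: int) -> list:
    """Reverse the first k + 1 elements of arr."""
    return arr[k::-1] + arr[k + 1 :]

assert flip([3, 1, 2], 1) == [1, 3, 2]  # reverse the first two elements
assert flip([3, 1, 2], 2) == [2, 1, 3]  # reverse the whole list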
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 77 |
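# The pinned slices above are only meaningful because sampling is made
# deterministic: each generation re-seeds a `torch.Generator`. A minimal
# demonstration of that reproducibility on the CPU:

import torch

gen = torch.Generator(device="cpu").manual_seed(0)
a = torch.randn(4, generator=gen)
gen = torch.Generator(device="cpu").manual_seed(0)
b = torch.randn(4, generator=gen)
assert torch.equal(a, b)  # same seed and device, identical samples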
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 77 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 87 | 1 |
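# LED models take a `global_attention_mask` alongside the usual encoder
# inputs; the test above checks that `tokenizer.pad` extends it with -1 for
# padded positions. A common usage sketch (marking only the first token as
# global; treat it as illustrative rather than canonical):

import torch
from transformers import LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tokenizer(["A long paragraph for summarization."], return_tensors="pt")
global_attention_mask = torch.zeros_like(enc.input_ids)
global_attention_mask[:, 0] = 1  # global attention on the leading <s> token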
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
| 153 |
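# Concrete readers subclass `AbstractDatasetReader` and implement `read()`.
# A toy, hypothetical subclass for illustration; real readers (CSV, JSON,
# Parquet) delegate to a dataset builder rather than building rows directly.

from datasets import Dataset

class InMemoryListReader(AbstractDatasetReader):
    """Toy reader whose 'source' is a list of dicts already in memory."""

    def __init__(self, records, **kwargs):
        super().__init__(path_or_paths=None, **kwargs)
        self.records = records

    def read(self):
        return Dataset.from_list(self.records)

ds = InMemoryListReader([{"text": "hello"}, {"text": "world"}]).read()
assert ds.num_rows == 2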
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) with the modified Euler (Heun) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor step (explicit Euler), then trapezoidal corrector
        y_predicted = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_predicted)
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 153 | 1 |
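# A quick sanity check of the modified-Euler (Heun) integrator above on
# dy/dx = y with y(0) = 1, whose exact solution is e^x. The tolerance is an
# illustrative bound for this step size, not a guarantee from the source.

import numpy as np

ys = euler_modified(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
assert abs(ys[-1] - np.e) < 1e-3  # second-order accurate: error shrinks like h^2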
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def create_rename_keys(config):
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
model.save_pretrained(__lowerCAmelCase )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
A__ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
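A note on the conversion above: the heart of the script is a flat list of (old, new) key pairs, applied by popping each old entry out of the checkpoint dict and reinserting its tensor under the new name. A minimal, framework-free sketch of that renaming step (the two keys come from the mapping above; the values and everything else are illustrative):

from collections import OrderedDict

def rename_state_dict(state_dict, rename_pairs):
    # Pop each old key and reinsert its value under the new key;
    # keys without a mapping are left untouched.
    for old, new in rename_pairs:
        if old in state_dict:
            state_dict[new] = state_dict.pop(old)
    return state_dict

sd = OrderedDict([("backbone.norm0.weight", [1.0]), ("decode_head.conv_seg.bias", [0.0])])
pairs = [
    ("backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight"),
    ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
]
print(list(rename_state_dict(sd, pairs)))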
| 230 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A__ = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class a ( __lowerCamelCase ):
def __init__( self :str ,__lowercase :Dict ,__lowercase :str ,__lowercase :Any=None ,__lowercase :Union[str, Any]=1 ):
snake_case__ : Optional[int] = tokenizer
snake_case__ : List[Any] = dataset
snake_case__ : Any = len(__lowercase ) if n_tasks is None else n_tasks
snake_case__ : List[Any] = n_copies
def __iter__( self :Tuple ):
snake_case__ : Union[str, Any] = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
snake_case__ : Optional[int] = self.tokenizer(__lowercase ,padding=__lowercase ,return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a ( __lowerCamelCase ):
def __init__( self :Union[str, Any] ,__lowercase :List[Any] ,__lowercase :int ,__lowercase :List[str] ):
snake_case__ : List[str] = start_length
snake_case__ : Any = eof_strings
snake_case__ : Optional[Any] = tokenizer
def __call__( self :str ,__lowercase :str ,__lowercase :Any ,**__lowercase :Optional[int] ):
snake_case__ : Tuple = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
snake_case__ : Union[str, Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__lowercase )
def _lowerCAmelCase ( __lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : List[Any] = re.split('''(%s)''' % '''|'''.join(__lowerCAmelCase ) , __lowerCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=20 , **__lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : Optional[int] = defaultdict(__lowerCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__lowerCAmelCase ) ):
with torch.no_grad():
snake_case__ : List[Any] = batch['''ids'''].shape[-1]
snake_case__ : str = accelerator.unwrap_model(__lowerCAmelCase ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__lowerCAmelCase , **__lowerCAmelCase )
# each task is generated batch_size times
snake_case__ : str = batch['''task_id'''].repeat(__lowerCAmelCase )
snake_case__ : Optional[int] = accelerator.pad_across_processes(
__lowerCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
snake_case__ , snake_case__ : Any = accelerator.gather((generated_tokens, generated_tasks) )
snake_case__ : Union[str, Any] = generated_tokens.cpu().numpy()
snake_case__ : Optional[Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__lowerCAmelCase , __lowerCAmelCase ):
gen_token_dict[task].append(__lowerCAmelCase )
snake_case__ : List[Any] = [[] for _ in range(__lowerCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
snake_case__ : int = tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
code_gens[task].append(remove_last_block(__lowerCAmelCase ) )
return code_gens
def _lowerCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = HfArgumentParser(__lowerCAmelCase )
snake_case__ : Optional[Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
snake_case__ : Union[str, Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
snake_case__ : Union[str, Any] = '''false'''
if args.num_workers is None:
snake_case__ : Union[str, Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
snake_case__ : Dict = Accelerator()
set_seed(args.seed , device_specific=__lowerCAmelCase )
# Load model and tokenizer
snake_case__ : str = AutoTokenizer.from_pretrained(args.model_ckpt )
snake_case__ : List[str] = tokenizer.eos_token
snake_case__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
snake_case__ : List[Any] = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __lowerCAmelCase , __lowerCAmelCase )] ),
}
# Load evaluation dataset and metric
snake_case__ : List[str] = load_dataset('''openai_humaneval''' )
snake_case__ : str = load_metric('''code_eval''' )
snake_case__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
snake_case__ : str = args.n_samples // args.batch_size
snake_case__ : Tuple = TokenizedDataset(__lowerCAmelCase , human_eval['''test'''] , n_copies=__lowerCAmelCase , n_tasks=__lowerCAmelCase )
# do not confuse args.batch_size with the dataloader batch size: it is actually used as num_return_sequences
snake_case__ : Tuple = DataLoader(__lowerCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
snake_case__ : Optional[Any] = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
snake_case__ , snake_case__ : Optional[Any] = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ : Dict = complete_code(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , n_tasks=__lowerCAmelCase , batch_size=args.batch_size , **__lowerCAmelCase , )
if accelerator.is_main_process:
snake_case__ : Optional[Any] = []
for task in tqdm(range(__lowerCAmelCase ) ):
snake_case__ : str = human_eval['''test'''][task]['''test''']
snake_case__ : int = f"""check({human_eval['test'][task]['entry_point']})"""
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
snake_case__ , snake_case__ : Any = code_eval_metric.compute(
references=__lowerCAmelCase , predictions=__lowerCAmelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
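The `remove_last_block` helper above works because `re.split` with a capturing group keeps the stop strings in the result list; dropping the last two entries therefore removes the final stop string together with everything after it. A standalone sketch of that truncation (the completion string is made up):

import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block(code):
    # Split while keeping the separators (capturing group), then drop
    # the last stop string and the trailing fragment that follows it.
    parts = re.split("(%s)" % "|".join(EOF_STRINGS), code)
    return "".join(parts[:-2])

completion = "    return x + 1\n\nprint(add_one(3))"
print(repr(remove_last_block(completion)))  # '    return x + 1\n'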
| 230 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
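The `_import_structure` / `_LazyModule` dance above exists to defer heavy imports until an attribute is actually touched. Outside of transformers, a similar effect can be sketched with a PEP 562 module-level `__getattr__`; the package and attribute names below are invented for illustration:

# __init__.py of a hypothetical package -- lazy-import shim via PEP 562
import importlib

_LAZY_ATTRS = {
    "HeavyModel": ".modeling_heavy",  # attribute -> submodule that defines it
}

def __getattr__(name):
    # Invoked only when normal module lookup fails (PEP 562, Python >= 3.7).
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")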
| 370 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Tuple=100 , _lowerCamelCase : Tuple=" "):
lowercase__ : Union[str, Any] = text.split(_lowerCamelCase)
return [character.join(text[i : i + n]).strip() for i in range(0 , len(_lowerCamelCase) , _lowerCamelCase)]
def lowercase_ ( _lowerCamelCase : dict):
lowercase__ , lowercase__ : List[str] = [], []
for title, text in zip(documents["title"] , documents["text"]):
if text is not None:
for passage in split_text(_lowerCamelCase):
titles.append(title if title is not None else "")
texts.append(_lowerCamelCase)
return {"title": titles, "text": texts}
def lowercase_ ( _lowerCamelCase : dict , _lowerCamelCase : DPRContextEncoder , _lowerCamelCase : DPRContextEncoderTokenizerFast):
lowercase__ : Union[str, Any] = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=_lowerCamelCase , padding="longest" , return_tensors="pt")["input_ids"]
lowercase__ : Any = ctx_encoder(input_ids.to(device=_lowerCamelCase) , return_dict=_lowerCamelCase).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowercase_ ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ):
######################################
logger.info("Step 1 - Create the dataset")
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase__ : str = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase__ : List[Any] = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc)
# And compute the embeddings
lowercase__ : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=_lowerCamelCase)
lowercase__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
lowercase__ : List[Any] = Features(
{"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}) # optional, save as float32 instead of float64 to save space
lowercase__ : List[Any] = dataset.map(
partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , )
# And finally save your dataset
lowercase__ : Optional[int] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset")
dataset.save_to_disk(_lowerCamelCase)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset")
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index("embeddings" , custom_index=_lowerCamelCase)
# And save the index
lowercase__ : Union[str, Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss")
dataset.get_index("embeddings").save(_lowerCamelCase)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class snake_case_ :
__A : str = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,)
__A : Optional[str] = field(
default=__A ,metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,)
__A : str = field(
default="facebook/rag-sequence-nq" ,metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,)
__A : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" ,metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} ,)
__A : Optional[str] = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" ) ,metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,)
@dataclass
class snake_case_ :
__A : Optional[int] = field(
default=__A ,metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} ,)
__A : int = field(
default=16 ,metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} ,)
@dataclass
class snake_case_ :
__A : int = field(
default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,)
__A : int = field(
default=128 ,metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} ,)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
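The passage windowing done by `split_text` above is easy to check in isolation; a cleaned-up sketch of the same n-word chunking, with a tiny made-up input:

def split_text(text, n=100, character=" "):
    # Split on `character`, then re-join consecutive groups of n words.
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]

passages = split_text("one two three four five six seven", n=3)
print(passages)  # ['one two three', 'four five six', 'seven']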
| 333 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
UpperCAmelCase_ : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Optional[float] = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
snake_case__ : bool = field(default=lowercase__ , metadata={'''help''': '''Whether to SortishSamler or not.'''} )
snake_case__ : bool = field(
default=lowercase__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
snake_case__ : bool = field(default=lowercase__ , metadata={'''help''': '''whether to use adafactor'''} )
snake_case__ : Optional[float] = field(
default=lowercase__ , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
snake_case__ : Optional[float] = field(
default=lowercase__ , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
snake_case__ : Optional[float] = field(default=lowercase__ , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
snake_case__ : Optional[float] = field(
default=lowercase__ , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
snake_case__ : Optional[str] = field(
default='''linear''' , metadata={'''help''': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
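The dataclass above relies on `dataclasses.field(metadata={'help': ...})` to carry the text that `HfArgumentParser` exposes as CLI help. A stdlib-only sketch showing how such metadata can be read back (the field names are illustrative stand-ins, not the real arguments):

from dataclasses import dataclass, field, fields

@dataclass
class ToyArguments:
    label_smoothing: float = field(default=0.0, metadata={"help": "Label smoothing epsilon."})
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use a sortish sampler."})

for f in fields(ToyArguments):
    # Each field carries its default and help string alongside the type.
    print(f"--{f.name}", "default:", f.default, "|", f.metadata["help"])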
| 32 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Optional[int] = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
A_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 215 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase_ (a__ ):
"""simple docstring"""
def __init__( self : Tuple , *_lowerCamelCase : List[str] , **_lowerCamelCase : Tuple ):
"""simple docstring"""
warnings.warn(
'''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use FlavaImageProcessor instead.''' , _lowerCamelCase , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
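The class above is a pure deprecation shim: warn at construction time, then defer everything to the replacement. The general pattern, sketched with placeholder class names:

import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    # Kept only for backward compatibility; behaves exactly like NewProcessor.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor(size=256)  # emits a FutureWarning
print(extractor.size)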
| 371 |
'''simple docstring'''
import heapq
def snake_case__ ( lowerCamelCase__ : dict ) -> set[int]:
A_ : list[list] = []
# for each node and its adjacency list, add them together with the node's rank to the queue
# using the heapq module, the queue is filled like a priority queue
# heapq implements a min-heap, so -1 * len(v) is used to get max-rank-first ordering
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase__ , [-1 * len(lowerCamelCase__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
A_ : str = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
A_ : Tuple = heapq.heappop(lowerCamelCase__ )[1][0]
chosen_vertices.add(lowerCamelCase__ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v has no adjacent nodes left, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
A_ : List[str] = elem[1][1].index(lowerCamelCase__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
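A useful sanity check for the greedy cover above is that every listed edge has at least one endpoint in the returned set; a small sketch against the demo graph (the covers tested are hand-picked examples, not the algorithm's output):

def is_vertex_cover(graph, cover):
    # True if every (u, v) pair in the adjacency lists touches the cover.
    return all(u in cover or v in cover for u, nbrs in graph.items() for v in nbrs)

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(is_vertex_cover(graph, {0, 2, 3}))  # True
print(is_vertex_cover(graph, {4}))        # False: edge (0, 1) is uncovered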
| 4 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class A__ ( _lowerCamelCase):
A_ : int = 'mobilenet_v2'
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=2_24 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu6" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.8 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=0.001 , _SCREAMING_SNAKE_CASE=2_55 , **_SCREAMING_SNAKE_CASE , ):
super().__init__(**_SCREAMING_SNAKE_CASE )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
__lowerCAmelCase : Union[str, Any] = num_channels
__lowerCAmelCase : Optional[Any] = image_size
__lowerCAmelCase : List[str] = depth_multiplier
__lowerCAmelCase : int = depth_divisible_by
__lowerCAmelCase : Union[str, Any] = min_depth
__lowerCAmelCase : int = expand_ratio
__lowerCAmelCase : Optional[Any] = output_stride
__lowerCAmelCase : List[str] = first_layer_is_expansion
__lowerCAmelCase : int = finegrained_output
__lowerCAmelCase : Dict = hidden_act
__lowerCAmelCase : Optional[Any] = tf_padding
__lowerCAmelCase : Optional[int] = classifier_dropout_prob
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : Tuple = layer_norm_eps
__lowerCAmelCase : List[Any] = semantic_loss_ignore_index
class A__ ( _lowerCamelCase):
A_ : List[str] = version.parse('1.11')
@property
def __lowerCamelCase ( self ):
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def __lowerCamelCase ( self ):
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def __lowerCamelCase ( self ):
return 1E-4
| 86 |
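A note on the MobileNetV2 configuration in the row above: `depth_multiplier`, `depth_divisible_by` and `min_depth` feed the usual MobileNet channel-rounding rule. The helper below is the conventional formulation of that rule and is an assumption here, not code from the modeling file:

def make_divisible(value, divisor=8, min_value=None):
    # Round to the nearest multiple of `divisor`, never dropping below
    # `min_value` and never shrinking the channel count by more than 10%.
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value

print(make_divisible(32 * 1.4))   # 48: scaled by depth_multiplier=1.4, then rounded
print(make_divisible(24 * 0.75))  # 24: 16 would shrink >10%, so one divisor is added back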
from collections.abc import Generator
from math import sin
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : bytes ) -> bytes:
"""simple docstring"""
if len(__magic_name__ ) != 32:
raise ValueError("""Input must be of length 32""" )
UpperCamelCase :int = B""""""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError("""Input must be non-negative""" )
UpperCamelCase :Any = format(__magic_name__ , """08x""" )[-8:]
UpperCamelCase :Union[str, Any] = B""""""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" )
return little_endian_hex
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : bytes ) -> bytes:
"""simple docstring"""
UpperCamelCase :str = B""""""
for char in message:
bit_string += format(__magic_name__ , """08b""" ).encode("""utf-8""" )
UpperCamelCase :Any = format(len(__magic_name__ ) , """064b""" ).encode("""utf-8""" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__magic_name__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : bytes ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(__magic_name__ ) % 512 != 0:
raise ValueError("""Input must have length that's a multiple of 512""" )
for pos in range(0 , len(__magic_name__ ) , 512 ):
UpperCamelCase :Tuple = bit_string[pos : pos + 512]
UpperCamelCase :Optional[int] = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError("""Input must be non-negative""" )
UpperCamelCase :List[str] = format(__magic_name__ , """032b""" )
UpperCamelCase :Any = """"""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__magic_name__ , 2 )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
return (a + b) % 2**32
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError("""Input must be non-negative""" )
if shift < 0:
raise ValueError("""Shift must be non-negative""" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : bytes ) -> bytes:
"""simple docstring"""
UpperCamelCase :Tuple = preprocess(__magic_name__ )
UpperCamelCase :List[str] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
UpperCamelCase :Union[str, Any] = 0X67_45_23_01
UpperCamelCase :Union[str, Any] = 0XEF_CD_AB_89
UpperCamelCase :List[str] = 0X98_BA_DC_FE
UpperCamelCase :int = 0X10_32_54_76
UpperCamelCase :int = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__magic_name__ ):
UpperCamelCase :Optional[Any] = aa
UpperCamelCase :Any = ba
UpperCamelCase :Tuple = ca
UpperCamelCase :List[str] = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
UpperCamelCase :int = d ^ (b & (c ^ d))
UpperCamelCase :Optional[int] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
UpperCamelCase :str = c ^ (d & (b ^ c))
UpperCamelCase :Union[str, Any] = (5 * i + 1) % 16
elif i <= 47:
UpperCamelCase :str = b ^ c ^ d
UpperCamelCase :Optional[int] = (3 * i + 5) % 16
else:
UpperCamelCase :List[str] = c ^ (b | not_aa(__magic_name__ ))
UpperCamelCase :int = (7 * i) % 16
UpperCamelCase :Dict = (f + a + added_consts[i] + block_words[g]) % 2**32
UpperCamelCase :Tuple = d
UpperCamelCase :str = c
UpperCamelCase :Tuple = b
UpperCamelCase :Optional[Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) )
# Add hashed chunk to running total
UpperCamelCase :List[str] = sum_aa(__magic_name__ , __magic_name__ )
UpperCamelCase :str = sum_aa(__magic_name__ , __magic_name__ )
UpperCamelCase :int = sum_aa(__magic_name__ , __magic_name__ )
UpperCamelCase :Optional[Any] = sum_aa(__magic_name__ , __magic_name__ )
UpperCamelCase :Optional[Any] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
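A from-scratch digest like the one above is best validated against known vectors; the two RFC 1321 test cases below are what any reimplementation should reproduce (checked here with the standard library rather than the routine above):

import hashlib

# Well-known MD5 test vectors (RFC 1321); a correct reimplementation
# must reproduce these hex digests byte for byte.
vectors = {
    b"": "d41d8cd98f00b204e9800998ecf8427e",
    b"abc": "900150983cd24fb0d6963f7d28e17f72",
}
for message, expected in vectors.items():
    assert hashlib.md5(message).hexdigest() == expected
print("reference vectors OK")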
| 38 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
SCREAMING_SNAKE_CASE_ = flax_key_tuple[:-1] + ("weight",)
SCREAMING_SNAKE_CASE_ = torch.permute(__UpperCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__UpperCamelCase ):
# linear layer
SCREAMING_SNAKE_CASE_ = flax_key_tuple[:-1] + ("weight",)
SCREAMING_SNAKE_CASE_ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
SCREAMING_SNAKE_CASE_ = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if "metadata" in layer:
SCREAMING_SNAKE_CASE_ = layer.split("metadata" )
SCREAMING_SNAKE_CASE_ = "".join(split_layer[0] )[:-1]
SCREAMING_SNAKE_CASE_ = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
SCREAMING_SNAKE_CASE_ = layer.split("kvstore" )
SCREAMING_SNAKE_CASE_ = "".join(split_layer[0] )[:-1]
SCREAMING_SNAKE_CASE_ = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
SCREAMING_SNAKE_CASE_ = layer.split("/" )
SCREAMING_SNAKE_CASE_ = "/".join(split_layer[:-1] )
SCREAMING_SNAKE_CASE_ = (split_layer[-1],)
if "kvstore/path" in layer:
SCREAMING_SNAKE_CASE_ = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
SCREAMING_SNAKE_CASE_ = "file"
else:
SCREAMING_SNAKE_CASE_ = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = rename_keys(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = {}
for k, v in current_block.items():
SCREAMING_SNAKE_CASE_ = v
SCREAMING_SNAKE_CASE_ = new_current_block
torch.save(__UpperCamelCase , __UpperCamelCase )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = WEIGHTS_NAME ):
SCREAMING_SNAKE_CASE_ = convert_file_size_to_int(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
SCREAMING_SNAKE_CASE_ = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
SCREAMING_SNAKE_CASE_ = flatten_dict(__UpperCamelCase , sep="/" )
SCREAMING_SNAKE_CASE_ = {}
for layer in checkpoint_info.keys():
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = get_key_and_tensorstore_dict(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if curr_real_layer_name in all_layers:
SCREAMING_SNAKE_CASE_ = content
else:
SCREAMING_SNAKE_CASE_ = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
SCREAMING_SNAKE_CASE_ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
SCREAMING_SNAKE_CASE_ = torch.tensor(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = rename_base_flax_keys(tuple(key.split("/" ) ) , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = "/".join(__UpperCamelCase )
# If this weight would tip the current shard over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
SCREAMING_SNAKE_CASE_ = os.path.join(
__UpperCamelCase , weights_name.replace(".bin" , F'''-{len(__UpperCamelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(__UpperCamelCase , __UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = raw_weights.to(getattr(__UpperCamelCase , __UpperCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , weights_name.replace(".bin" , F'''-{len(__UpperCamelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(__UpperCamelCase , __UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__UpperCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = {}
for idx, shard in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = weights_name.replace(
".bin" , F'''-{idx+1:05d}-of-{len(__UpperCamelCase ):05d}.bin''' ) # len(sharded_state_dicts):05d}
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , weights_name.replace(".bin" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__UpperCamelCase , os.path.join(__UpperCamelCase , __UpperCamelCase ) )
SCREAMING_SNAKE_CASE_ = shard
for key in shard:
SCREAMING_SNAKE_CASE_ = shard_file
# Add the metadata
SCREAMING_SNAKE_CASE_ = {"total_size": total_size}
SCREAMING_SNAKE_CASE_ = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , "w" , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE_ = json.dumps(__UpperCamelCase , indent=2 , sort_keys=__UpperCamelCase ) + "\n"
f.write(__UpperCamelCase )
return metadata, index
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
A : Optional[Any] = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def a__ ( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
SCREAMING_SNAKE_CASE_ = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
SCREAMING_SNAKE_CASE_ = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
SCREAMING_SNAKE_CASE_ = TaTokenizer.from_pretrained("t5-small" )
SCREAMING_SNAKE_CASE_ = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
SCREAMING_SNAKE_CASE_ = tokenizer(__UpperCamelCase , return_tensors="pt" ).input_ids
SCREAMING_SNAKE_CASE_ = model.generate(__UpperCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
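Stripped of the tensorstore and key-renaming details, the sharding loop above is a greedy bin-fill: accumulate weights until the next one would overflow `max_shard_size`, then start a new shard. A framework-free sketch with made-up tensor names and byte counts:

def greedy_shard(weights, max_shard_size):
    # weights: list of (name, size_in_bytes); returns lists of names per shard.
    shards, current, current_size = [], [], 0
    for name, size in weights:
        if current and current_size + size > max_shard_size:
            shards.append(current)
            current, current_size = [], 0
        current.append(name)
        current_size += size
    if current:
        shards.append(current)
    return shards

weights = [("embed", 700), ("block0", 400), ("block1", 400), ("head", 300)]
print(greedy_shard(weights, max_shard_size=1000))
# [['embed'], ['block0', 'block1'], ['head']]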
| 359 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
A : List[Any] = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ["DPTFeatureExtractor"]
A : str = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : List[Any] = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
__lowercase : List[str] = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
__lowercase : Dict = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class __lowercase ( _lowerCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = RealmTokenizer
def __init__(self , A=None , A=None , A=True , A="[UNK]" , A="[SEP]" , A="[PAD]" , A="[CLS]" , A="[MASK]" , A=True , A=None , **A , ):
super().__init__(
_lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , tokenize_chinese_chars=_lowerCamelCase , strip_accents=_lowerCamelCase , **_lowerCamelCase , )
lowerCamelCase_ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowerCamelCase ) != tokenize_chinese_chars
):
lowerCamelCase_ : List[Any] = getattr(_lowerCamelCase , normalizer_state.pop('''type''' ) )
lowerCamelCase_ : Dict = do_lower_case
lowerCamelCase_ : Tuple = strip_accents
lowerCamelCase_ : int = tokenize_chinese_chars
lowerCamelCase_ : Union[str, Any] = normalizer_class(**_lowerCamelCase )
lowerCamelCase_ : Dict = do_lower_case
def UpperCAmelCase__ (self , A , **A ):
lowerCamelCase_ : Union[str, Any] = PaddingStrategy.MAX_LENGTH
lowerCamelCase_ : Union[str, Any] = text
lowerCamelCase_ : Union[str, Any] = kwargs.pop('''text_pair''' , _lowerCamelCase )
lowerCamelCase_ : Optional[Any] = kwargs.pop('''return_tensors''' , _lowerCamelCase )
lowerCamelCase_ : Any = {
'''input_ids''': [],
'''attention_mask''': [],
'''token_type_ids''': [],
}
for idx, candidate_text in enumerate(_lowerCamelCase ):
if batch_text_pair is not None:
lowerCamelCase_ : Dict = batch_text_pair[idx]
else:
lowerCamelCase_ : Optional[Any] = None
lowerCamelCase_ : str = super().__call__(_lowerCamelCase , _lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase_ : Optional[Any] = encoded_candidates.get('''input_ids''' )
lowerCamelCase_ : List[str] = encoded_candidates.get('''attention_mask''' )
lowerCamelCase_ : Optional[int] = encoded_candidates.get('''token_type_ids''' )
if encoded_input_ids is not None:
output_data["input_ids"].append(_lowerCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_lowerCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_lowerCamelCase )
lowerCamelCase_ : Union[str, Any] = {key: item for key, item in output_data.items() if len(_lowerCamelCase ) != 0}
return BatchEncoding(_lowerCamelCase , tensor_type=_lowerCamelCase )
def UpperCAmelCase__ (self , A , A=None ):
lowerCamelCase_ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Tuple = [self.sep_token_id]
lowerCamelCase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
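The `token_type_ids` rule near the bottom of the class above is the standard BERT-style segmentation: zeros over `[CLS] A [SEP]`, ones over `B [SEP]`. The same arithmetic on plain lists (the special-token ids are illustrative):

CLS, SEP = 101, 102  # illustrative special-token ids

def token_type_ids(ids_a, ids_b=None):
    # Segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP].
    first = [CLS] + ids_a + [SEP]
    if ids_b is None:
        return [0] * len(first)
    return [0] * len(first) + [1] * len(ids_b + [SEP])

print(token_type_ids([7, 8], [9]))  # [0, 0, 0, 0, 1, 1]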
| 318 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Union[str, Any]=13, _lowerCamelCase : Any=3, _lowerCamelCase : Optional[int]=2_24, _lowerCamelCase : str=30, _lowerCamelCase : Dict=4_00, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Any=None, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Any=[0.5, 0.5, 0.5], _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], ):
'''simple docstring'''
__A = size if size is not None else {'''height''': 18, '''width''': 18}
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = min_resolution
__A = max_resolution
__A = do_resize
__A = size
__A = do_normalize
__A = image_mean
__A = image_std
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : str = ViTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
__A = EfficientFormerImageProcessorTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase, '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''image_std''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, Image.Image )
# Test not batched input
__A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
# Test batched
__A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
# Initialize image_processor
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, np.ndarray )
# Test not batched input
__A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
# Test batched
__A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
# Initialize image_processor
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, torch.Tensor )
# Test not batched input
__A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
# Test batched
__A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
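Every shape assertion above encodes the channels-first convention `(batch, num_channels, height, width)`. A numpy sketch of the normalize-and-transpose step an image processor performs to get there (the mean/std values are illustrative, not claimed to be the ViT defaults):

import numpy as np

def preprocess(images, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):
    # images: list of (height, width, channels) uint8 arrays ->
    # normalized float batch in channels-first layout, as the tests expect.
    x = np.stack(images).astype(np.float32) / 255.0
    x = (x - np.array(mean)) / np.array(std)
    return x.transpose(0, 3, 1, 2)

batch = [np.random.randint(0, 256, (18, 18, 3), dtype=np.uint8) for _ in range(4)]
print(preprocess(batch).shape)  # (4, 3, 18, 18)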
| 266 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    """Combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # initialize first so the fallback below cannot raise a NameError when the
        # deprecated kwarg is absent
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input: OCR-derived words/boxes and user-provided ones are mutually exclusive
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = [images[sample_idx] for sample_idx in overflow_to_sample_mapping]

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
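

# --- Editorial usage sketch (an addition, not part of the original file): how a processor
# like the one above is typically driven end to end. The checkpoint name and the image
# path are illustrative assumptions; apply_ocr additionally requires pytesseract.
from PIL import Image

from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")  # assumed checkpoint
image = Image.open("document.png").convert("RGB")  # hypothetical scanned page

# With apply_ocr=True (the LayoutLMv2 image processor default) words and boxes come from
# OCR, so only the image is passed; supplying `boxes` here would hit the ValueError above.
encoding = processor(image, return_tensors="pt")
print(sorted(encoding.keys()))  # e.g. ['attention_mask', 'bbox', 'image', 'input_ids']

# The overflow remapping of get_overflowing_images, in miniature: when one page is split
# into several token windows, overflow_to_sample_mapping duplicates images to match.
images = ["img0", "img1"]               # stand-ins for pixel-value arrays
overflow_to_sample_mapping = [0, 0, 1]  # sample 0 overflowed into two windows
assert [images[i] for i in overflow_to_sample_mapping] == ["img0", "img0", "img1"]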
| 43 |
'''simple docstring'''
import json
import os
import unittest

from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors


@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        # a tiny byte-level BPE vocabulary, written to disk so tokenizers load offline
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check that input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1_024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while the Python tokenizer doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
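

# --- Editorial fixture sketch (an addition, not part of the original file): the setUp
# above writes a toy byte-level BPE vocabulary to disk so tokenizers can be exercised
# offline. This stand-alone version shows the same pattern; the token list and temp
# directory are illustrative assumptions, and MvpTokenizer works here because MVP reuses
# the RoBERTa vocab.json/merges.txt format.
import json
import os
import tempfile

from transformers import MvpTokenizer

toy_vocab = ["l", "o", "w", "e", "r", "er", "\u0120low", "<unk>"]
toy_vocab_tokens = dict(zip(toy_vocab, range(len(toy_vocab))))
toy_merges = ["#version: 0.2", "e r", "\u0120 l", "\u0120l o", "\u0120lo w"]

with tempfile.TemporaryDirectory() as tmpdir:
    with open(os.path.join(tmpdir, "vocab.json"), "w", encoding="utf-8") as fp:
        fp.write(json.dumps(toy_vocab_tokens) + "\n")
    with open(os.path.join(tmpdir, "merges.txt"), "w", encoding="utf-8") as fp:
        fp.write("\n".join(toy_merges))

    toy_tokenizer = MvpTokenizer.from_pretrained(tmpdir, unk_token="<unk>")
    # "lower" -> l o w e r, then the "e r" merge fires; expected roughly ['l', 'o', 'w', 'er']
    print(toy_tokenizer.tokenize("lower"))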
| 43 | 1 |