Dataset schema (column, type, observed range):

    code                     string   length 86 - 54.5k
    code_codestyle           int64    0 - 371
    style_context            string   length 87 - 49.2k
    style_context_codestyle  int64    0 - 349
    label                    int64    0 - 1

Each record below follows this column order: a `code` snippet, its
`code_codestyle` id, a `style_context` snippet, its
`style_context_codestyle` id, and a binary `label`.
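For orientation, a minimal sketch of how rows with this schema could be iterated with the Hugging Face `datasets` library. The dataset path used here is a hypothetical placeholder, since the dump does not name its source; the column names come from the schema above.

# Minimal sketch, assuming this dump was exported from a Hugging Face dataset
# with the schema above. "user/code-style-pairs" is a hypothetical placeholder,
# not the actual source of this page.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical path

for row in ds.select(range(3)):
    # Each row pairs a `code` snippet with a `style_context` snippet,
    # plus integer codestyle ids and a binary label.
    print(len(row["code"]), row["code_codestyle"])
    print(len(row["style_context"]), row["style_context_codestyle"])
    print("label:", row["label"])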
"""simple docstring""" import logging from transformers import PretrainedConfig __A = logging.getLogger(__name__) __A = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class _snake_case ( UpperCamelCase_ ): snake_case__ = 'bertabs' def __init__( self : Dict , UpperCAmelCase : int=30522 , UpperCAmelCase : Union[str, Any]=512 , UpperCAmelCase : Optional[int]=6 , UpperCAmelCase : List[str]=512 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=512 , UpperCAmelCase : Optional[int]=0.2 , UpperCAmelCase : Union[str, Any]=6 , UpperCAmelCase : Any=768 , UpperCAmelCase : List[Any]=8 , UpperCAmelCase : Union[str, Any]=2048 , UpperCAmelCase : Optional[Any]=0.2 , **UpperCAmelCase : Any , ): super().__init__(**lowercase_ ) __lowerCamelCase : Optional[Any] = vocab_size __lowerCamelCase : Union[str, Any] = max_pos __lowerCamelCase : Union[str, Any] = enc_layers __lowerCamelCase : str = enc_hidden_size __lowerCamelCase : str = enc_heads __lowerCamelCase : List[str] = enc_ff_size __lowerCamelCase : str = enc_dropout __lowerCamelCase : List[str] = dec_layers __lowerCamelCase : Tuple = dec_hidden_size __lowerCamelCase : Optional[Any] = dec_heads __lowerCamelCase : Dict = dec_ff_size __lowerCamelCase : str = dec_dropout
code_codestyle: 135
style_context:

import unittest

from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "[UNK]", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "[UNK]", "."],
        )

    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
style_context_codestyle: 199
label: 0
code:

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 173
style_context:

from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
style_context_codestyle: 173
label: 1
code:

import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
code_codestyle: 340
style_context:

a_ : Any = [9_99, 8_00, 7_99, 6_00, 5_99, 5_00, 4_00, 3_99, 3_77, 3_55, 3_33, 3_11, 2_88, 2_66, 2_44, 2_22, 2_00, 1_99, 1_77, 1_55, 1_33, 1_11, 88, 66, 44, 22, 0]

a_ : Any = [9_99, 9_76, 9_52, 9_28, 9_05, 8_82, 8_58, 8_57, 8_10, 7_62, 7_15, 7_14, 5_72, 4_29, 4_28, 2_86, 2_85, 2_38, 1_90, 1_43, 1_42, 1_18, 95, 71, 47, 24, 0]

a_ : Optional[Any] = [9_99, 9_88, 9_77, 9_66, 9_55, 9_44, 9_33, 9_22, 9_11, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_50, 3_00, 2_99, 2_66, 2_33, 2_00, 1_99, 1_79, 1_59, 1_40, 1_20, 1_00, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0]

a_ : str = [9_99, 9_95, 9_92, 9_89, 9_85, 9_81, 9_78, 9_75, 9_71, 9_67, 9_64, 9_61, 9_57, 9_56, 9_51, 9_47, 9_42, 9_37, 9_33, 9_28, 9_23, 9_19, 9_14, 9_13, 9_08, 9_03, 8_97, 8_92, 8_87, 8_81, 8_76, 8_71, 8_70, 8_64, 8_58, 8_52, 8_46, 8_40, 8_34, 8_28, 8_27, 8_20, 8_13, 8_06, 7_99, 7_92, 7_85, 7_84, 7_77, 7_70, 7_63, 7_56, 7_49, 7_42, 7_41, 7_33, 7_24, 7_16, 7_07, 6_99, 6_98, 6_88, 6_77, 6_66, 6_56, 6_55, 6_45, 6_34, 6_23, 6_13, 6_12, 5_98, 5_84, 5_70, 5_69, 5_55, 5_41, 5_27, 5_26, 5_05, 4_84, 4_83, 4_62, 4_40, 4_39, 3_96, 3_95, 3_52, 3_51, 3_08, 3_07, 2_64, 2_63, 2_20, 2_19, 1_76, 1_32, 88, 44, 0]

a_ : Optional[int] = [9_99, 9_97, 9_95, 9_92, 9_90, 9_88, 9_86, 9_84, 9_81, 9_79, 9_77, 9_75, 9_72, 9_70, 9_68, 9_66, 9_64, 9_61, 9_59, 9_57, 9_56, 9_54, 9_51, 9_49, 9_46, 9_44, 9_41, 9_39, 9_36, 9_34, 9_31, 9_29, 9_26, 9_24, 9_21, 9_19, 9_16, 9_14, 9_13, 9_10, 9_07, 9_05, 9_02, 8_99, 8_96, 8_93, 8_91, 8_88, 8_85, 8_82, 8_79, 8_77, 8_74, 8_71, 8_70, 8_67, 8_64, 8_61, 8_58, 8_55, 8_52, 8_49, 8_46, 8_43, 8_40, 8_37, 8_34, 8_31, 8_28, 8_27, 8_24, 8_21, 8_17, 8_14, 8_11, 8_08, 8_04, 8_01, 7_98, 7_95, 7_91, 7_88, 7_85, 7_84, 7_80, 7_77, 7_74, 7_70, 7_66, 7_63, 7_60, 7_56, 7_52, 7_49, 7_46, 7_42, 7_41, 7_37, 7_33, 7_30, 7_26, 7_22, 7_18, 7_14, 7_10, 7_07, 7_03, 6_99, 6_98, 6_94, 6_90, 6_85, 6_81, 6_77, 6_73, 6_69, 6_64, 6_60, 6_56, 6_55, 6_50, 6_46, 6_41, 6_36, 6_32, 6_27, 6_22, 6_18, 6_13, 6_12, 6_07, 6_02, 5_96, 5_91, 5_86, 5_80, 5_75, 5_70, 5_69, 5_63, 5_57, 5_51, 5_45, 5_39, 5_33, 5_27, 5_26, 5_19, 5_12, 5_05, 4_98, 4_91, 4_84, 4_83, 4_74, 4_66, 4_57, 4_49, 4_40, 4_39, 4_28, 4_18, 4_07, 3_96, 3_95, 3_81, 3_66, 3_52, 3_51, 3_30, 3_08, 3_07, 2_86, 2_64, 2_63, 2_42, 2_20, 2_19, 1_76, 1_75, 1_32, 1_31, 88, 44, 0]

a_ : Dict = [9_99, 9_91, 9_82, 9_74, 9_66, 9_58, 9_50, 9_41, 9_33, 9_25, 9_16, 9_08, 9_00, 8_99, 8_74, 8_50, 8_25, 8_00, 7_99, 7_00, 6_00, 5_00, 4_00, 3_00, 2_00, 1_00, 0]

a_ : Tuple = [9_99, 9_92, 9_85, 9_78, 9_71, 9_64, 9_57, 9_49, 9_42, 9_35, 9_28, 9_21, 9_14, 9_07, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_00, 2_99, 2_00, 1_99, 1_00, 99, 0]

a_ : Any = [9_99, 9_96, 9_92, 9_89, 9_85, 9_82, 9_79, 9_75, 9_72, 9_68, 9_65, 9_61, 9_58, 9_55, 9_51, 9_48, 9_44, 9_41, 9_38, 9_34, 9_31, 9_27, 9_24, 9_20, 9_17, 9_14, 9_10, 9_07, 9_03, 9_00, 8_99, 8_91, 8_84, 8_76, 8_69, 8_61, 8_53, 8_46, 8_38, 8_30, 8_23, 8_15, 8_08, 8_00, 7_99, 7_88, 7_77, 7_66, 7_55, 7_44, 7_33, 7_22, 7_11, 7_00, 6_99, 6_88, 6_77, 6_66, 6_55, 6_44, 6_33, 6_22, 6_11, 6_00, 5_99, 5_85, 5_71, 5_57, 5_42, 5_28, 5_14, 5_00, 4_99, 4_85, 4_71, 4_57, 4_42, 4_28, 4_14, 4_00, 3_99, 3_79, 3_59, 3_40, 3_20, 3_00, 2_99, 2_79, 2_59, 2_40, 2_20, 2_00, 1_99, 1_66, 1_33, 1_00, 99, 66, 33, 0]
style_context_codestyle: 75
label: 0
code:

class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node):
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value):
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert):
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert):
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position, value):
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node):
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 357
style_context:

def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
style_context_codestyle: 285
label: 0
code:

from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image, size, data_format=None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
code_codestyle: 39
style_context:

from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    def put(self, value):
        raise NotImplementedError()

    def end(self):
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
style_context_codestyle: 95
label: 0
code:

import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
code_codestyle: 364
style_context:

import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
style_context_codestyle: 183
label: 0
code:

from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    total = 0

    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
code_codestyle: 216
style_context (truncated in the source dump):

from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }


@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
) check_decoder_attentions_output(snake_case_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __a : List[Any] = True __a : Dict = model_class(snake_case_ ) __a : Tuple = model(self._prepare_for_class(snake_case_ , snake_case_ ) ) self.assertEqual(config.output_hidden_states , snake_case_ ) check_encoder_attentions_output(snake_case_ ) # Check attention is always last and order is fine __a : List[str] = True __a : Any = True __a : Tuple = model_class(snake_case_ ) __a : int = model(self._prepare_for_class(snake_case_ , snake_case_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) ) self.assertEqual(model.config.output_hidden_states , snake_case_ ) check_encoder_attentions_output(snake_case_ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def lowerCAmelCase (self : List[str] ): pass def lowerCAmelCase (self : List[Any] ): # TODO: Head-masking not yet implement pass def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] ): return tf.constant(lowerCAmelCase__ , dtype=tf.intaa ) lowercase__ =1e-4 @slow @require_tf class UpperCamelCase__ ( unittest.TestCase ): def lowerCAmelCase (self : Any ): __a : Dict = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here __a : Union[str, Any] = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) __a : Dict = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) __a : List[str] = prepare_led_inputs_dict(model.config , snake_case_ , snake_case_ ) __a : List[str] = model(**snake_case_ )[0] __a : Any = (1, 1_0_2_4, 7_6_8) self.assertEqual(output.shape , snake_case_ ) # change to expected output here __a : Dict = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , ) tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-3 ) def lowerCAmelCase (self : int ): __a : Optional[Any] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here __a : int = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) __a : Tuple = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) __a : Dict = prepare_led_inputs_dict(model.config , snake_case_ , snake_case_ ) __a : List[str] = model(**snake_case_ )[0] __a : List[Any] = (1, 1_0_2_4, model.config.vocab_size) self.assertEqual(output.shape , snake_case_ ) # change to expected output here __a : str = tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , ) tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-3 , rtol=1E-3 )
"""simple docstring""" import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : Optional[int] ): '''simple docstring''' assert isinstance(__snake_case , __snake_case ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Any ): '''simple docstring''' a : List[str] = tmp_path / "cache" a : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a : Tuple = JsonDatasetReader(__snake_case , cache_dir=__snake_case , keep_in_memory=__snake_case ).read() _check_json_dataset(__snake_case , __snake_case ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : List[str] , _lowercase : Any ): '''simple docstring''' a : str = tmp_path / "cache" a : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} a : Optional[Any] = features.copy() if features else default_expected_features a : Union[str, Any] = ( Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) a : Tuple = JsonDatasetReader(__snake_case , features=__snake_case , cache_dir=__snake_case ).read() _check_json_dataset(__snake_case , __snake_case ) @pytest.mark.parametrize( "features" , [ None, {"col_3": "float64", "col_1": "string", "col_2": "int64"}, ] , ) def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Tuple ): '''simple docstring''' a : List[str] = tmp_path / "cache" a : Optional[int] = {"col_3": "float64", "col_1": "string", "col_2": "int64"} a : Optional[int] = features.copy() if features else default_expected_features a : Union[str, Any] = ( Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) a : Dict = JsonDatasetReader(__snake_case , features=__snake_case , cache_dir=__snake_case ).read() assert isinstance(__snake_case , __snake_case ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : Optional[Any] ): '''simple docstring''' a : Tuple = {"col_2": "int64", "col_3": "float64", "col_1": "string"} a : Any = features.copy() a : int = ( Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) a : List[str] = tmp_path / "cache" a : Optional[int] = JsonDatasetReader(__snake_case , features=__snake_case , cache_dir=__snake_case ).read() 
assert isinstance(__snake_case , __snake_case ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : List[Any] , _lowercase : int ): '''simple docstring''' a : List[Any] = tmp_path / "cache" a : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} a : List[Any] = JsonDatasetReader(__snake_case , cache_dir=__snake_case , split=__snake_case ).read() _check_json_dataset(__snake_case , __snake_case ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Dict ): '''simple docstring''' if issubclass(__snake_case , __snake_case ): a : Dict = jsonl_path elif issubclass(__snake_case , __snake_case ): a : Any = [jsonl_path] a : Union[str, Any] = tmp_path / "cache" a : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} a : Tuple = JsonDatasetReader(__snake_case , cache_dir=__snake_case ).read() _check_json_dataset(__snake_case , __snake_case ) def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : Optional[Any]=("train",) ): '''simple docstring''' assert isinstance(__snake_case , __snake_case ) for split in splits: a : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : str ): '''simple docstring''' a : List[Any] = tmp_path / "cache" a : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a : int = JsonDatasetReader({"train": jsonl_path} , cache_dir=__snake_case , keep_in_memory=__snake_case ).read() _check_json_datasetdict(__snake_case , __snake_case ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : Optional[int] , _lowercase : Optional[int] ): '''simple docstring''' a : Union[str, Any] = tmp_path / "cache" a : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} a : Optional[int] = features.copy() if features else default_expected_features a : int = ( Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) a : Optional[Any] = JsonDatasetReader({"train": jsonl_path} , features=__snake_case , cache_dir=__snake_case ).read() _check_json_datasetdict(__snake_case , __snake_case ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : Any , _lowercase : Optional[int] ): '''simple docstring''' if split: a : Any = {split: 
jsonl_path} else: a : int = "train" a : Tuple = {"train": jsonl_path, "test": jsonl_path} a : Optional[Any] = tmp_path / "cache" a : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"} a : List[str] = JsonDatasetReader(__snake_case , cache_dir=__snake_case ).read() _check_json_datasetdict(__snake_case , __snake_case , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def _SCREAMING_SNAKE_CASE ( _lowercase : int ): '''simple docstring''' return json.load(__snake_case ) def _SCREAMING_SNAKE_CASE ( _lowercase : Dict ): '''simple docstring''' return [json.loads(__snake_case ) for line in buffer] class __UpperCamelCase : @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE ).write() buffer.seek(0 ) a : str = load_json_function(_SCREAMING_SNAKE_CASE ) assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert isinstance(exported_content[0] , _SCREAMING_SNAKE_CASE ) assert len(_SCREAMING_SNAKE_CASE ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: with io.BytesIO() as buffer: JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE , orient=_SCREAMING_SNAKE_CASE ).write() buffer.seek(0 ) a : Optional[int] = load_json(_SCREAMING_SNAKE_CASE ) assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_SCREAMING_SNAKE_CASE , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_SCREAMING_SNAKE_CASE ) == 10 @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: with io.BytesIO() as buffer: JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE , num_proc=2 ).write() buffer.seek(0 ) a : int = load_json_function(_SCREAMING_SNAKE_CASE ) assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert isinstance(exported_content[0] , _SCREAMING_SNAKE_CASE ) assert len(_SCREAMING_SNAKE_CASE ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: with io.BytesIO() as buffer: JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE , 
orient=_SCREAMING_SNAKE_CASE , num_proc=2 ).write() buffer.seek(0 ) a : List[Any] = load_json(_SCREAMING_SNAKE_CASE ) assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_SCREAMING_SNAKE_CASE , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_SCREAMING_SNAKE_CASE ) == 10 def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]: with pytest.raises(_SCREAMING_SNAKE_CASE ): with io.BytesIO() as buffer: JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=0 ) @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: a : Union[str, Any] = tmp_path_factory.mktemp("data" ) / f"""test.json.{extension}""" a : List[str] = str(shared_datadir / f"""test_file.json.{extension}""" ) JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compression=_SCREAMING_SNAKE_CASE ).write() with fsspec.open(_SCREAMING_SNAKE_CASE , "rb" , compression="infer" ) as f: a : Optional[Any] = f.read() with fsspec.open(_SCREAMING_SNAKE_CASE , "rb" , compression="infer" ) as f: a : Optional[Any] = f.read() assert exported_content == original_content
"""simple docstring""" from __future__ import annotations from math import pow, sqrt def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->dict[str, float]: '''simple docstring''' if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if resistance == 0: return {"resistance": sqrt(pow(_lowercase , 2 ) - pow(_lowercase , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(_lowercase , 2 ) - pow(_lowercase , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(_lowercase , 2 ) + pow(_lowercase , 2 ) )} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="""%(message)s""") def UpperCamelCase ( __lowerCamelCase : np.ndarray ): return input_array.reshape((input_array.size, 1) ) def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): snake_case : Any = np.nan for i in range(__lowerCamelCase ): snake_case : List[str] = features[:, labels == i] snake_case : Dict = data.mean(1 ) # Centralize the data of class i snake_case : Optional[Any] = data - column_reshape(__lowerCamelCase ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(__lowerCamelCase , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) snake_case : Optional[Any] = np.dot(__lowerCamelCase , centered_data.T ) return covariance_sum / features.shape[1] def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): snake_case : Optional[Any] = features.mean(1 ) snake_case : Tuple = np.nan for i in range(__lowerCamelCase ): snake_case : Tuple = features[:, labels == i] snake_case : Tuple = data.shape[1] snake_case : List[str] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase ) , (column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase )).T , ) else: # If covariance_sum is np.nan (i.e. first loop) snake_case : Optional[int] = device_data * np.dot( column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase ) , (column_reshape(__lowerCamelCase ) - column_reshape(__lowerCamelCase )).T , ) return covariance_sum / features.shape[1] def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int ): # Check if the features have been loaded if features.any(): snake_case : Tuple = features.mean(1 ) # Center the dataset snake_case : List[str] = features - np.reshape(__lowerCamelCase , (data_mean.size, 1) ) snake_case : Optional[Any] = np.dot(__lowerCamelCase , centered_data.T ) / features.shape[1] snake_case , snake_case : Dict = np.linalg.eigh(__lowerCamelCase ) # Take all the columns in the reverse order (-1), and then takes only the first snake_case : Optional[Any] = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space snake_case : Union[str, Any] = np.dot(filtered_eigenvectors.T , __lowerCamelCase ) logging.info("Principal Component Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__lowerCamelCase ) logging.error("Dataset empty" ) raise AssertionError def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : int ): assert classes > dimensions # Check if features have been already loaded if features.any: snake_case , snake_case : str = eigh( covariance_between_classes(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , covariance_within_classes(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , ) snake_case : str = eigenvectors[:, ::-1][:, :dimensions] snake_case , snake_case , snake_case : int = np.linalg.svd(__lowerCamelCase ) snake_case : List[Any] = svd_matrix[:, 0:dimensions] snake_case : Optional[Any] = np.dot(filtered_svd_matrix.T , __lowerCamelCase ) logging.info("Linear Discriminant Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , 
force=__lowerCamelCase ) logging.error("Dataset empty" ) raise AssertionError def UpperCamelCase ( ): # Create dummy dataset with 2 classes and 3 features snake_case : str = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) snake_case : Union[str, Any] = np.array([0, 0, 0, 1, 1] ) snake_case : List[Any] = 2 snake_case : Any = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(__lowerCamelCase ) as error_info: snake_case : str = linear_discriminant_analysis( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if isinstance(__lowerCamelCase , np.ndarray ): raise AssertionError( "Did not raise AssertionError for dimensions > classes" ) assert error_info.type is AssertionError def UpperCamelCase ( ): snake_case : List[str] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) snake_case : List[str] = 2 snake_case : int = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] ) with pytest.raises(__lowerCamelCase ) as error_info: snake_case : Union[str, Any] = principal_component_analysis(__lowerCamelCase , __lowerCamelCase ) if not np.allclose(__lowerCamelCase , __lowerCamelCase ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb `number_of_steps` stairs taking
    1 or 2 steps at a time (a Fibonacci recurrence)."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
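# Usage sketch (hedged): assumes the counter is exposed as `climb_stairs`, the
# name restored above. Five steps admit eight distinct climbs, e.g. 1+1+1+1+1 or 1+2+2.
assert climb_stairs(1) == 1
assert climb_stairs(2) == 2
assert climb_stairs(5) == 8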
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
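# Usage sketch (hedged): assumes `transformers` exposes the restored names
# `MvpConfig` / `MvpModel`; the tiny sizes here are illustrative, for smoke tests only.
from transformers import MvpConfig, MvpModel

config = MvpConfig(encoder_layers=2, decoder_layers=2, d_model=64, encoder_ffn_dim=128, decoder_ffn_dim=128)
model = MvpModel(config)
print(model.config.hidden_size)  # 64; `attribute_map` aliases `hidden_size` to `d_model`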
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __lowerCAmelCase : Any =datasets.utils.logging.get_logger(__name__) __lowerCAmelCase : Optional[Any] =["names", "prefix"] __lowerCAmelCase : Dict =["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] __lowerCAmelCase : Optional[Any] =["encoding_errors", "on_bad_lines"] __lowerCAmelCase : List[Any] =["date_format"] @dataclass class _A ( datasets.BuilderConfig ): snake_case__ : str = "," snake_case__ : Optional[str] = None snake_case__ : Optional[Union[int, List[int], str]] = "infer" snake_case__ : Optional[List[str]] = None snake_case__ : Optional[List[str]] = None snake_case__ : Optional[Union[int, str, List[int], List[str]]] = None snake_case__ : Optional[Union[List[int], List[str]]] = None snake_case__ : Optional[str] = None snake_case__ : bool = True snake_case__ : Optional[Literal["c", "python", "pyarrow"]] = None snake_case__ : Dict[Union[int, str], Callable[[Any], Any]] = None snake_case__ : Optional[list] = None snake_case__ : Optional[list] = None snake_case__ : bool = False snake_case__ : Optional[Union[int, List[int]]] = None snake_case__ : Optional[int] = None snake_case__ : Optional[Union[str, List[str]]] = None snake_case__ : bool = True snake_case__ : bool = True snake_case__ : bool = False snake_case__ : bool = True snake_case__ : Optional[str] = None snake_case__ : str = "." snake_case__ : Optional[str] = None snake_case__ : str = '"' snake_case__ : int = 0 snake_case__ : Optional[str] = None snake_case__ : Optional[str] = None snake_case__ : Optional[str] = None snake_case__ : Optional[str] = None snake_case__ : bool = True snake_case__ : bool = True snake_case__ : int = 0 snake_case__ : bool = True snake_case__ : bool = False snake_case__ : Optional[str] = None snake_case__ : int = 1_0000 snake_case__ : Optional[datasets.Features] = None snake_case__ : Optional[str] = "strict" snake_case__ : Literal["error", "warn", "skip"] = "error" snake_case__ : Optional[str] = None def A__ ( self ): """simple docstring""" if self.delimiter is not None: lowercase = self.delimiter if self.column_names is not None: lowercase = self.column_names @property def A__ ( self ): """simple docstring""" lowercase = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, 
"chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __lowerCAmelCase ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class _A ( datasets.ArrowBasedBuilder ): snake_case__ : Any = CsvConfig def A__ ( self ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def A__ ( self , __lowerCAmelCase ): """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) lowercase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__lowerCAmelCase , (str, list, tuple) ): lowercase = data_files if isinstance(__lowerCAmelCase , __lowerCAmelCase ): lowercase = [files] lowercase = [dl_manager.iter_files(__lowerCAmelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] lowercase = [] for split_name, files in data_files.items(): if isinstance(__lowerCAmelCase , __lowerCAmelCase ): lowercase = [files] lowercase = [dl_manager.iter_files(__lowerCAmelCase ) for file in files] splits.append(datasets.SplitGenerator(name=__lowerCAmelCase , gen_kwargs={"""files""": files} ) ) return splits def A__ ( self , __lowerCAmelCase ): """simple docstring""" if self.config.features is not None: lowercase = self.config.features.arrow_schema if all(not require_storage_cast(__lowerCAmelCase ) for feature in self.config.features.values() ): # cheaper cast lowercase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__lowerCAmelCase ) else: # more expensive cast; allows str <-> int/float or str to Audio for example lowercase = table_cast(__lowerCAmelCase , __lowerCAmelCase ) return pa_table def A__ ( self , __lowerCAmelCase ): """simple docstring""" lowercase = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str lowercase = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(__lowerCAmelCase ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCAmelCase ) ): lowercase = pd.read_csv(__lowerCAmelCase , iterator=__lowerCAmelCase , dtype=__lowerCAmelCase , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(__lowerCAmelCase ): lowercase = pa.Table.from_pandas(__lowerCAmelCase ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for 
i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__lowerCAmelCase ) except ValueError as e: logger.error(f'Failed to read file \'{file}\' with error {type(__lowerCAmelCase )}: {e}' ) raise
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # Store token ids compactly: 16-bit ints fit any vocab smaller than 2**16.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
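# Usage sketch (hedged): the flags match the argparse definitions above; the
# script filename and paths are examples, not fixed by the source.
# python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#     --tokenizer_name bert-base-uncased --dump_file data/binarized_text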
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. lowercase : List[str] = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. lowercase : List[Any] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. lowercase : Optional[Any] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : str) -> tuple[str, float]: '''simple docstring''' __UpperCamelCase : Union[str, Any] = len([g for position, g in enumerate(_lowerCamelCase) if g == main_target[position]]) return (item, float(_lowerCamelCase)) def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : str) -> tuple[str, str]: '''simple docstring''' __UpperCamelCase : Union[str, Any] = random.randint(0 , len(_lowerCamelCase) - 1) __UpperCamelCase : Optional[Any] = parent_a[:random_slice] + parent_a[random_slice:] __UpperCamelCase : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : list[str]) -> str: '''simple docstring''' __UpperCamelCase : Union[str, Any] = list(_lowerCamelCase) if random.uniform(0 , 1) < MUTATION_PROBABILITY: __UpperCamelCase : Optional[int] = random.choice(_lowerCamelCase) return "".join(_lowerCamelCase) def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : tuple[str, float] , _lowerCamelCase : list[tuple[str, float]] , _lowerCamelCase : list[str] , ) -> list[str]: '''simple docstring''' __UpperCamelCase : str = [] # Generate more children proportionally to the fitness score. __UpperCamelCase : str = int(parent_a[1] * 100) + 1 __UpperCamelCase : Tuple = 10 if child_n >= 10 else child_n for _ in range(_lowerCamelCase): __UpperCamelCase : str = population_score[random.randint(0 , _lowerCamelCase)][0] __UpperCamelCase , __UpperCamelCase : Optional[Any] = crossover(parent_a[0] , _lowerCamelCase) # Append new string to the population list. pop.append(mutate(_lowerCamelCase , _lowerCamelCase)) pop.append(mutate(_lowerCamelCase , _lowerCamelCase)) return pop def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : list[str] , _lowerCamelCase : bool = True) -> tuple[int, int, str]: '''simple docstring''' if N_POPULATION < N_SELECTED: __UpperCamelCase : Optional[Any] = F'{N_POPULATION} must be bigger than {N_SELECTED}' raise ValueError(_lowerCamelCase) # Verify that the target contains no genes besides the ones inside genes variable. __UpperCamelCase : Any = sorted({c for c in target if c not in genes}) if not_in_genes_list: __UpperCamelCase : Any = F'{not_in_genes_list} is not in genes list, evolution cannot converge' raise ValueError(_lowerCamelCase) # Generate random starting population. __UpperCamelCase : Union[str, Any] = [] for _ in range(_lowerCamelCase): population.append("".join([random.choice(_lowerCamelCase) for i in range(len(_lowerCamelCase))])) # Just some logs to know what the algorithms is doing. __UpperCamelCase , __UpperCamelCase : List[str] = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_lowerCamelCase) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. __UpperCamelCase : Optional[Any] = [evaluate(_lowerCamelCase , _lowerCamelCase) for item in population] # Check if there is a matching evolution. __UpperCamelCase : List[str] = sorted(_lowerCamelCase , key=lambda _lowerCamelCase: x[1] , reverse=_lowerCamelCase) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'\nGeneration: {generation}' F'\nTotal Population:{total_population}' F'\nBest score: {population_score[0][1]}' F'\nBest string: {population_score[0][0]}') # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. __UpperCamelCase : Tuple = population[: int(N_POPULATION / 3)] population.clear() population.extend(_lowerCamelCase) # Normalize population score to be between 0 and 1. __UpperCamelCase : Optional[Any] = [ (item, score / len(_lowerCamelCase)) for item, score in population_score ] # This is selection for i in range(_lowerCamelCase): population.extend(select(population_score[int(_lowerCamelCase)] , _lowerCamelCase , _lowerCamelCase)) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(_lowerCamelCase) > N_POPULATION: break if __name__ == "__main__": lowercase : Any = ( 'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!' ) lowercase : Optional[Any] = list( ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm' 'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\' ) lowercase , lowercase , lowercase : Union[str, Any] = basic(target_str, genes_list) print( f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}" )
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowercase : List[str] = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]) -> Any: '''simple docstring''' __UpperCamelCase : str = set() __UpperCamelCase : Optional[Any] = [] def parse_line(_lowerCamelCase : Tuple): for line in fp: if isinstance(_lowerCamelCase , _lowerCamelCase): __UpperCamelCase : Tuple = line.decode("UTF-8") if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(" "): # process a single warning and move it to `selected_warnings`. if len(_lowerCamelCase) > 0: __UpperCamelCase : Optional[Any] = "\n".join(_lowerCamelCase) # Only keep the warnings specified in `targets` if any(F': {x}: ' in warning for x in targets): selected_warnings.add(_lowerCamelCase) buffer.clear() continue else: __UpperCamelCase : Optional[Any] = line.strip() buffer.append(_lowerCamelCase) if from_gh: for filename in os.listdir(_lowerCamelCase): __UpperCamelCase : Any = os.path.join(_lowerCamelCase , _lowerCamelCase) if not os.path.isdir(_lowerCamelCase): # read the file if filename != "warnings.txt": continue with open(_lowerCamelCase) as fp: parse_line(_lowerCamelCase) else: try: with zipfile.ZipFile(_lowerCamelCase) as z: for filename in z.namelist(): if not os.path.isdir(_lowerCamelCase): # read the file if filename != "warnings.txt": continue with z.open(_lowerCamelCase) as fp: parse_line(_lowerCamelCase) except Exception: logger.warning( F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.') return selected_warnings def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any , _lowerCamelCase : Optional[int]) -> Dict: '''simple docstring''' __UpperCamelCase : Union[str, Any] = set() __UpperCamelCase : str = [os.path.join(_lowerCamelCase , _lowerCamelCase) for p in os.listdir(_lowerCamelCase) if (p.endswith(".zip") or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(_lowerCamelCase , _lowerCamelCase)) return selected_warnings if __name__ == "__main__": def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple) -> str: '''simple docstring''' return values.split(",") lowercase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') # optional parameters parser.add_argument( '--targets', default='DeprecationWarning,UserWarning,FutureWarning', type=list_str, help='Comma-separated list of target warning(s) which we want to extract.', ) parser.add_argument( '--from_gh', action='store_true', help='If running from a GitHub action workflow and collecting warnings from its artifacts.', ) lowercase : Union[str, Any] = parser.parse_args() lowercase : Tuple = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowercase : int = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: 
json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print('=' * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowercase : Any = extract_warnings(args.output_dir, args.targets) lowercase : int = sorted(selected_warnings) with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } lowerCamelCase__ = { """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""}, """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""}, """tokenizer_config_file""": { """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json""" }, } lowerCamelCase__ = {"""facebook/blenderbot-3B""": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase__ ( ) -> List[str]: lowerCAmelCase__ : List[str] = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) lowerCAmelCase__ : Any = bs[:] lowerCAmelCase__ : Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(SCREAMING_SNAKE_CASE_ ) cs.append(2**8 + n ) n += 1 lowerCAmelCase__ : str = [chr(SCREAMING_SNAKE_CASE_ ) for n in cs] return dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]: lowerCAmelCase__ : Optional[Any] = set() lowerCAmelCase__ : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase__ : List[Any] = char return pairs class A__ ( __magic_name__ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] def __init__( self : int , a : Optional[int] , a : Any , a : int="replace" , a : List[Any]="<s>" , a : Optional[int]="</s>" , a : Any="</s>" , a : int="<s>" , a : Optional[Any]="<unk>" , a : Tuple="<pad>" , a : Union[str, Any]="<mask>" , a : str=False , **a : Any , ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token lowerCAmelCase__ : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token lowerCAmelCase__ : int = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token lowerCAmelCase__ : Dict = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token lowerCAmelCase__ : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token lowerCAmelCase__ : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCAmelCase__ : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token super().__init__( errors=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , add_prefix_space=a , **a , ) with open(a , encoding='utf-8' ) as vocab_handle: lowerCAmelCase__ : Union[str, Any] = json.load(a ) lowerCAmelCase__ : int = {v: k for k, v in self.encoder.items()} lowerCAmelCase__ : Optional[int] = errors # how to handle errors in decoding lowerCAmelCase__ : Any = bytes_to_unicode() lowerCAmelCase__ : Any = {v: k for k, v in self.byte_encoder.items()} with open(a , encoding='utf-8' ) as merges_handle: lowerCAmelCase__ : Union[str, Any] = merges_handle.read().split('\n' )[1:-1] lowerCAmelCase__ : Optional[Any] = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase__ : Any = dict(zip(a , range(len(a ) ) ) ) lowerCAmelCase__ : List[str] = {} lowerCAmelCase__ : Any = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase__ : List[Any] = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' return len(self.encoder ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCamelCase ( self : List[Any] , a : Optional[int] ): '''simple docstring''' if token in self.cache: return self.cache[token] lowerCAmelCase__ : Tuple = tuple(a ) lowerCAmelCase__ : List[str] = get_pairs(a ) if not pairs: return token while True: lowerCAmelCase__ : Any = min(a , key=lambda a : self.bpe_ranks.get(a , float('inf' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = bigram lowerCAmelCase__ : Optional[Any] = [] lowerCAmelCase__ : int = 0 while i < len(a ): try: lowerCAmelCase__ : List[str] = word.index(a , a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase__ : int = j if word[i] == first and i < len(a ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase__ : List[str] = tuple(a ) lowerCAmelCase__ : Tuple = new_word if len(a ) == 1: break else: lowerCAmelCase__ : Union[str, Any] = get_pairs(a ) lowerCAmelCase__ : Any = ' '.join(a ) lowerCAmelCase__ : Dict = word return word def _lowerCamelCase ( self : str , a : Tuple ): '''simple docstring''' lowerCAmelCase__ : str = [] for token in re.findall(self.pat , a ): lowerCAmelCase__ : Optional[Any] = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(' ' ) ) return bpe_tokens def _lowerCamelCase ( self : int , a : Union[str, Any] ): '''simple docstring''' return self.encoder.get(a , self.encoder.get(self.unk_token ) ) def _lowerCamelCase ( self : Any , a : Dict ): '''simple docstring''' return self.decoder.get(a ) def _lowerCamelCase ( self : Dict , a : str ): '''simple docstring''' lowerCAmelCase__ : int = ''.join(a ) lowerCAmelCase__ : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , 
errors=self.errors ) return text def _lowerCamelCase ( self : List[Any] , a : str , a : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(a ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase__ : Any = os.path.join( a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase__ : Optional[int] = os.path.join( a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(a , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a ) + '\n' ) lowerCAmelCase__ : Dict = 0 with open(a , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) lowerCAmelCase__ : str = token_index writer.write(' '.join(a ) + '\n' ) index += 1 return vocab_file, merge_file def _lowerCamelCase ( self : List[str] , a : List[int] , a : Optional[List[int]] = None , a : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a , token_ids_a=a , already_has_special_tokens=a ) if token_ids_a is None: return [1] + ([0] * len(a )) + [1] return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1] def _lowerCamelCase ( self : int , a : List[int] , a : Optional[List[int]] = None ): '''simple docstring''' lowerCAmelCase__ : Tuple = [self.sep_token_id] lowerCAmelCase__ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self : Optional[int] , a : Any , a : Optional[Any]=False , **a : Dict ): '''simple docstring''' lowerCAmelCase__ : int = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(a ) > 0 and not text[0].isspace()): lowerCAmelCase__ : Dict = ' ' + text return (text, kwargs) def _lowerCamelCase ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self : str , a : "Conversation" ): '''simple docstring''' lowerCAmelCase__ : Any = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(a ) lowerCAmelCase__ : List[Any] = ' '.join(a ) lowerCAmelCase__ : int = self.encode(a ) if len(a ) > self.model_max_length: lowerCAmelCase__ : Optional[int] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class A__ ( __magic_name__ ): lowercase = 'gptj' lowercase = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Optional[Any] , a : Dict=50_400 , a : Union[str, Any]=2_048 , a : List[str]=4_096 , a : Any=28 , a : Optional[Any]=16 , a : Optional[Any]=64 , a : int=None , a : Any="gelu_new" , a : Union[str, Any]=0.0 , a : List[Any]=0.0 , a : List[Any]=0.0 , a : Optional[Any]=1E-5 , a : Optional[int]=0.0_2 , a : int=True , a : str=50_256 , a : str=50_256 , a : Any=False , **a : Dict , ): '''simple docstring''' lowerCAmelCase__ : List[Any] = vocab_size lowerCAmelCase__ : List[Any] = n_positions lowerCAmelCase__ : str = n_embd lowerCAmelCase__ : str = n_layer lowerCAmelCase__ : str = n_head lowerCAmelCase__ : Dict = n_inner lowerCAmelCase__ : Union[str, Any] = rotary_dim lowerCAmelCase__ : Optional[int] = activation_function lowerCAmelCase__ : Any = resid_pdrop lowerCAmelCase__ : int = embd_pdrop lowerCAmelCase__ : int = attn_pdrop lowerCAmelCase__ : List[Any] = layer_norm_epsilon lowerCAmelCase__ : str = initializer_range lowerCAmelCase__ : Dict = use_cache lowerCAmelCase__ : str = bos_token_id lowerCAmelCase__ : int = eos_token_id super().__init__( bos_token_id=a , eos_token_id=a , tie_word_embeddings=a , **a ) class A__ ( __magic_name__ ): def __init__( self : str , a : PretrainedConfig , a : str = "default" , a : List[PatchingSpec] = None , a : bool = False , ): '''simple docstring''' super().__init__(a , task=a , patching_specs=a , use_past=a ) if not getattr(self._config , 'pad_token_id' , a ): # TODO: how to do that better? lowerCAmelCase__ : int = 0 @property def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Dict = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(a , direction='inputs' ) lowerCAmelCase__ : Optional[Any] = {0: 'batch', 1: 'past_sequence + sequence'} else: lowerCAmelCase__ : Tuple = {0: 'batch', 1: 'sequence'} return common_inputs @property def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' return self._config.n_layer @property def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' return self._config.n_head def _lowerCamelCase ( self : Tuple , a : PreTrainedTokenizer , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ): '''simple docstring''' lowerCAmelCase__ : Tuple = super(a , self ).generate_dummy_inputs( a , batch_size=a , seq_length=a , is_pair=a , framework=a ) # We need to order the input in the way they appears in the forward() lowerCAmelCase__ : Optional[int] = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' 
) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : int = common_inputs['input_ids'].shape # Not using the same length for past_key_values lowerCAmelCase__ : Optional[int] = seqlen + 2 lowerCAmelCase__ : Tuple = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCAmelCase__ : Tuple = [ (torch.zeros(a ), torch.zeros(a )) for _ in range(self.num_layers ) ] lowerCAmelCase__ : Any = common_inputs['attention_mask'] if self.use_past: lowerCAmelCase__ : List[str] = ordered_inputs['attention_mask'].dtype lowerCAmelCase__ : Optional[Any] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(a , a , dtype=a )] , dim=1 ) return ordered_inputs @property def _lowerCamelCase ( self : int ): '''simple docstring''' return 13
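# A hedged sketch of exercising the ONNX config above. The exported names GPTJConfig /
# GPTJOnnxConfig and the import path are assumptions (class names were mangled above);
# the gpt2 tokenizer stands in for any GPT-style tokenizer. Requires torch when
# use_past=True, as enforced by generate_dummy_inputs.
from transformers import AutoTokenizer, GPTJConfig, TensorType
from transformers.models.gptj.configuration_gptj import GPTJOnnxConfig

config = GPTJConfig()
onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
tokenizer = AutoTokenizer.from_pretrained("gpt2")
dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
# Ordered as forward() expects: input_ids, past_key_values.*, attention_mask.
print(list(dummy))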
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b by scanning the bits of b (Russian peasant multiplication)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Compute (a * b) % c without ever forming the full product."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
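# Quick checks for the two helpers above (function names as reconstructed).
assert binary_multiply(6, 7) == 42
assert binary_mod_multiply(123, 456, 789) == (123 * 456) % 789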
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # No pipeline-specific parameters: empty kwargs for preprocess, forward and postprocess.
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # Resize the raw depth map back to the input resolution (PIL size is (width, height)).
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
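# A hedged usage sketch: assumes this class is registered under the "depth-estimation"
# pipeline task and that the Intel/dpt-large checkpoint is reachable.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
outputs["depth"].save("depth.png")       # PIL image built in postprocess()
print(outputs["predicted_depth"].shape)  # raw model tensor, pre-interpolation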
"""simple docstring""" _UpperCamelCase: Optional[Any] = '0.18.2' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, 
KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( 
FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
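# The guard idiom repeated throughout this __init__ can be reused for any optional
# backend. A minimal self-contained sketch of the same pattern; only the diffusers
# utility and scheduler names below are taken from the file above.
from diffusers.utils import OptionalDependencyNotAvailable, is_torch_available

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from diffusers.utils.dummy_pt_objects import *  # noqa: F403  # placeholders that raise on use
else:
    from diffusers.schedulers import DDPMScheduler  # real import only when torch is present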
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)

_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
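# With the _LazyModule indirection above, names resolve on first attribute access;
# a hedged sketch (assumes torch and transformers are installed).
import transformers

model_cls = transformers.YolosForObjectDetection  # triggers the real import via _import_structure
print(model_cls.__module__)  # transformers.models.yolos.modeling_yolos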
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is kept in asdict() output even when it still holds the default value.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write the aligned schema through __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
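# A hedged sketch of aligning a concrete schema with the template above, assuming the
# class is exported as TextClassification (method name as reconstructed).
from datasets import ClassLabel, Features, Value

template = TextClassification(text_column="review", label_column="sentiment")
features = Features({"review": Value("string"), "sentiment": ClassLabel(names=["neg", "pos"])})
aligned = template.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['neg', 'pos']
print(template.column_mapping)               # {'review': 'text', 'sentiment': 'labels'}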
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): UpperCamelCase : Union[str, Any] = StableDiffusionSAGPipeline UpperCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCamelCase : int = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase : Dict = False def lowerCamelCase__ ( self : Dict ) -> Tuple: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: List[Any] =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) SCREAMING_SNAKE_CASE_: Optional[Any] =DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: Tuple =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: Dict =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE_: Dict =CLIPTextModel(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[int] =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) SCREAMING_SNAKE_CASE_: Any ={ """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict=0 ) -> Any: '''simple docstring''' if str(lowerCAmelCase ).startswith("""mps""" ): SCREAMING_SNAKE_CASE_: int =torch.manual_seed(lowerCAmelCase ) else: SCREAMING_SNAKE_CASE_: List[str] =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] ={ """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def lowerCamelCase__ ( self : List[str] ) -> Optional[int]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCamelCase__ ( self : List[Any] ) -> int: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : Any ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Union[str, 
Any] =StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) SCREAMING_SNAKE_CASE_: Any =sag_pipe.to(lowerCAmelCase ) sag_pipe.set_progress_bar_config(disable=lowerCAmelCase ) SCREAMING_SNAKE_CASE_: str =""".""" SCREAMING_SNAKE_CASE_: Dict =torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: Tuple =sag_pipe( [prompt] , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) SCREAMING_SNAKE_CASE_: Any =output.images SCREAMING_SNAKE_CASE_: str =image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_: Optional[int] =np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def lowerCamelCase__ ( self : Dict ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Optional[Any] =StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) SCREAMING_SNAKE_CASE_: List[Any] =sag_pipe.to(lowerCAmelCase ) sag_pipe.set_progress_bar_config(disable=lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Tuple =""".""" SCREAMING_SNAKE_CASE_: Any =torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: Tuple =sag_pipe( [prompt] , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) SCREAMING_SNAKE_CASE_: int =output.images SCREAMING_SNAKE_CASE_: Union[str, Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_: Optional[int] =np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def lowerCamelCase__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE_: Dict =StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) SCREAMING_SNAKE_CASE_: Tuple =sag_pipe.to(lowerCAmelCase ) sag_pipe.set_progress_bar_config(disable=lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[int] =""".""" SCREAMING_SNAKE_CASE_: Any =torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: int =sag_pipe( [prompt] , width=768 , height=512 , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) SCREAMING_SNAKE_CASE_: Union[str, Any] =output.images assert image.shape == (1, 512, 768, 3)
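# Outside the test harness the pipeline is driven the same way; a minimal sketch using
# the checkpoint and arguments that appear in the slow tests above (assumes a CUDA device).
import torch
from diffusers import StableDiffusionSAGPipeline

sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
sag_pipe = sag_pipe.to("cuda")
image = sag_pipe(
    ".", generator=torch.manual_seed(0), guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20
).images[0]
# A 512x512 PIL image when output_type is left at its default.
image.save("sag_sample.png")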
"""simple docstring""" def __magic_name__ ( lowercase , lowercase ): SCREAMING_SNAKE_CASE_: Union[str, Any] =int(lowercase ) # Initialize Result SCREAMING_SNAKE_CASE_: str =[] # Traverse through all denomination for denomination in reversed(lowercase ): # Find denominations while int(lowercase ) >= int(lowercase ): total_value -= int(lowercase ) answer.append(lowercase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": _UpperCAmelCase = [] _UpperCAmelCase = """0""" if ( input("""Do you want to enter your denominations ? (yY/n): """).strip().lower() == "y" ): _UpperCAmelCase = int(input("""Enter the number of denominations you want to add: """).strip()) for i in range(0, n): denominations.append(int(input(f"""Denomination {i}: """).strip())) _UpperCAmelCase = input("""Enter the change you want to make in Indian Currency: """).strip() else: # All denominations of Indian Currency if user does not enter _UpperCAmelCase = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0] _UpperCAmelCase = input("""Enter the change you want to make: """).strip() if int(value) == 0 or int(value) < 0: print("""The total value cannot be zero or negative.""") else: print(f"""Following is minimal change for {value}: """) _UpperCAmelCase = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=""" """)
import numpy as np


def runge_kutta(f, y0, x0, h, x_end):
    """Integrate y' = f(x, y) from x0 to x_end with step size h using classic RK4."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
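# Quick check: integrating y' = y from x = 0 to 1 should approach e
# (signature as reconstructed above: f, y0, x0, h, x_end).
y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
assert abs(y[-1] - np.e) < 1e-5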
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation a_ = logging.get_logger(__name__) a_ = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } a_ = { """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""}, """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""}, """tokenizer_config_file""": { """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json""" }, } a_ = {"""facebook/blenderbot-3B""": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _a ( ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) lowerCAmelCase__ = bs[:] lowerCAmelCase__ = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase__ ) cs.append(2**8 + n ) n += 1 lowerCAmelCase__ = [chr(UpperCamelCase__ ) for n in cs] return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) def _a ( UpperCamelCase_ : Any ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ = set() lowerCAmelCase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase__ = char return pairs class lowercase__ ( lowercase_ ): a_ =VOCAB_FILES_NAMES a_ =PRETRAINED_VOCAB_FILES_MAP a_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ =['''input_ids''', '''attention_mask'''] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="replace" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=False , **__UpperCAmelCase , )-> str: '''simple docstring''' lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token super().__init__( errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , ) with open(__UpperCAmelCase , encoding="utf-8" ) as vocab_handle: lowerCAmelCase__ = json.load(__UpperCAmelCase ) lowerCAmelCase__ = {v: k for k, v in self.encoder.items()} lowerCAmelCase__ = errors # how to handle errors in decoding lowerCAmelCase__ = bytes_to_unicode() lowerCAmelCase__ = {v: k for k, v in self.byte_encoder.items()} with open(__UpperCAmelCase , encoding="utf-8" ) as merges_handle: lowerCAmelCase__ = merges_handle.read().split("\n" )[1:-1] lowerCAmelCase__ = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) lowerCAmelCase__ = {} lowerCAmelCase__ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase__ = re.compile(R"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def UpperCAmelCase ( self )-> List[Any]: '''simple docstring''' return len(self.encoder ) def UpperCAmelCase ( self )-> Any: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase ( self , __UpperCAmelCase )-> Tuple: '''simple docstring''' if token in self.cache: return self.cache[token] lowerCAmelCase__ = tuple(__UpperCAmelCase ) lowerCAmelCase__ = get_pairs(__UpperCAmelCase ) if not pairs: return token while True: lowerCAmelCase__ = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase__ , lowerCAmelCase__ = bigram lowerCAmelCase__ = [] lowerCAmelCase__ = 0 while i < len(__UpperCAmelCase ): try: lowerCAmelCase__ = word.index(__UpperCAmelCase , __UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase__ = j if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase__ = tuple(__UpperCAmelCase ) lowerCAmelCase__ = new_word if len(__UpperCAmelCase ) == 1: break else: lowerCAmelCase__ = get_pairs(__UpperCAmelCase ) lowerCAmelCase__ = " ".join(__UpperCAmelCase ) lowerCAmelCase__ = word return word def UpperCAmelCase ( self , __UpperCAmelCase )-> List[str]: '''simple docstring''' lowerCAmelCase__ = [] for token in re.findall(self.pat , __UpperCAmelCase ): lowerCAmelCase__ = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCAmelCase ).split(" " ) ) return bpe_tokens def UpperCAmelCase ( self , __UpperCAmelCase )-> int: '''simple docstring''' return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) ) def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]: '''simple 
docstring''' return self.decoder.get(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase )-> int: '''simple docstring''' lowerCAmelCase__ = "".join(__UpperCAmelCase ) lowerCAmelCase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> Optional[int]: '''simple docstring''' if not os.path.isdir(__UpperCAmelCase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase__ = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCAmelCase__ = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + "\n" ) lowerCAmelCase__ = 0 with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) lowerCAmelCase__ = token_index writer.write(" ".join(__UpperCAmelCase ) + "\n" ) index += 1 return vocab_file, merge_file def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False )-> Union[str, Any]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> List[str]: '''simple docstring''' lowerCAmelCase__ = [self.sep_token_id] lowerCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False , **__UpperCAmelCase )-> List[Any]: '''simple docstring''' lowerCAmelCase__ = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__UpperCAmelCase ) > 0 and not text[0].isspace()): lowerCAmelCase__ = " " + text return (text, kwargs) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> Optional[int]: '''simple docstring''' return token_ids_a + [self.eos_token_id] def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]: '''simple docstring''' lowerCAmelCase__ = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__UpperCAmelCase ) lowerCAmelCase__ = " ".join(__UpperCAmelCase ) lowerCAmelCase__ = self.encode(__UpperCAmelCase ) if len(__UpperCAmelCase ) > self.model_max_length: lowerCAmelCase__ = input_ids[-self.model_max_length :] logger.warning(F"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." ) return input_ids
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowercase ( unittest.TestCase ): def a ( self ): snake_case_ = tempfile.mkdtemp() snake_case_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '的', '价', '格', '是', '15', '便', 'alex', '##andra', ',', '。', '-', 't', 'shirt', ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) snake_case_ = { 'do_resize': True, 'size': {'height': 224, 'width': 224}, 'do_center_crop': True, 'crop_size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], 'do_convert_rgb': True, } snake_case_ = os.path.join(self.tmpdirname , snake_case ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(snake_case , snake_case ) def a ( self , **snake_case ): return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): shutil.rmtree(self.tmpdirname ) def a ( self ): snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self ): snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = self.get_image_processor() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_slow.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case ) snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_fast.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case ) self.assertIsInstance(processor_fast.tokenizer , snake_case ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case ) self.assertIsInstance(processor_fast.image_processor , snake_case ) def a ( self ): snake_case_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' ) snake_case_ = 
self.get_image_processor(do_normalize=snake_case ) snake_case_ = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = self.prepare_image_inputs() snake_case_ = image_processor(snake_case , return_tensors='np' ) snake_case_ = processor(images=snake_case , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = processor(text=snake_case ) snake_case_ = tokenizer(snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case_ = processor.batch_decode(snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
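# A hedged sketch of the processor outside the tests; the checkpoint name is an
# assumption, while the input text and returned keys come from the tests above.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']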
'''simple docstring''' import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __UpperCAmelCase :Dict = datasets.logging.get_logger(__name__) __UpperCAmelCase :int = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n" __UpperCAmelCase :Union[str, Any] = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n" __UpperCAmelCase :List[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n" def _a ( _lowercase : Any , _lowercase : List[Any] , _lowercase : int=False , _lowercase : str=False , _lowercase : int=True , _lowercase : List[str]=False , _lowercase : Optional[Any]="dummy_doc" ): '''simple docstring''' __UpperCAmelCase : Dict = {doc: key_lines} __UpperCAmelCase : int = {doc: sys_lines} __UpperCAmelCase : int = {} __UpperCAmelCase : Any = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : int = 0 __UpperCAmelCase : Optional[int] = 0 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : str = 0 __UpperCAmelCase , __UpperCAmelCase : int = reader.get_doc_mentions(_lowercase , key_doc_lines[doc] , _lowercase ) key_singletons_num += singletons_num if NP_only or min_span: __UpperCAmelCase : List[str] = reader.set_annotated_parse_trees(_lowercase , key_doc_lines[doc] , _lowercase , _lowercase ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = reader.get_doc_mentions(_lowercase , sys_doc_lines[doc] , _lowercase ) sys_singletons_num += singletons_num if NP_only or min_span: __UpperCAmelCase : Optional[Any] = reader.set_annotated_parse_trees(_lowercase , key_doc_lines[doc] , _lowercase , _lowercase ) if remove_nested: __UpperCAmelCase , __UpperCAmelCase : List[str] = reader.remove_nested_coref_mentions(_lowercase , _lowercase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters __UpperCAmelCase , __UpperCAmelCase : str = reader.remove_nested_coref_mentions(_lowercase , _lowercase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters __UpperCAmelCase : Optional[int] = reader.get_mention_assignments(_lowercase , _lowercase ) __UpperCAmelCase : Optional[Any] = reader.get_mention_assignments(_lowercase , _lowercase ) __UpperCAmelCase : List[str] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( '''Number of resulting singleton clusters in the key ''' F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( 
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' '''files, respectively''' ) return doc_coref_infos def _a ( _lowercase : int , _lowercase : List[str] , _lowercase : Optional[int] , _lowercase : str , _lowercase : List[Any] , _lowercase : int , _lowercase : Dict ): '''simple docstring''' __UpperCAmelCase : Any = get_coref_infos(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) __UpperCAmelCase : Dict = {} __UpperCAmelCase : List[Any] = 0 __UpperCAmelCase : List[str] = 0 for name, metric in metrics: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = evaluator.evaluate_documents(_lowercase , _lowercase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} ) logger.info( name.ljust(10 ) , F'Recall: {recall * 100:.2f}' , F' Precision: {precision * 100:.2f}' , F' F1: {fa * 100:.2f}' , ) if conll_subparts_num == 3: __UpperCAmelCase : List[str] = (conll / 3) * 100 logger.info(F'CoNLL score: {conll:.2f}' ) output_scores.update({'''conll_score''': conll} ) return output_scores def _a ( _lowercase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = False for line in key_lines: if not line.startswith('''#''' ): if len(line.split() ) > 6: __UpperCAmelCase : Union[str, Any] = line.split()[5] if not parse_col == "-": __UpperCAmelCase : Dict = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): """simple docstring""" def lowerCamelCase__ ( self : List[Any] ) -> Dict: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Sequence(datasets.Value('''string''' ) ), } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[ '''https://github.com/ns-moosavi/coval''', '''https://www.aclweb.org/anthology/P16-1060''', '''http://www.conll.cemantix.org/2012/data.html''', ] , ) def lowerCamelCase__ ( self : Dict , snake_case : List[str] , snake_case : Dict , snake_case : int=True , snake_case : List[Any]=False , snake_case : Optional[int]=False , snake_case : Optional[int]=False ) -> Dict: __UpperCAmelCase : str = [ ('''mentions''', evaluator.mentions), ('''muc''', evaluator.muc), ('''bcub''', evaluator.b_cubed), ('''ceafe''', evaluator.ceafe), ('''lea''', evaluator.lea), ] if min_span: __UpperCAmelCase : Any = util.check_gold_parse_annotation(snake_case ) if not has_gold_parse: raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" __UpperCAmelCase : Any = evaluate( key_lines=snake_case , sys_lines=snake_case , metrics=snake_case , NP_only=snake_case , remove_nested=snake_case , keep_singletons=snake_case , min_span=snake_case , ) return score
240
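For reference, the `conll_score` that the `evaluate` helper above reports is nothing more than the arithmetic mean of the MUC, B-cubed and CEAFe F1 values, scaled to 0-100. A minimal standalone sketch (not part of the metric's API):

# The CoNLL score accumulated in `evaluate` above: mean of three F1 values in [0, 1].
def conll_score(muc_f1: float, bcub_f1: float, ceafe_f1: float) -> float:
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100


print(conll_score(1.0, 1.0, 1.0))  # 100.0, matching the docstring example above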
'''simple docstring''' from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class a : """simple docstring""" SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3] SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3] SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3] SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3] SCREAMING_SNAKE_CASE : int SCREAMING_SNAKE_CASE : int SCREAMING_SNAKE_CASE : float SCREAMING_SNAKE_CASE : float SCREAMING_SNAKE_CASE : Tuple[int] def lowerCamelCase__ ( self : Any ) -> int: assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def lowerCamelCase__ ( self : Union[str, Any] ) -> str: return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def lowerCamelCase__ ( self : Any ) -> Optional[Any]: return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def lowerCamelCase__ ( self : Any ) -> torch.Tensor: __UpperCAmelCase : Dict = torch.arange(self.height * self.width ) __UpperCAmelCase : Dict = torch.stack( [ pixel_indices % self.width, torch.div(snake_case , self.width , rounding_mode='''trunc''' ), ] , axis=1 , ) return coords @property def lowerCamelCase__ ( self : Any ) -> int: __UpperCAmelCase , *__UpperCAmelCase : str = self.shape __UpperCAmelCase : Dict = int(np.prod(snake_case ) ) __UpperCAmelCase : Tuple = self.get_image_coords() __UpperCAmelCase : List[Any] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) __UpperCAmelCase : Any = self.get_camera_rays(snake_case ) __UpperCAmelCase : List[str] = rays.view(snake_case , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def lowerCamelCase__ ( self : Union[str, Any] , snake_case : torch.Tensor ) -> torch.Tensor: __UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase : List[str] = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] __UpperCAmelCase : List[str] = coords.view(snake_case , -1 , 2 ) __UpperCAmelCase : Optional[Any] = self.resolution() __UpperCAmelCase : Tuple = self.fov() __UpperCAmelCase : Optional[int] = (flat.float() / (res - 1)) * 2 - 1 __UpperCAmelCase : Union[str, Any] = fracs * torch.tan(fov / 2 ) __UpperCAmelCase : str = fracs.view(snake_case , -1 , 2 ) __UpperCAmelCase : Any = ( self.z.view(snake_case , 1 , 3 ) + self.x.view(snake_case , 1 , 3 ) * fracs[:, :, :1] + self.y.view(snake_case , 1 , 3 ) * fracs[:, :, 1:] ) __UpperCAmelCase : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=snake_case ) __UpperCAmelCase : Union[str, Any] = torch.stack( [ torch.broadcast_to(self.origin.view(snake_case , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(snake_case , *snake_case , 2 , 3 ) def lowerCamelCase__ ( self : Any , snake_case : int , snake_case : int ) -> "DifferentiableProjectiveCamera": assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=snake_case , height=snake_case , x_fov=self.x_fov , y_fov=self.y_fov , ) def _a ( _lowercase : int ): '''simple docstring''' __UpperCAmelCase : str = [] __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : Union[str, Any] = [] __UpperCAmelCase : List[Any] = [] for theta in np.linspace(0 , 2 * np.pi , num=20 ): __UpperCAmelCase : Dict = np.array([np.sin(_lowercase ), np.cos(_lowercase ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) __UpperCAmelCase : Any = -z * 4 __UpperCAmelCase : Dict = np.array([np.cos(_lowercase ), -np.sin(_lowercase ), 0.0] ) __UpperCAmelCase : List[str] = np.cross(_lowercase , _lowercase ) origins.append(_lowercase ) xs.append(_lowercase ) ys.append(_lowercase ) zs.append(_lowercase ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , width=_lowercase , height=_lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(_lowercase )) , )
240
1
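The core geometry in `get_camera_rays` above reduces to a per-pixel pinhole construction. A minimal unbatched NumPy sketch, with illustrative names rather than the class API:

import numpy as np


def pixel_ray_direction(px, py, width, height, x_fov, y_fov, x_axis, y_axis, z_axis):
    # Map pixel coordinates into [-1, 1], as `(flat / (res - 1)) * 2 - 1` does above.
    fx = (px / (width - 1)) * 2 - 1
    fy = (py / (height - 1)) * 2 - 1
    # Scale by tan(fov / 2) and combine the camera basis with the view direction z.
    direction = z_axis + x_axis * fx * np.tan(x_fov / 2) + y_axis * fy * np.tan(y_fov / 2)
    return direction / np.linalg.norm(direction)  # normalized, as in the class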
'''simple docstring''' from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { '''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''', } class __SCREAMING_SNAKE_CASE ( __snake_case ): snake_case_ = """autoformer""" snake_case_ = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", """num_hidden_layers""": """encoder_layers""", } def __init__( self : int , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : str = "student_t" , __lowercase : str = "nll" , __lowercase : int = 1 , __lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowercase : bool = True , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : Optional[List[int]] = None , __lowercase : Optional[List[int]] = None , __lowercase : int = 64 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 32 , __lowercase : int = 32 , __lowercase : str = "gelu" , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : int = 1_00 , __lowercase : float = 0.02 , __lowercase : bool = True , __lowercase : Optional[int]=True , __lowercase : int = 10 , __lowercase : int = 25 , __lowercase : int = 3 , **__lowercase : Any , ) -> Dict: # time series specific configuration SCREAMING_SNAKE_CASE__ : Optional[int] =prediction_length SCREAMING_SNAKE_CASE__ : str =context_length if context_length is not None else prediction_length SCREAMING_SNAKE_CASE__ : Tuple =distribution_output SCREAMING_SNAKE_CASE__ : int =loss SCREAMING_SNAKE_CASE__ : Any =input_size SCREAMING_SNAKE_CASE__ : Optional[int] =num_time_features SCREAMING_SNAKE_CASE__ : List[Any] =lags_sequence SCREAMING_SNAKE_CASE__ : Any =scaling SCREAMING_SNAKE_CASE__ : Optional[int] =num_dynamic_real_features SCREAMING_SNAKE_CASE__ : int =num_static_real_features SCREAMING_SNAKE_CASE__ : Dict =num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(__SCREAMING_SNAKE_CASE ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) SCREAMING_SNAKE_CASE__ : List[Any] =cardinality else: SCREAMING_SNAKE_CASE__ : Dict =[0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(__SCREAMING_SNAKE_CASE ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) SCREAMING_SNAKE_CASE__ : Tuple =embedding_dimension else: SCREAMING_SNAKE_CASE__ : Tuple =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality] SCREAMING_SNAKE_CASE__ : Union[str, Any] =num_parallel_samples # Transformer architecture configuration SCREAMING_SNAKE_CASE__ : int =input_size * len(self.lags_sequence ) + self._number_of_features SCREAMING_SNAKE_CASE__ : Optional[Any] =d_model SCREAMING_SNAKE_CASE__ : List[Any] =encoder_attention_heads SCREAMING_SNAKE_CASE__ : Optional[Any] =decoder_attention_heads SCREAMING_SNAKE_CASE__ : Any =encoder_ffn_dim SCREAMING_SNAKE_CASE__ : Optional[int] =decoder_ffn_dim SCREAMING_SNAKE_CASE__ : Tuple =encoder_layers SCREAMING_SNAKE_CASE__ : int =decoder_layers 
SCREAMING_SNAKE_CASE__ : List[Any] =dropout SCREAMING_SNAKE_CASE__ : Dict =attention_dropout SCREAMING_SNAKE_CASE__ : Optional[Any] =activation_dropout SCREAMING_SNAKE_CASE__ : Tuple =encoder_layerdrop SCREAMING_SNAKE_CASE__ : List[Any] =decoder_layerdrop SCREAMING_SNAKE_CASE__ : Optional[Any] =activation_function SCREAMING_SNAKE_CASE__ : Tuple =init_std SCREAMING_SNAKE_CASE__ : int =use_cache # Autoformer SCREAMING_SNAKE_CASE__ : Any =label_length SCREAMING_SNAKE_CASE__ : Optional[int] =moving_average SCREAMING_SNAKE_CASE__ : List[Any] =autocorrelation_factor super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @property def __magic_name__ ( self : str ) -> int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
152
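A minimal usage sketch for the config above; the argument values are illustrative, and anything left unset falls back to the defaults in `__init__` (for instance, `context_length` defaults to `prediction_length`):

from transformers import AutoformerConfig

config = AutoformerConfig(prediction_length=24, context_length=48, d_model=64)
print(config.num_hidden_layers)  # attribute_map routes this to `encoder_layers`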
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class a ( unittest.TestCase ): def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str]=7 , __SCREAMING_SNAKE_CASE : Dict=3 , __SCREAMING_SNAKE_CASE : Dict=18 , __SCREAMING_SNAKE_CASE : Union[str, Any]=30 , __SCREAMING_SNAKE_CASE : Optional[Any]=400 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=True , ) -> str: lowerCamelCase_ = size if size is not None else {'height': 18, 'width': 18} lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = num_channels lowerCamelCase_ = image_size lowerCamelCase_ = min_resolution lowerCamelCase_ = max_resolution lowerCamelCase_ = do_resize lowerCamelCase_ = size lowerCamelCase_ = apply_ocr def UpperCamelCase ( self : int ) -> Tuple: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class a ( __snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None def UpperCamelCase ( self : List[str] ) -> int: lowerCamelCase_ = LayoutLMvaImageProcessingTester(self ) @property def UpperCamelCase ( self : Optional[Any] ) -> Any: return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self : Tuple ) -> Optional[Any]: lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'apply_ocr' ) ) def UpperCamelCase ( self : Any ) -> Any: lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def UpperCamelCase ( self : Dict ) -> Any: pass def UpperCamelCase ( self : int ) -> Dict: # Initialize image_processing lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE ) self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE ) # Test batched lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], 
self.image_processor_tester.size['width'], ) , ) def UpperCamelCase ( self : Optional[int] ) -> Optional[Any]: # Initialize image_processing lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def UpperCamelCase ( self : Dict ) -> int: # Initialize image_processing lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def UpperCamelCase ( self : Dict ) -> Any: # with apply_OCR = True lowerCamelCase_ = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCamelCase_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) lowerCamelCase_ = Image.open(ds[0]['file'] ).convert('RGB' ) lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowerCamelCase_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 
'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 lowerCamelCase_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 
565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE ) self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE ) # with apply_OCR = False lowerCamelCase_ = LayoutLMvaImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
183
0
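A minimal usage sketch for the image processor exercised by these tests; `apply_ocr=False` avoids the pytesseract dependency, and the image here is a synthetic stand-in for a real document scan:

from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=False)
image = Image.new("RGB", (400, 300))  # stand-in for a real document image
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])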
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = { '''configuration_xlm_roberta''': [ '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaConfig''', '''XLMRobertaOnnxConfig''', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''XLMRobertaTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''XLMRobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaForCausalLM''', '''XLMRobertaForMaskedLM''', '''XLMRobertaForMultipleChoice''', '''XLMRobertaForQuestionAnswering''', '''XLMRobertaForSequenceClassification''', '''XLMRobertaForTokenClassification''', '''XLMRobertaModel''', '''XLMRobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLMRobertaForCausalLM''', '''TFXLMRobertaForMaskedLM''', '''TFXLMRobertaForMultipleChoice''', '''TFXLMRobertaForQuestionAnswering''', '''TFXLMRobertaForSequenceClassification''', '''TFXLMRobertaForTokenClassification''', '''TFXLMRobertaModel''', '''TFXLMRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxXLMRobertaForMaskedLM''', '''FlaxXLMRobertaForCausalLM''', '''FlaxXLMRobertaForMultipleChoice''', '''FlaxXLMRobertaForQuestionAnswering''', '''FlaxXLMRobertaForSequenceClassification''', '''FlaxXLMRobertaForTokenClassification''', '''FlaxXLMRobertaModel''', '''FlaxXLMRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, 
TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
369
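The `_LazyModule` wiring above defers the heavy framework imports. A small illustration of the resulting behavior, assuming `transformers` and `sentencepiece` are installed:

import transformers

# The package module is a _LazyModule: submodules load on first attribute access,
# so `import transformers` stays fast even with torch/TF/flax variants declared.
tokenizer_cls = transformers.XLMRobertaTokenizer  # real import happens here
print(tokenizer_cls.__module__)  # transformers.models.xlm_roberta.tokenization_xlm_roberta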
import argparse import collections import json import os import re import string import sys import numpy as np lowerCamelCase_ = re.compile(R'''\b(a|an|the)\b''', re.UNICODE) lowerCamelCase_ = None def UpperCamelCase( ) -> List[Any]: '''simple docstring''' snake_case_ = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" ) parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" ) parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" ) parser.add_argument( """--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" ) parser.add_argument( """--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" ) parser.add_argument( """--na-prob-thresh""" , """-t""" , type=lowercase_ , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , ) parser.add_argument( """--out-image-dir""" , """-p""" , metavar="""out_images""" , default=lowercase_ , help="""Save precision-recall curves to directory.""" ) parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def UpperCamelCase( lowercase_ ) -> Union[str, Any]: '''simple docstring''' snake_case_ = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: snake_case_ = bool(qa["""answers"""]["""text"""] ) return qid_to_has_ans def UpperCamelCase( lowercase_ ) -> Tuple: '''simple docstring''' def remove_articles(lowercase_ ): return ARTICLES_REGEX.sub(""" """ , lowercase_ ) def white_space_fix(lowercase_ ): return " ".join(text.split() ) def remove_punc(lowercase_ ): snake_case_ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowercase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowercase_ ) ) ) ) def UpperCamelCase( lowercase_ ) -> Dict: '''simple docstring''' if not s: return [] return normalize_answer(lowercase_ ).split() def UpperCamelCase( lowercase_ , lowercase_ ) -> Dict: '''simple docstring''' return int(normalize_answer(lowercase_ ) == normalize_answer(lowercase_ ) ) def UpperCamelCase( lowercase_ , lowercase_ ) -> Any: '''simple docstring''' snake_case_ = get_tokens(lowercase_ ) snake_case_ = get_tokens(lowercase_ ) snake_case_ = collections.Counter(lowercase_ ) & collections.Counter(lowercase_ ) snake_case_ = sum(common.values() ) if len(lowercase_ ) == 0 or len(lowercase_ ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 snake_case_ = 1.0 * num_same / len(lowercase_ ) snake_case_ = 1.0 * num_same / len(lowercase_ ) snake_case_ = (2 * precision * recall) / (precision + recall) return fa def UpperCamelCase( lowercase_ , lowercase_ ) -> List[str]: '''simple docstring''' snake_case_ = {} snake_case_ = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: snake_case_ = qa["""id"""] snake_case_ = [t for t in qa["""answers"""]["""text"""] if normalize_answer(lowercase_ )] if not gold_answers: # For unanswerable questions, only correct answer is empty string snake_case_ = [""""""] if qid not in preds: print(f'''Missing prediction for {qid}''' ) continue snake_case_ = preds[qid] # Take max over all gold answers snake_case_ = max(compute_exact(lowercase_ , lowercase_ ) for a in 
gold_answers ) snake_case_ = max(compute_fa(lowercase_ , lowercase_ ) for a in gold_answers ) return exact_scores, fa_scores def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: '''simple docstring''' snake_case_ = {} for qid, s in scores.items(): snake_case_ = na_probs[qid] > na_prob_thresh if pred_na: snake_case_ = float(not qid_to_has_ans[qid] ) else: snake_case_ = s return new_scores def UpperCamelCase( lowercase_ , lowercase_ , lowercase_=None ) -> Dict: '''simple docstring''' if not qid_list: snake_case_ = len(lowercase_ ) return collections.OrderedDict( [ ("""exact""", 1_00.0 * sum(exact_scores.values() ) / total), ("""f1""", 1_00.0 * sum(fa_scores.values() ) / total), ("""total""", total), ] ) else: snake_case_ = len(lowercase_ ) return collections.OrderedDict( [ ("""exact""", 1_00.0 * sum(exact_scores[k] for k in qid_list ) / total), ("""f1""", 1_00.0 * sum(fa_scores[k] for k in qid_list ) / total), ("""total""", total), ] ) def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ ) -> Any: '''simple docstring''' for k in new_eval: snake_case_ = new_eval[k] def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Dict: '''simple docstring''' plt.step(lowercase_ , lowercase_ , color="""b""" , alpha=0.2 , where="""post""" ) plt.fill_between(lowercase_ , lowercase_ , step="""post""" , alpha=0.2 , color="""b""" ) plt.xlabel("""Recall""" ) plt.ylabel("""Precision""" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(lowercase_ ) plt.savefig(lowercase_ ) plt.clf() def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ) -> Dict: '''simple docstring''' snake_case_ = sorted(lowercase_ , key=lambda lowercase_ : na_probs[k] ) snake_case_ = 0.0 snake_case_ = 1.0 snake_case_ = 0.0 snake_case_ = [1.0] snake_case_ = [0.0] snake_case_ = 0.0 for i, qid in enumerate(lowercase_ ): if qid_to_has_ans[qid]: true_pos += scores[qid] snake_case_ = true_pos / float(i + 1 ) snake_case_ = true_pos / float(lowercase_ ) if i == len(lowercase_ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(lowercase_ ) recalls.append(lowercase_ ) if out_image: plot_pr_curve(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) return {"ap": 1_00.0 * avg_prec} def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> str: '''simple docstring''' if out_image_dir and not os.path.exists(lowercase_ ): os.makedirs(lowercase_ ) snake_case_ = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return snake_case_ = make_precision_recall_eval( lowercase_ , lowercase_ , lowercase_ , lowercase_ , out_image=os.path.join(lowercase_ , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , ) snake_case_ = make_precision_recall_eval( lowercase_ , lowercase_ , lowercase_ , lowercase_ , out_image=os.path.join(lowercase_ , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , ) snake_case_ = {k: float(lowercase_ ) for k, v in qid_to_has_ans.items()} snake_case_ = make_precision_recall_eval( lowercase_ , lowercase_ , lowercase_ , lowercase_ , out_image=os.path.join(lowercase_ , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)""" , ) merge_eval(lowercase_ , lowercase_ , """pr_exact""" ) merge_eval(lowercase_ , lowercase_ , """pr_f1""" ) merge_eval(lowercase_ , lowercase_ , """pr_oracle""" ) def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' if not qid_list: return snake_case_ = [na_probs[k] for k in qid_list] snake_case_ = np.ones_like(lowercase_ ) / float(len(lowercase_ ) ) plt.hist(lowercase_ , weights=lowercase_ , bins=20 , range=(0.0, 1.0) ) plt.xlabel("""Model probability of no-answer""" ) plt.ylabel("""Proportion of dataset""" ) plt.title(f'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(lowercase_ , f'''na_prob_hist_{name}.png''' ) ) plt.clf() def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: '''simple docstring''' snake_case_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) snake_case_ = num_no_ans snake_case_ = cur_score snake_case_ = 0.0 snake_case_ = sorted(lowercase_ , key=lambda lowercase_ : na_probs[k] ) for i, qid in enumerate(lowercase_ ): if qid not in scores: continue if qid_to_has_ans[qid]: snake_case_ = scores[qid] else: if preds[qid]: snake_case_ = -1 else: snake_case_ = 0 cur_score += diff if cur_score > best_score: snake_case_ = cur_score snake_case_ = na_probs[qid] return 1_00.0 * best_score / len(lowercase_ ), best_thresh def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any: '''simple docstring''' snake_case_ , snake_case_ = find_best_thresh(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) snake_case_ , snake_case_ = find_best_thresh(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) snake_case_ = best_exact snake_case_ = exact_thresh snake_case_ = best_fa snake_case_ = fa_thresh def UpperCamelCase( ) -> Union[str, Any]: '''simple docstring''' with open(OPTS.data_file ) as f: snake_case_ = json.load(lowercase_ ) snake_case_ = dataset_json["""data"""] with open(OPTS.pred_file ) as f: snake_case_ = json.load(lowercase_ ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: snake_case_ = json.load(lowercase_ ) else: snake_case_ = {k: 0.0 for k in preds} snake_case_ = make_qid_to_has_ans(lowercase_ ) # maps qid to True/False snake_case_ = [k for k, v in qid_to_has_ans.items() if v] snake_case_ = [k for k, v in qid_to_has_ans.items() if not v] snake_case_ , snake_case_ = get_raw_scores(lowercase_ , lowercase_ ) snake_case_ = apply_no_ans_threshold(lowercase_ , lowercase_ , lowercase_ , OPTS.na_prob_thresh ) snake_case_ = apply_no_ans_threshold(lowercase_ , lowercase_ , lowercase_ , OPTS.na_prob_thresh ) snake_case_ = make_eval_dict(lowercase_ , lowercase_ ) if has_ans_qids: snake_case_ = make_eval_dict(lowercase_ , lowercase_ , qid_list=lowercase_ ) merge_eval(lowercase_ , lowercase_ , """HasAns""" ) if no_ans_qids: snake_case_ = make_eval_dict(lowercase_ , lowercase_ , qid_list=lowercase_ ) merge_eval(lowercase_ , lowercase_ , """NoAns""" ) if OPTS.na_prob_file: find_all_best_thresh(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , OPTS.out_image_dir ) histogram_na_prob(lowercase_ , lowercase_ , OPTS.out_image_dir , """hasAns""" ) histogram_na_prob(lowercase_ , lowercase_ , OPTS.out_image_dir , """noAns""" ) if OPTS.out_file: with open(OPTS.out_file , """w""" ) as f: json.dump(lowercase_ , lowercase_ ) else: 
print(json.dumps(lowercase_ , indent=2 ) ) if __name__ == "__main__": lowerCamelCase_ = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('''Agg''') import matplotlib.pyplot as plt main()
34
0
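A worked example of the token-level F1 computed by the script's `get_raw_scores` path above, with the answer normalization re-derived inline (a simplified re-implementation for illustration, not the script's API):

import collections
import re
import string


def norm_tokens(s):
    # Mirrors normalize_answer above: lowercase, strip articles and punctuation.
    s = re.sub(r"\b(a|an|the)\b", " ", s.lower())
    s = "".join(ch for ch in s if ch not in set(string.punctuation))
    return s.split()


gold, pred = norm_tokens("the cat sat"), norm_tokens("a cat sat down")
num_same = sum((collections.Counter(gold) & collections.Counter(pred)).values())
precision, recall = num_same / len(pred), num_same / len(gold)
print(2 * precision * recall / (precision + recall))  # 0.8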
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
56
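A short usage sketch for the processor exports above; the example strings are illustrative:

from transformers.data.processors import InputExample, glue_processors

processor = glue_processors["mrpc"]()  # -> MrpcProcessor
example = InputExample(guid="train-1", text_a="He ate.", text_b="He dined.", label="1")
print(processor.get_labels())  # ['0', '1']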
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        # Swap so the largest element of the range ends up at `end`.
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
79
0
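Usage sketch for `slowsort` above; the algorithm is intentionally inefficient ("multiply and surrender"), so keep inputs tiny:

data = [5, 2, 4, 1, 3]
slowsort(data)  # sorts in place
print(data)  # [1, 2, 3, 4, 5]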
def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` is closed in the correct order."""
    stack = []
    open_brackets = set("([{")
    closed_brackets = set(")]}")
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for char in s:
        if char in open_brackets:
            stack.append(char)
        elif char in closed_brackets and (
            len(stack) == 0 or open_to_closed[stack.pop()] != char
        ):
            return False
    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
159
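A few quick checks against `is_balanced` above:

assert is_balanced("{[()]}")
assert not is_balanced("([)]")  # closers must match the most recent opener
assert not is_balanced("((")    # unmatched openers leave the stack non-empty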
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
159
1
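Usage sketch for the `hide` context manager above; the cursor is restored even if the body raises:

import time

with hide():
    for _ in range(3):
        time.sleep(0.1)  # e.g. redraw a spinner or progress line here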
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb `number_of_steps` steps taking 1 or 2 at a time."""
    assert isinstance(number_of_steps, int) and number_of_steps > 0, (
        f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    )
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
3
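The recurrence in `climb_stairs` above is the Fibonacci sequence shifted by one; the first few values:

for n in range(1, 6):
    print(n, climb_stairs(n))  # 1->1, 2->2, 3->3, 4->5, 5->8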
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
3
1
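A minimal usage sketch for the feature extractor above (this appears to be the Speech2Text fbank extractor, so `torchaudio` must be installed); the audio is synthetic noise standing in for real speech resampled to 16 kHz:

import numpy as np
from transformers import Speech2TextFeatureExtractor

extractor = Speech2TextFeatureExtractor()  # defaults: 80 mel bins, 16 kHz
speech = (np.random.randn(16_000) * 0.01).astype(np.float32)  # 1 s of audio
inputs = extractor(speech, sampling_rate=16_000, return_tensors="pt")
print(inputs.input_features.shape)  # (1, num_frames, 80), CMVN-normalized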
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() lowercase : str = logging.get_logger(__name__) def A_ ( A__ ) -> Tuple: a__ : Union[str, Any] = SwinConfig.from_pretrained( 'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) a__ : Tuple = MaskFormerConfig(backbone_config=A__ ) a__ : List[str] = 'huggingface/label-files' if "ade20k-full" in model_name: # this should be ok a__ : str = 847 a__ : Optional[Any] = 'maskformer-ade20k-full-id2label.json' elif "ade" in model_name: # this should be ok a__ : Optional[Any] = 150 a__ : Optional[int] = 'ade20k-id2label.json' elif "coco-stuff" in model_name: # this should be ok a__ : Optional[Any] = 171 a__ : Union[str, Any] = 'maskformer-coco-stuff-id2label.json' elif "coco" in model_name: # TODO a__ : List[str] = 133 a__ : Union[str, Any] = 'coco-panoptic-id2label.json' elif "cityscapes" in model_name: # this should be ok a__ : Dict = 19 a__ : int = 'cityscapes-id2label.json' elif "vistas" in model_name: # this should be ok a__ : Optional[int] = 65 a__ : Dict = 'mapillary-vistas-id2label.json' a__ : Dict = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) a__ : str = {int(A__ ): v for k, v in idalabel.items()} return config def A_ ( A__ ) -> Union[str, Any]: a__ : Dict = [] # stem # fmt: off rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) 
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') ) rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') ) # FPN rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') ) rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') ) rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') ) # cross-attention 
out projection rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') ) # MLP 1 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') ) # MLP 2 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') ) # layernorm 3 (final layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') ) # heads on top rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') ) rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') ) rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') ) for i in range(3 ): rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') ) rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') ) # fmt: on return rename_keys def A_ ( A__ , A__ , A__ ) -> Optional[int]: a__ : List[Any] = dct.pop(A__ ) a__ : str = val def A_ ( A__ , A__ ) -> Tuple: a__ : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): a__ : Tuple = 
num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) a__ : Optional[Any] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' ) a__ : List[Any] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict a__ : Union[str, Any] = in_proj_weight[:dim, :] a__ : Union[str, Any] = in_proj_bias[: dim] a__ : Tuple = in_proj_weight[ dim : dim * 2, : ] a__ : Optional[Any] = in_proj_bias[ dim : dim * 2 ] a__ : Optional[int] = in_proj_weight[ -dim :, : ] a__ : int = in_proj_bias[-dim :] # fmt: on def A_ ( A__ , A__ ) -> Dict: # fmt: off a__ : Any = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) a__ : List[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' ) a__ : Any = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict a__ : Tuple = in_proj_weight[: hidden_size, :] a__ : Union[str, Any] = in_proj_bias[:config.hidden_size] a__ : str = in_proj_weight[hidden_size : hidden_size * 2, :] a__ : int = in_proj_bias[hidden_size : hidden_size * 2] a__ : Tuple = in_proj_weight[-hidden_size :, :] a__ : Union[str, Any] = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) a__ : Any = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' ) a__ : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict a__ : Dict = in_proj_weight[: hidden_size, :] a__ : List[str] = in_proj_bias[:config.hidden_size] a__ : List[Any] = in_proj_weight[hidden_size : hidden_size * 2, :] a__ : int = in_proj_bias[hidden_size : hidden_size * 2] a__ : List[str] = in_proj_weight[-hidden_size :, :] a__ : Optional[int] = in_proj_bias[-hidden_size :] # fmt: on def A_ ( ) -> torch.Tensor: a__ : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' a__ : List[str] = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def A_ ( A__ , A__ , A__ , A__ = False ) -> List[str]: a__ : Optional[Any] = get_maskformer_config(A__ ) # load original state_dict with open(A__ , 'rb' ) as f: a__ : Optional[Any] = pickle.load(A__ ) a__ : int = data['model'] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys a__ : Tuple = create_rename_keys(A__ ) for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) read_in_swin_q_k_v(A__ , config.backbone_config ) read_in_decoder_q_k_v(A__ , A__ ) # update to torch tensors for key, value in state_dict.items(): a__ : List[str] = torch.from_numpy(A__ ) # load 🤗 model a__ : List[str] = MaskFormerForInstanceSegmentation(A__ ) model.eval() for name, param in model.named_parameters(): print(A__ , param.shape ) a__ , a__ : str = model.load_state_dict(A__ , strict=A__ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert 
len(unexpected_keys) == 0, F'Unexpected keys: {unexpected_keys}' # verify results a__ : Optional[int] = prepare_img() if "vistas" in model_name: a__ : Optional[int] = 65 elif "cityscapes" in model_name: a__ : str = 6_5535 else: a__ : List[str] = 255 a__ : List[Any] = True if 'ade' in model_name else False image_processor = MaskFormerImageProcessor(ignore_index=A__ , reduce_labels=A__ ) a__ : Optional[int] = image_processor(A__ , return_tensors='pt' ) outputs = model(**A__ ) print('Logits:' , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": a__ : Optional[Any] = torch.tensor( [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , A__ , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'Saving model and image processor to {pytorch_dump_folder_path}' ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if push_to_hub: print('Pushing model and image processor to the hub...' ) model.push_to_hub(F'nielsr/{model_name}' ) image_processor.push_to_hub(F'nielsr/{model_name}' ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help="""Name of the MaskFormer model you'd like to convert""", ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) args = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
225
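# Note on the conversion above: both read_in_swin_q_k_v and read_in_decoder_q_k_v
# rely on the same trick. The original checkpoints store query/key/value
# projections as one fused in_proj matrix, while the HF modules keep three
# separate linear layers. A minimal, self-contained sketch of that split
# (the shapes here are illustrative, not the checkpoint's real sizes):
import torch

hidden_size = 4  # illustrative decoder hidden size
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused q/k/v rows
in_proj_bias = torch.randn(3 * hidden_size)

# Slice along dim 0: the first block is q, the second k, the third v,
# using the same slicing scheme as read_in_decoder_q_k_v above.
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : hidden_size * 2, :]
v_w = in_proj_weight[-hidden_size:, :]
q_b = in_proj_bias[:hidden_size]
k_b = in_proj_bias[hidden_size : hidden_size * 2]
v_b = in_proj_bias[-hidden_size:]

# Concatenating the slices recovers the fused tensors exactly.
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b], dim=0), in_proj_bias)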
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : """simple docstring""" def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[Any]: '''simple docstring''' a__ : Any = parent a__ : int = batch_size a__ : Dict = seq_length a__ : Tuple = is_training a__ : Any = use_input_mask a__ : Optional[Any] = use_token_type_ids a__ : Dict = use_labels a__ : Optional[int] = vocab_size a__ : List[Any] = hidden_size a__ : int = num_hidden_layers a__ : Optional[Any] = num_attention_heads a__ : str = intermediate_size a__ : Optional[int] = hidden_act a__ : Dict = hidden_dropout_prob a__ : Optional[int] = attention_probs_dropout_prob a__ : Tuple = max_position_embeddings a__ : Dict = type_vocab_size a__ : Any = type_sequence_label_size a__ : List[str] = initializer_range a__ : List[str] = num_labels a__ : Optional[Any] = num_choices a__ : str = scope def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a__ : Tuple = None if self.use_input_mask: a__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length]) a__ : Any = None if self.use_token_type_ids: a__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) a__ : str = None a__ : List[Any] = None a__ : List[str] = None if self.use_labels: a__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) a__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) a__ : str = ids_tensor([self.batch_size] , self.num_choices) a__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowercase ( self) -> Optional[int]: '''simple docstring''' return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[int]: '''simple docstring''' a__ : Union[str, Any] = NystromformerModel(config=lowercase) model.to(lowercase) model.eval() a__ : List[Any] 
= model(lowercase , attention_mask=lowercase , token_type_ids=lowercase) a__ : int = model(lowercase , token_type_ids=lowercase) a__ : Optional[Any] = model(lowercase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> str: '''simple docstring''' a__ : List[str] = NystromformerForMaskedLM(config=lowercase) model.to(lowercase) model.eval() a__ : int = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]: '''simple docstring''' a__ : Any = NystromformerForQuestionAnswering(config=lowercase) model.to(lowercase) model.eval() a__ : str = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[Any]: '''simple docstring''' a__ : int = self.num_labels a__ : Optional[Any] = NystromformerForSequenceClassification(lowercase) model.to(lowercase) model.eval() a__ : Tuple = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> List[str]: '''simple docstring''' a__ : Tuple = self.num_labels a__ : int = NystromformerForTokenClassification(config=lowercase) model.to(lowercase) model.eval() a__ : str = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Any: '''simple docstring''' a__ : Optional[int] = self.num_choices a__ : Tuple = NystromformerForMultipleChoice(config=lowercase) model.to(lowercase) model.eval() a__ : Optional[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a__ : Tuple = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a__ : str = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a__ : Optional[int] = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' a__ : List[Any] = self.prepare_config_and_inputs() ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) : str = config_and_inputs a__ : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): """simple docstring""" __A : Any = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, 
NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) __A : str = ( { '''feature-extraction''': NystromformerModel, '''fill-mask''': NystromformerForMaskedLM, '''question-answering''': NystromformerForQuestionAnswering, '''text-classification''': NystromformerForSequenceClassification, '''token-classification''': NystromformerForTokenClassification, '''zero-shot''': NystromformerForSequenceClassification, } if is_torch_available() else {} ) __A : Optional[Any] = False __A : Tuple = False def __lowercase ( self) -> Optional[Any]: '''simple docstring''' a__ : int = NystromformerModelTester(self) a__ : Any = ConfigTester(self , config_class=lowercase , hidden_size=37) def __lowercase ( self) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def __lowercase ( self) -> int: '''simple docstring''' a__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase) def __lowercase ( self) -> Tuple: '''simple docstring''' a__ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a__ : Optional[Any] = type self.model_tester.create_and_check_model(*lowercase) def __lowercase ( self) -> Tuple: '''simple docstring''' a__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase) def __lowercase ( self) -> Any: '''simple docstring''' a__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowercase) def __lowercase ( self) -> Any: '''simple docstring''' a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase) def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase) def __lowercase ( self) -> int: '''simple docstring''' a__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase) @slow def __lowercase ( self) -> Optional[int]: '''simple docstring''' for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : int = NystromformerModel.from_pretrained(lowercase) self.assertIsNotNone(lowercase) @require_torch class A__ ( unittest.TestCase ): """simple docstring""" @slow def __lowercase ( self) -> Optional[Any]: '''simple docstring''' a__ : List[str] = NystromformerModel.from_pretrained('uw-madison/nystromformer-512') a__ : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]]) with torch.no_grad(): a__ : List[Any] = model(lowercase)[0] a__ : str = torch.Size((1, 6, 768)) self.assertEqual(output.shape , lowercase) a__ : str = torch.tensor( [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1e-4)) @slow def __lowercase ( self) -> Optional[int]: '''simple docstring''' a__ : Any = 'the [MASK] of Belgium is Brussels' a__ : List[str] = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512') a__ : Optional[int] = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512') a__ : List[Any] = tokenizer(lowercase , return_tensors='pt') with torch.no_grad(): a__ : Union[str, Any] = model(encoding.input_ids).logits a__ : str = token_logits[:, 2, :].argmax(-1)[0] 
self.assertEqual(tokenizer.decode(lowercase) , 'capital')
225
1
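# The masked-LM integration test above hard-codes the [MASK] position (token
# index 2). A more general pattern, sketched here assuming the
# 'uw-madison/nystromformer-512' checkpoint is reachable, locates the mask
# position from the input ids instead of hard-coding it:
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
model = AutoModelForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

encoding = tokenizer("the [MASK] of Belgium is Brussels", return_tensors="pt")
with torch.no_grad():
    logits = model(**encoding).logits

# Find the mask position(s) instead of assuming index 2.
mask_positions = (encoding.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)
predicted_id = logits[mask_positions].argmax(-1)
print(tokenizer.decode(predicted_id))  # expected: "capital", as in the test above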
'''simple docstring'''


def find_minimum_change(denominations, value):
    total_value = int(value)
    # Initialize result
    answer = []
    # Traverse denominations from largest to smallest (greedy choice)
    for denomination in sorted(denominations, reverse=True):
        # Take as many of this denomination as possible
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)
    # Return the "answers" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
151
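# The greedy strategy in find_minimum_change above is only guaranteed optimal
# for canonical coin systems such as the default Indian denominations. A small
# self-contained dynamic-programming sketch shows a system where greedy fails:
def min_coins_dp(denominations, value):
    """Exact minimum number of coins, computed bottom-up."""
    INF = float("inf")
    best = [0] + [INF] * value
    for amount in range(1, value + 1):
        for coin in denominations:
            if coin <= amount and best[amount - coin] + 1 < best[amount]:
                best[amount] = best[amount - coin] + 1
    return best[value]


# Greedy picks 4 + 1 + 1 (three coins) for value 6 with coins {1, 3, 4},
# while the true optimum is 3 + 3 (two coins):
assert min_coins_dp([1, 3, 4], 6) == 2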
'''simple docstring''' import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) lowercase__ = { "sample_size": 32, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": 1000, "block_out_channels": [32, 64], "attention_head_dim": 8, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } lowercase__ = { "sample_size": 64, "in_channels": 3, "out_channels": 3, "layers_per_block": 3, "num_class_embeds": 1000, "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } lowercase__ = { "sample_size": 256, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": None, "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "default", "upsample_type": "resnet", "downsample_type": "resnet", } lowercase__ = { "num_train_timesteps": 40, "sigma_min": 0.002, "sigma_max": 80.0, } lowercase__ = { "num_train_timesteps": 201, "sigma_min": 0.002, "sigma_max": 80.0, } lowercase__ = { "num_train_timesteps": 151, "sigma_min": 0.002, "sigma_max": 80.0, } def UpperCamelCase( UpperCAmelCase_ ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=False ): UpperCAmelCase : str = checkpoint[F"""{old_prefix}.in_layers.0.weight"""] UpperCAmelCase : List[str] = checkpoint[F"""{old_prefix}.in_layers.0.bias"""] UpperCAmelCase : int = checkpoint[F"""{old_prefix}.in_layers.2.weight"""] UpperCAmelCase : int = checkpoint[F"""{old_prefix}.in_layers.2.bias"""] UpperCAmelCase : List[str] = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""] UpperCAmelCase : Optional[int] = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""] UpperCAmelCase : str = checkpoint[F"""{old_prefix}.out_layers.0.weight"""] UpperCAmelCase : int = checkpoint[F"""{old_prefix}.out_layers.0.bias"""] UpperCAmelCase : str = checkpoint[F"""{old_prefix}.out_layers.3.weight"""] UpperCAmelCase : Tuple = checkpoint[F"""{old_prefix}.out_layers.3.bias"""] if has_skip: UpperCAmelCase : int = checkpoint[F"""{old_prefix}.skip_connection.weight"""] UpperCAmelCase : Optional[int] = checkpoint[F"""{old_prefix}.skip_connection.bias"""] return new_checkpoint def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ): UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , 
dim=0 ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 ) UpperCAmelCase : List[Any] = checkpoint[F"""{old_prefix}.norm.weight"""] UpperCAmelCase : Dict = checkpoint[F"""{old_prefix}.norm.bias"""] UpperCAmelCase : Dict = weight_q.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase : List[Any] = bias_q.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase : Optional[Any] = weight_k.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase : Any = bias_k.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase : int = bias_v.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase : List[str] = ( checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 ) ) UpperCAmelCase : Tuple = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ): UpperCAmelCase : Union[str, Any] = torch.load(UpperCAmelCase_ , map_location='cpu' ) UpperCAmelCase : Optional[Any] = {} UpperCAmelCase : str = checkpoint['time_embed.0.weight'] UpperCAmelCase : Dict = checkpoint['time_embed.0.bias'] UpperCAmelCase : Optional[int] = checkpoint['time_embed.2.weight'] UpperCAmelCase : str = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: UpperCAmelCase : str = checkpoint['label_emb.weight'] UpperCAmelCase : Any = checkpoint['input_blocks.0.0.weight'] UpperCAmelCase : List[str] = checkpoint['input_blocks.0.0.bias'] UpperCAmelCase : Tuple = unet_config['down_block_types'] UpperCAmelCase : Union[str, Any] = unet_config['layers_per_block'] UpperCAmelCase : Dict = unet_config['attention_head_dim'] UpperCAmelCase : Optional[Any] = unet_config['block_out_channels'] UpperCAmelCase : str = 1 UpperCAmelCase : int = channels_list[0] for i, layer_type in enumerate(UpperCAmelCase_ ): UpperCAmelCase : Optional[Any] = channels_list[i] UpperCAmelCase : Any = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(UpperCAmelCase_ ): UpperCAmelCase : Any = F"""down_blocks.{i}.resnets.{j}""" UpperCAmelCase : Any = F"""input_blocks.{current_layer}.0""" UpperCAmelCase : Dict = True if j == 0 and downsample_block_has_skip else False UpperCAmelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(UpperCAmelCase_ ): UpperCAmelCase : str = F"""down_blocks.{i}.resnets.{j}""" UpperCAmelCase : Optional[Any] = F"""input_blocks.{current_layer}.0""" UpperCAmelCase : Tuple = True if j == 0 and downsample_block_has_skip else False UpperCAmelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) UpperCAmelCase : Optional[Any] = F"""down_blocks.{i}.attentions.{j}""" UpperCAmelCase : List[Any] = F"""input_blocks.{current_layer}.1""" UpperCAmelCase : Optional[int] = convert_attention( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: UpperCAmelCase : Optional[Any] = F"""down_blocks.{i}.downsamplers.0""" UpperCAmelCase : List[str] = F"""input_blocks.{current_layer}.0""" UpperCAmelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 UpperCAmelCase : Tuple = current_channels # hardcoded the mid-block for now UpperCAmelCase : int = 'mid_block.resnets.0' 
UpperCAmelCase : Tuple = 'middle_block.0' UpperCAmelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) UpperCAmelCase : List[Any] = 'mid_block.attentions.0' UpperCAmelCase : List[Any] = 'middle_block.1' UpperCAmelCase : Optional[int] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) UpperCAmelCase : Optional[Any] = 'mid_block.resnets.1' UpperCAmelCase : Dict = 'middle_block.2' UpperCAmelCase : Union[str, Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) UpperCAmelCase : List[str] = 0 UpperCAmelCase : int = unet_config['up_block_types'] for i, layer_type in enumerate(UpperCAmelCase_ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): UpperCAmelCase : int = F"""up_blocks.{i}.resnets.{j}""" UpperCAmelCase : List[Any] = F"""output_blocks.{current_layer}.0""" UpperCAmelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: UpperCAmelCase : Any = F"""up_blocks.{i}.upsamplers.0""" UpperCAmelCase : int = F"""output_blocks.{current_layer-1}.1""" UpperCAmelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): UpperCAmelCase : int = F"""up_blocks.{i}.resnets.{j}""" UpperCAmelCase : int = F"""output_blocks.{current_layer}.0""" UpperCAmelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) UpperCAmelCase : Union[str, Any] = F"""up_blocks.{i}.attentions.{j}""" UpperCAmelCase : Tuple = F"""output_blocks.{current_layer}.1""" UpperCAmelCase : Any = convert_attention( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: UpperCAmelCase : str = F"""up_blocks.{i}.upsamplers.0""" UpperCAmelCase : Optional[Any] = F"""output_blocks.{current_layer-1}.2""" UpperCAmelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) UpperCAmelCase : Any = checkpoint['out.0.weight'] UpperCAmelCase : Optional[int] = checkpoint['out.0.bias'] UpperCAmelCase : Tuple = checkpoint['out.2.weight'] UpperCAmelCase : str = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.") parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model." 
) parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.") args = parser.parse_args() args.class_cond = strabool(args.class_cond) ckpt_name = os.path.basename(args.unet_path) print(f'''Checkpoint: {ckpt_name}''') # Get U-Net config if "imagenet64" in ckpt_name: unet_config = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): unet_config = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: unet_config = TEST_UNET_CONFIG else: raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''') if not args.class_cond: unet_config["num_class_embeds"] = None converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config) image_unet = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: scheduler_config = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''') cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config) consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
151
1
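# convert_attention in the script above undoes two packing choices of the
# original consistency-model checkpoint: q/k/v are stored as one fused "qkv"
# 1x1-conv weight, and each projection must be squeezed from conv shape into
# linear shape. A minimal sketch with made-up sizes:
import torch

channels = 8  # illustrative channel count
qkv_weight = torch.randn(3 * channels, channels, 1, 1)  # fused (3C, C, 1, 1)

# Step 1: split the fused tensor into the three projections along dim 0.
weight_q, weight_k, weight_v = qkv_weight.chunk(3, dim=0)

# Step 2: drop the trailing 1x1 spatial dims to obtain a linear weight (C, C).
weight_q = weight_q.squeeze(-1).squeeze(-1)

assert weight_q.shape == (channels, channels)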
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) snake_case : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name snake_case : Optional[Any] = ''' Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") >>> pipe.to("cuda") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ).images >>> image[0].save("cat.png") ``` ''' def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any]=8 ): a__ = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a__ = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class snake_case_ (lowerCamelCase_ ): def __init__( self :List[str] ,__snake_case :UNetaDConditionModel ,__snake_case :DDPMScheduler ,__snake_case :VQModel ,) -> Any: super().__init__() self.register_modules( unet=__snake_case ,scheduler=__snake_case ,movq=__snake_case ,) a__ = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase__( self :Tuple ,__snake_case :Any ,__snake_case :Optional[Any] ,__snake_case :Any ,__snake_case :str ,__snake_case :int ,__snake_case :Any ) -> Optional[Any]: if latents is None: a__ = randn_tensor(__snake_case ,generator=__snake_case ,device=__snake_case ,dtype=__snake_case ) else: if latents.shape != shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' ) a__ = latents.to(__snake_case ) a__ = latents * scheduler.init_noise_sigma return latents def lowerCamelCase__( self :Tuple ,__snake_case :int=0 ) -> int: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) a__ = torch.device(F'cuda:{gpu_id}' ) a__ = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__snake_case ,__snake_case ) def lowerCamelCase__( self :Any ,__snake_case :Optional[Any]=0 ) -> int: if is_accelerate_available() and is_accelerate_version('>=' ,'0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) a__ = torch.device(F'cuda:{gpu_id}' ) if self.device.type != "cpu": self.to('cpu' ,silence_dtype_warnings=__snake_case ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a__ = None for cpu_offloaded_model in [self.unet, self.movq]: a__ , a__ = cpu_offload_with_hook(__snake_case ,__snake_case ,prev_module_hook=__snake_case ) # We'll offload the last model manually. 
a__ = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase__( self :Union[str, Any] ) -> int: if not hasattr(self.unet ,'_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(__snake_case ,'_hf_hook' ) and hasattr(module._hf_hook ,'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__snake_case ) def __call__( self :Union[str, Any] ,__snake_case :Union[torch.FloatTensor, List[torch.FloatTensor]] ,__snake_case :Union[torch.FloatTensor, List[torch.FloatTensor]] ,__snake_case :int = 5_12 ,__snake_case :int = 5_12 ,__snake_case :int = 1_00 ,__snake_case :float = 4.0 ,__snake_case :int = 1 ,__snake_case :Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__snake_case :Optional[torch.FloatTensor] = None ,__snake_case :Optional[str] = "pil" ,__snake_case :bool = True ,) -> Optional[int]: a__ = self._execution_device a__ = guidance_scale > 1.0 if isinstance(__snake_case ,__snake_case ): a__ = torch.cat(__snake_case ,dim=0 ) a__ = image_embeds.shape[0] * num_images_per_prompt if isinstance(__snake_case ,__snake_case ): a__ = torch.cat(__snake_case ,dim=0 ) if do_classifier_free_guidance: a__ = image_embeds.repeat_interleave(__snake_case ,dim=0 ) a__ = negative_image_embeds.repeat_interleave(__snake_case ,dim=0 ) a__ = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__snake_case ) self.scheduler.set_timesteps(__snake_case ,device=__snake_case ) a__ = self.scheduler.timesteps a__ = self.unet.config.in_channels a__ , a__ = downscale_height_and_width(__snake_case ,__snake_case ,self.movq_scale_factor ) # create initial latent a__ = self.prepare_latents( (batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,__snake_case ,__snake_case ,__snake_case ,self.scheduler ,) for i, t in enumerate(self.progress_bar(__snake_case ) ): # expand the latents if we are doing classifier free guidance a__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a__ = {'image_embeds': image_embeds} a__ = self.unet( sample=__snake_case ,timestep=__snake_case ,encoder_hidden_states=__snake_case ,added_cond_kwargs=__snake_case ,return_dict=__snake_case ,)[0] if do_classifier_free_guidance: a__ , a__ = noise_pred.split(latents.shape[1] ,dim=1 ) a__ , a__ = noise_pred.chunk(2 ) a__ , a__ = variance_pred.chunk(2 ) a__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a__ = torch.cat([noise_pred, variance_pred_text] ,dim=1 ) if not ( hasattr(self.scheduler.config ,'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a__ , a__ = noise_pred.split(latents.shape[1] ,dim=1 ) # compute the previous noisy sample x_t -> x_t-1 a__ = self.scheduler.step( __snake_case ,__snake_case ,__snake_case ,generator=__snake_case ,)[0] # post-processing a__ = self.movq.decode(__snake_case ,force_not_quantize=__snake_case )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' ) if output_type in ["np", "pil"]: a__ = image * 0.5 + 0.5 a__ = image.clamp(0 ,1 ) a__ = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy() if output_type == "pil": a__ = self.numpy_to_pil(__snake_case ) if not return_dict: return (image,) return 
ImagePipelineOutput(images=__snake_case )
351
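# downscale_height_and_width in the pipeline above maps a requested pixel
# resolution to the movq latent resolution, rounding up so that sizes not
# divisible by scale_factor**2 still work. A standalone restatement of that
# helper with a couple of sanity checks:
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


assert downscale_height_and_width(512, 512) == (64, 64)  # 512 px -> 64 latent
assert downscale_height_and_width(513, 768) == (72, 96)  # 513 rounds up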
import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() snake_case : str = logging.get_logger(__name__) snake_case : Optional[Any] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''', '''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''', '''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } snake_case : Dict = [ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ): for attribute in key.split('.' ): a__ = getattr(__lowerCAmelCase , __lowerCAmelCase ) if weight_type is not None: a__ = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape else: a__ = hf_pointer.shape assert hf_shape == value.shape, ( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": a__ = value elif weight_type == "weight_g": a__ = value elif weight_type == "weight_v": a__ = value elif weight_type == "bias": a__ = value else: a__ = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] ): a__ = [] a__ = fairseq_model.state_dict() a__ = hf_model.feature_extractor for name, value in fairseq_dict.items(): a__ = False if "conv_layers" in name: load_conv_layer( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) a__ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: a__ = True if "*" in mapped_key: a__ = name.split(__lowerCAmelCase )[0].split('.' 
)[-2] a__ = mapped_key.replace('*' , __lowerCAmelCase ) if "weight_g" in name: a__ = 'weight_g' elif "weight_v" in name: a__ = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: a__ = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj a__ = 'weight' else: a__ = None set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) continue if not is_used: unused_weights.append(__lowerCAmelCase ) logger.warning(F'Unused weights: {unused_weights}' ) def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] ): a__ = full_name.split('conv_layers.' )[-1] a__ = name.split('.' ) a__ = int(items[0] ) a__ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) a__ = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) a__ = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) a__ = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) a__ = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(__lowerCAmelCase ) @torch.no_grad() def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any]=None ): # load the pre-trained checkpoints a__ = torch.load(__lowerCAmelCase ) a__ = WavLMConfigOrig(checkpoint['cfg'] ) a__ = WavLMOrig(__lowerCAmelCase ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: a__ = WavLMConfig.from_pretrained(__lowerCAmelCase ) else: a__ = WavLMConfig() a__ = WavLMModel(__lowerCAmelCase ) recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase ) hf_wavlm.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": snake_case : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') snake_case : Union[str, Any] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
109
0
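# The MAPPING table in the WavLM conversion script above uses "*" as a
# layer-index wildcard; during conversion the index is recovered from the
# fairseq key and substituted in. A self-contained illustration (the key
# below is invented for the example):
MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

name = "encoder.layers.11.self_attn.k_proj.weight"  # hypothetical fairseq key
for key, mapped_key in MAPPING.items():
    if key in name:
        # Everything before the matched key ends in "...layers.<idx>.", so the
        # layer index is the second-to-last dotted component of that prefix.
        layer_index = name.split(key)[0].split(".")[-2]
        print(mapped_key.replace("*", layer_index))
        # -> encoder.layers.11.attention.k_proj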
"""simple docstring""" import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# A: int = [ # (stable-diffusion, HF Diffusers) ("time_embed.0.weight", "time_embedding.linear_1.weight"), ("time_embed.0.bias", "time_embedding.linear_1.bias"), ("time_embed.2.weight", "time_embedding.linear_2.weight"), ("time_embed.2.bias", "time_embedding.linear_2.bias"), ("input_blocks.0.0.weight", "conv_in.weight"), ("input_blocks.0.0.bias", "conv_in.bias"), ("out.0.weight", "conv_norm_out.weight"), ("out.0.bias", "conv_norm_out.bias"), ("out.2.weight", "conv_out.weight"), ("out.2.bias", "conv_out.bias"), ] A: str = [ # (stable-diffusion, HF Diffusers) ("in_layers.0", "norm1"), ("in_layers.2", "conv1"), ("out_layers.0", "norm2"), ("out_layers.3", "conv2"), ("emb_layers.1", "time_emb_proj"), ("skip_connection", "conv_shortcut"), ] A: Optional[Any] = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks A: Optional[Any] = f"""down_blocks.{i}.resnets.{j}.""" A: Optional[Any] = f"""input_blocks.{3*i + j + 1}.0.""" unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 A: Any = f"""down_blocks.{i}.attentions.{j}.""" A: List[Any] = f"""input_blocks.{3*i + j + 1}.1.""" unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks A: str = f"""up_blocks.{i}.resnets.{j}.""" A: Union[str, Any] = f"""output_blocks.{3*i + j}.0.""" unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 A: int = f"""up_blocks.{i}.attentions.{j}.""" A: List[Any] = f"""output_blocks.{3*i + j}.1.""" unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 A: Optional[int] = f"""down_blocks.{i}.downsamplers.0.conv.""" A: str = f"""input_blocks.{3*(i+1)}.0.op.""" unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 A: List[Any] = f"""up_blocks.{i}.upsamplers.0.""" A: Optional[Any] = f"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}.""" unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) A: List[Any] = "mid_block.attentions.0." A: List[Any] = "middle_block.1." unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): A: List[Any] = f"""mid_block.resnets.{j}.""" A: int = f"""middle_block.{2*j}.""" unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def _snake_case ( UpperCamelCase : Tuple ): # buyer beware: this is a *brittle* function, # and correct output requires that all of these pieces interact in # the exact order in which I have arranged them. 
UpperCAmelCase : Any = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: UpperCAmelCase : List[Any] = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: UpperCAmelCase : Optional[int] = v.replace(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : List[Any] = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: UpperCAmelCase : Optional[int] = v.replace(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : Dict = v UpperCAmelCase : Optional[int] = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# A: Union[str, Any] = [ # (stable-diffusion, HF Diffusers) ("nin_shortcut", "conv_shortcut"), ("norm_out", "conv_norm_out"), ("mid.attn_1.", "mid_block.attentions.0."), ] for i in range(4): # down_blocks have two resnets for j in range(2): A: Any = f"""encoder.down_blocks.{i}.resnets.{j}.""" A: Tuple = f"""encoder.down.{i}.block.{j}.""" vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: A: Optional[Any] = f"""down_blocks.{i}.downsamplers.0.""" A: Any = f"""down.{i}.downsample.""" vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) A: Tuple = f"""up_blocks.{i}.upsamplers.0.""" A: Optional[int] = f"""up.{3-i}.upsample.""" vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): A: Union[str, Any] = f"""decoder.up_blocks.{i}.resnets.{j}.""" A: Tuple = f"""decoder.up.{3-i}.block.{j}.""" vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): A: Optional[int] = f"""mid_block.resnets.{i}.""" A: List[Any] = f"""mid.block_{i+1}.""" vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) A: int = [ # (stable-diffusion, HF Diffusers) ("norm.", "group_norm."), ("q.", "query."), ("k.", "key."), ("v.", "value."), ("proj_out.", "proj_attn."), ] def _snake_case ( UpperCamelCase : Any ): # convert HF linear weights to SD conv2d weights return w.reshape(*w.shape , 1 , 1 ) def _snake_case ( UpperCamelCase : Any ): UpperCAmelCase : Any = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: UpperCAmelCase : Optional[int] = v.replace(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : List[Any] = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: UpperCAmelCase : int = v.replace(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : str = v UpperCAmelCase : Any = {v: vae_state_dict[k] for k, v in mapping.items()} UpperCAmelCase : Union[str, Any] = ['''q''', '''k''', '''v''', '''proj_out'''] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if F"mid.attn_1.{weight_name}.weight" in k: print(F"Reshaping {k} for SD format" ) UpperCAmelCase : Tuple = reshape_weight_for_sd(__lowerCAmelCase ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# A: str = [ # (stable-diffusion, HF Diffusers) ("resblocks.", "text_model.encoder.layers."), ("ln_1", "layer_norm1"), ("ln_2", "layer_norm2"), (".c_fc.", ".fc1."), (".c_proj.", ".fc2."), (".attn", ".self_attn"), ("ln_final.", "transformer.text_model.final_layer_norm."), ("token_embedding.weight", 
"transformer.text_model.embeddings.token_embedding.weight"), ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"), ] A: Any = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} A: Dict = re.compile("|".join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp A: Dict = {"q": 0, "k": 1, "v": 2} def _snake_case ( UpperCamelCase : str ): UpperCAmelCase : Tuple = {} UpperCAmelCase : int = {} UpperCAmelCase : List[Any] = {} for k, v in text_enc_dict.items(): if ( k.endswith(""".self_attn.q_proj.weight""" ) or k.endswith(""".self_attn.k_proj.weight""" ) or k.endswith(""".self_attn.v_proj.weight""" ) ): UpperCAmelCase : Dict = k[: -len(""".q_proj.weight""" )] UpperCAmelCase : List[str] = k[-len("""q_proj.weight""" )] if k_pre not in capture_qkv_weight: UpperCAmelCase : Union[str, Any] = [None, None, None] UpperCAmelCase : Union[str, Any] = v continue if ( k.endswith(""".self_attn.q_proj.bias""" ) or k.endswith(""".self_attn.k_proj.bias""" ) or k.endswith(""".self_attn.v_proj.bias""" ) ): UpperCAmelCase : Optional[int] = k[: -len(""".q_proj.bias""" )] UpperCAmelCase : List[Any] = k[-len("""q_proj.bias""" )] if k_pre not in capture_qkv_bias: UpperCAmelCase : str = [None, None, None] UpperCAmelCase : Any = v continue UpperCAmelCase : Any = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , __lowerCAmelCase ) UpperCAmelCase : int = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) UpperCAmelCase : str = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , __lowerCAmelCase ) UpperCAmelCase : Tuple = torch.cat(__lowerCAmelCase ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) UpperCAmelCase : str = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , __lowerCAmelCase ) UpperCAmelCase : str = torch.cat(__lowerCAmelCase ) return new_state_dict def _snake_case ( UpperCamelCase : Union[str, Any] ): return text_enc_dict if __name__ == "__main__": A: Any = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt." ) A: List[str] = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" 
# Path for safetensors A: Any = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors") A: List[str] = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors") A: Union[str, Any] = osp.join(args.model_path, "text_encoder", "model.safetensors") # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): A: List[str] = load_file(unet_path, device="cpu") else: A: List[Any] = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin") A: Dict = torch.load(unet_path, map_location="cpu") if osp.exists(vae_path): A: str = load_file(vae_path, device="cpu") else: A: str = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin") A: List[str] = torch.load(vae_path, map_location="cpu") if osp.exists(text_enc_path): A: Dict = load_file(text_enc_path, device="cpu") else: A: Dict = osp.join(args.model_path, "text_encoder", "pytorch_model.bin") A: Optional[int] = torch.load(text_enc_path, map_location="cpu") # Convert the UNet model A: Optional[int] = convert_unet_state_dict(unet_state_dict) A: Optional[int] = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()} # Convert the VAE model A: Optional[int] = convert_vae_state_dict(vae_state_dict) A: Optional[int] = {"first_stage_model." + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper A: str = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm A: Dict = {"transformer." + k: v for k, v in text_enc_dict.items()} A: Any = convert_text_enc_state_dict_vaa(text_enc_dict) A: Tuple = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()} else: A: int = convert_text_enc_state_dict(text_enc_dict) A: Union[str, Any] = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint A: List[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: A: int = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: A: List[str] = {"state_dict": state_dict} torch.save(state_dict, args.checkpoint_path)
109
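# reshape_weight_for_sd in the script above converts attention projection
# weights from the Diffusers nn.Linear layout (out, in) back to the original
# Stable Diffusion 1x1-conv layout (out, in, 1, 1). A quick sketch of the
# round trip, with an illustrative shape:
import torch

w_linear = torch.randn(320, 320)  # HF Diffusers linear weight
w_conv = w_linear.reshape(*w_linear.shape, 1, 1)  # SD Conv2d(kernel_size=1) weight

assert w_conv.shape == (320, 320, 1, 1)
assert torch.equal(w_conv.squeeze(-1).squeeze(-1), w_linear)  # inverse squeeze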
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
156
0
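# A quick sanity check for new_generation above: the blinker pattern must
# oscillate with period 2, so two generations return it to its original state.
# This sketch runs against the function defined in the listing above.
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]

once = new_generation(BLINKER)
twice = new_generation(once)

assert once == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert twice == BLINKER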
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
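# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module): it
# prints the dynamic-axis mapping exposed for the default (seq2seq) task.
if __name__ == "__main__":
    _onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="default")
    print(list(_onnx_config.inputs.keys()))
    # ['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask']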
from manim import * class snake_case__ ( _lowerCAmelCase ): def __magic_name__ ( self ) -> Dict: __magic_name__ : int = Rectangle(height=0.5 , width=0.5 ) __magic_name__ : Optional[int] = Rectangle(height=0.2_5 , width=0.2_5 ) __magic_name__ : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) __magic_name__ : List[Any] = [mem.copy() for i in range(6 )] __magic_name__ : int = [mem.copy() for i in range(6 )] __magic_name__ : Tuple = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : List[str] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : str = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : Union[str, Any] = Text("""CPU""" , font_size=24 ) __magic_name__ : Tuple = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCAmelCase__ ) __magic_name__ : Any = [mem.copy() for i in range(4 )] __magic_name__ : List[Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : Tuple = Text("""GPU""" , font_size=24 ) __magic_name__ : Tuple = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) gpu.move_to([-1, -1, 0] ) self.add(lowerCAmelCase__ ) __magic_name__ : Union[str, Any] = [mem.copy() for i in range(6 )] __magic_name__ : Union[str, Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : str = Text("""Model""" , font_size=24 ) __magic_name__ : Optional[int] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) model.move_to([3, -1.0, 0] ) self.add(lowerCAmelCase__ ) __magic_name__ : str = [] __magic_name__ : Tuple = [] __magic_name__ : Union[str, Any] = [] for i, rect in enumerate(lowerCAmelCase__ ): rect.set_stroke(lowerCAmelCase__ ) __magic_name__ : Optional[Any] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCAmelCase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=lowerCAmelCase__ , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCAmelCase__ , buff=0.0 ) self.add(lowerCAmelCase__ ) model_cpu_arr.append(lowerCAmelCase__ ) self.add(*lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ ) __magic_name__ : Optional[Any] = [mem.copy() for i in range(6 )] __magic_name__ : Optional[Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : Any = Text("""Loaded Checkpoint""" , font_size=24 ) __magic_name__ : Optional[int] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) checkpoint.move_to([3, 0.5, 0] ) self.add(lowerCAmelCase__ ) __magic_name__ : Optional[int] = [] __magic_name__ : Tuple = [] for i, rect in enumerate(lowerCAmelCase__ ): __magic_name__ : Dict = fill.copy().set_fill(lowerCAmelCase__ , opacity=0.7 ) target.move_to(lowerCAmelCase__ ) ckpt_arr.append(lowerCAmelCase__ ) __magic_name__ : int = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(lowerCAmelCase__ ) self.add(*lowerCAmelCase__ , *lowerCAmelCase__ ) __magic_name__ : Tuple = 
Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __magic_name__ : str = MarkupText( F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCAmelCase__ , lowerCAmelCase__ ) __magic_name__ : Any = MarkupText( F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , ) blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(lowerCAmelCase__ ) __magic_name__ : Optional[Any] = MarkupText( F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , ) step_a.move_to([2, 2, 0] ) __magic_name__ : int = [meta_mem.copy() for i in range(6 )] __magic_name__ : Union[str, Any] = [meta_mem.copy() for i in range(6 )] __magic_name__ : Any = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : str = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : Tuple = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : int = Text("""Disk""" , font_size=24 ) __magic_name__ : Union[str, Any] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) disk.move_to([-4.0, -1.2_5, 0] ) self.play(Write(lowerCAmelCase__ , run_time=3 ) , Write(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) ) __magic_name__ : List[Any] = [] for i, rect in enumerate(lowerCAmelCase__ ): __magic_name__ : Dict = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5 ) ) self.play(*lowerCAmelCase__ ) self.play(FadeOut(lowerCAmelCase__ ) ) __magic_name__ : str = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCAmelCase__ , run_time=3 ) ) self.play( FadeOut(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ ) , ) self.wait()
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
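# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module). The
# list-valued fields are indexed per stage, so the defaults above describe a
# three-stage CvT layout.
if __name__ == "__main__":
    _cfg = CvtConfig()
    print(_cfg.depth, _cfg.embed_dim)  # [1, 2, 10] blocks per stage, [64, 192, 384] dims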
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class __lowercase : """simple docstring""" def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=True , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Dict: snake_case : Any = parent snake_case : List[Any] = batch_size snake_case : List[Any] = seq_length snake_case : Dict = is_training snake_case : List[str] = use_input_mask snake_case : List[str] = use_token_type_ids snake_case : Dict = use_labels snake_case : Optional[int] = vocab_size snake_case : Optional[int] = hidden_size snake_case : Optional[Any] = num_hidden_layers snake_case : Optional[Any] = num_attention_heads snake_case : Union[str, Any] = intermediate_size snake_case : List[Any] = hidden_act snake_case : int = hidden_dropout_prob snake_case : str = attention_probs_dropout_prob snake_case : List[Any] = max_position_embeddings snake_case : List[Any] = type_vocab_size snake_case : int = type_sequence_label_size snake_case : Optional[int] = initializer_range snake_case : Union[str, Any] = num_labels snake_case : List[str] = num_choices snake_case : Optional[int] = scope def UpperCAmelCase ( self ) -> int: snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case : Union[str, Any] = None if self.use_input_mask: snake_case : str = random_attention_mask([self.batch_size, self.seq_length] ) snake_case : Tuple = None if self.use_token_type_ids: snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case : Optional[int] = None snake_case : str = None snake_case : List[Any] = None if self.use_labels: snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case : int = ids_tensor([self.batch_size] , self.num_choices ) snake_case : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self ) -> Any: return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , ) def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> List[str]: snake_case : Tuple = NystromformerModel(config=A ) model.to(A ) model.eval() snake_case : str = model(A , attention_mask=A , token_type_ids=A ) 
snake_case : List[str] = model(A , token_type_ids=A ) snake_case : Any = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Union[str, Any]: snake_case : List[Any] = NystromformerForMaskedLM(config=A ) model.to(A ) model.eval() snake_case : List[Any] = model(A , attention_mask=A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Optional[Any]: snake_case : List[Any] = NystromformerForQuestionAnswering(config=A ) model.to(A ) model.eval() snake_case : Dict = model( A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Tuple: snake_case : Optional[int] = self.num_labels snake_case : Union[str, Any] = NystromformerForSequenceClassification(A ) model.to(A ) model.eval() snake_case : Union[str, Any] = model(A , attention_mask=A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> int: snake_case : Union[str, Any] = self.num_labels snake_case : Union[str, Any] = NystromformerForTokenClassification(config=A ) model.to(A ) model.eval() snake_case : int = model(A , attention_mask=A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Any: snake_case : List[Any] = self.num_choices snake_case : Union[str, Any] = NystromformerForMultipleChoice(config=A ) model.to(A ) model.eval() snake_case : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case : List[str] = model( A , attention_mask=A , token_type_ids=A , labels=A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self ) -> Tuple: snake_case : Tuple = self.prepare_config_and_inputs() ( ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ) : Any = config_and_inputs snake_case : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __lowercase (UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): """simple docstring""" _snake_case = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) _snake_case = ( { """feature-extraction""": NystromformerModel, """fill-mask""": NystromformerForMaskedLM, """question-answering""": NystromformerForQuestionAnswering, """text-classification""": NystromformerForSequenceClassification, """token-classification""": NystromformerForTokenClassification, """zero-shot""": 
NystromformerForSequenceClassification, } if is_torch_available() else {} ) _snake_case = False _snake_case = False def UpperCAmelCase ( self ) -> str: snake_case : Dict = NystromformerModelTester(self ) snake_case : str = ConfigTester(self , config_class=A , hidden_size=3_7 ) def UpperCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> Any: snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCAmelCase ( self ) -> Tuple: snake_case : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case : int = type self.model_tester.create_and_check_model(*A ) def UpperCAmelCase ( self ) -> Dict: snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A ) def UpperCAmelCase ( self ) -> Tuple: snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A ) def UpperCAmelCase ( self ) -> Tuple: snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def UpperCAmelCase ( self ) -> Tuple: snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def UpperCAmelCase ( self ) -> Any: snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @slow def UpperCAmelCase ( self ) -> Optional[int]: for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case : Any = NystromformerModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_torch class __lowercase (unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase ( self ) -> Dict: snake_case : Optional[int] = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" ) snake_case : int = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): snake_case : Optional[int] = model(A )[0] snake_case : List[Any] = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , A ) snake_case : Union[str, Any] = torch.tensor( [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1e-4 ) ) @slow def UpperCAmelCase ( self ) -> Optional[int]: snake_case : Union[str, Any] = """the [MASK] of Belgium is Brussels""" snake_case : Any = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" ) snake_case : Union[str, Any] = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" ) snake_case : int = tokenizer(A , return_tensors="""pt""" ) with torch.no_grad(): snake_case : Optional[int] = model(encoding.input_ids ).logits snake_case : str = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(A ) , """capital""" )
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from"
    " diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from"
    " diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
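# ---------------------------------------------------------------------------
# Migration sketch (added for illustration): the replacement import suggested
# by the deprecation message above is simply
#
#     from diffusers import StableDiffusionControlNetPipeline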
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
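# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module):
# composing the full config from the two sub-configs via the classmethod above.
if __name__ == "__main__":
    _full = Pix2StructConfig.from_text_vision_configs(Pix2StructTextConfig(), Pix2StructVisionConfig())
    print(_full.text_config.hidden_size, _full.vision_config.hidden_size)  # 768 768 by default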
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=18 , snake_case__=30 , snake_case__=400 , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=[0.48145466, 0.4578275, 0.40821073] , snake_case__=[0.26862954, 0.26130258, 0.27577711] , snake_case__=True , ): """simple docstring""" lowerCAmelCase : str = size if size is not None else {"height": 224, "width": 224} lowerCAmelCase : str = crop_size if crop_size is not None else {"height": 18, "width": 18} lowerCAmelCase : Union[str, Any] = parent lowerCAmelCase : List[Any] = batch_size lowerCAmelCase : Union[str, Any] = num_channels lowerCAmelCase : Tuple = image_size lowerCAmelCase : Dict = min_resolution lowerCAmelCase : Optional[Any] = max_resolution lowerCAmelCase : int = do_resize lowerCAmelCase : Any = size lowerCAmelCase : Any = do_center_crop lowerCAmelCase : Tuple = crop_size lowerCAmelCase : List[str] = do_normalize lowerCAmelCase : Any = image_mean lowerCAmelCase : Any = image_std lowerCAmelCase : Tuple = do_convert_rgb def lowercase__ ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def lowercase__ ( self , snake_case__=False , snake_case__=False , snake_case__=False ): """simple docstring""" assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: lowerCAmelCase : Any = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: lowerCAmelCase : List[str] = [] for i in range(self.batch_size ): lowerCAmelCase , lowerCAmelCase : Optional[Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension lowerCAmelCase : int = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs] if torchify: lowerCAmelCase : Optional[int] = [torch.from_numpy(snake_case__ ) for x in image_inputs] return image_inputs @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ): """simple docstring""" a : int =ChineseCLIPImageProcessor if is_vision_available() else None def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : int = ChineseCLIPImageProcessingTester(self , do_center_crop=snake_case__ ) @property def lowercase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) 
self.assertTrue(hasattr(snake_case__ , "do_resize" ) ) self.assertTrue(hasattr(snake_case__ , "size" ) ) self.assertTrue(hasattr(snake_case__ , "do_center_crop" ) ) self.assertTrue(hasattr(snake_case__ , "center_crop" ) ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "image_mean" ) ) self.assertTrue(hasattr(snake_case__ , "image_std" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 224, "width": 224} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) lowerCAmelCase : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase : Tuple = image_processing(snake_case__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) # Test not batched input lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase : Tuple = image_processing(snake_case__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase : int = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values 
self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase : List[Any] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ): """simple docstring""" a : Optional[int] =ChineseCLIPImageProcessor if is_vision_available() else None def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=snake_case__ ) lowerCAmelCase : Optional[int] = 3 @property def lowercase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_resize" ) ) self.assertTrue(hasattr(snake_case__ , "size" ) ) self.assertTrue(hasattr(snake_case__ , "do_center_crop" ) ) self.assertTrue(hasattr(snake_case__ , "center_crop" ) ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "image_mean" ) ) self.assertTrue(hasattr(snake_case__ , "image_std" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase : Dict = image_processing(snake_case__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging a = ( 'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py' ) a = logging.get_logger(__name__) # pylint: disable=invalid-name def lowercase () -> int: '''simple docstring''' lowerCAmelCase = """https://pypi.org/pypi/diffusers/json""" lowerCAmelCase = json.loads(request.urlopen(snake_case__ ).read() )["""releases"""].keys() return sorted(snake_case__ , key=lambda snake_case__ : version.Version(snake_case__ ) ) def lowercase () -> List[str]: '''simple docstring''' if HF_MODULES_CACHE in sys.path: return sys.path.append(snake_case__ ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowerCAmelCase = Path(snake_case__ ) / """__init__.py""" if not init_path.exists(): init_path.touch() def lowercase (snake_case__ : Union[str, os.PathLike] ) -> List[Any]: '''simple docstring''' init_hf_modules() lowerCAmelCase = Path(snake_case__ ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowerCAmelCase = dynamic_module_path / """__init__.py""" if not init_path.exists(): init_path.touch() def lowercase (snake_case__ : Optional[int] ) -> Tuple: '''simple docstring''' with open(snake_case__ , """r""" , encoding="""utf-8""" ) as f: lowerCAmelCase = f.read() # Imports of the form `import .xxx` lowerCAmelCase = re.findall("""^\s*import\s+\.(\S+)\s*$""" , snake_case__ , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("""^\s*from\s+\.(\S+)\s+import""" , snake_case__ , flags=re.MULTILINE ) # Unique-ify return list(set(snake_case__ ) ) def lowercase (snake_case__ : Dict ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase = False lowerCAmelCase = [module_file] lowerCAmelCase = [] # Let's recurse through all relative imports while not no_change: lowerCAmelCase = [] for f in files_to_check: new_imports.extend(get_relative_imports(snake_case__ ) ) lowerCAmelCase = Path(snake_case__ ).parent lowerCAmelCase = [str(module_path / m ) for m in new_imports] lowerCAmelCase = [f for f in new_import_files if f not in all_relative_imports] lowerCAmelCase = [f'''{f}.py''' for f in new_import_files] lowerCAmelCase = len(snake_case__ ) == 0 all_relative_imports.extend(snake_case__ ) return all_relative_imports def lowercase (snake_case__ : Optional[int] ) -> int: '''simple docstring''' with open(snake_case__ , """r""" , encoding="""utf-8""" ) as f: lowerCAmelCase = f.read() # Imports of the form `import xxx` lowerCAmelCase = re.findall("""^\s*import\s+(\S+)\s*$""" , snake_case__ , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("""^\s*from\s+(\S+)\s+import""" , snake_case__ , flags=re.MULTILINE ) # Only keep the top-level module lowerCAmelCase = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )] # Unique-ify and test we got them all lowerCAmelCase = list(set(snake_case__ ) ) lowerCAmelCase = [] for imp in imports: try: importlib.import_module(snake_case__ ) except ImportError: 
missing_packages.append(snake_case__ ) if len(snake_case__ ) > 0: raise ImportError( """This modeling file requires the following packages that were not found in your environment: """ f'''{', '.join(snake_case__ )}. Run `pip install {' '.join(snake_case__ )}`''' ) return get_relative_imports(snake_case__ ) def lowercase (snake_case__ : Any , snake_case__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase = module_path.replace(os.path.sep , """.""" ) lowerCAmelCase = importlib.import_module(snake_case__ ) if class_name is None: return find_pipeline_class(snake_case__ ) return getattr(snake_case__ , snake_case__ ) def lowercase (snake_case__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' from ..pipelines import DiffusionPipeline lowerCAmelCase = dict(inspect.getmembers(snake_case__ , inspect.isclass ) ) lowerCAmelCase = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , snake_case__ ) and cls.__module__.split(""".""" )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:''' f''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in''' f''' {loaded_module}.''' ) lowerCAmelCase = cls return pipeline_class def lowercase (snake_case__ : Union[str, os.PathLike] , snake_case__ : str , snake_case__ : Optional[Union[str, os.PathLike]] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : Optional[Dict[str, str]] = None , snake_case__ : Optional[Union[bool, str]] = None , snake_case__ : Optional[str] = None , snake_case__ : bool = False , ) -> int: '''simple docstring''' lowerCAmelCase = str(snake_case__ ) lowerCAmelCase = os.path.join(snake_case__ , snake_case__ ) if os.path.isfile(snake_case__ ): lowerCAmelCase = module_file_or_url lowerCAmelCase = """local""" elif pretrained_model_name_or_path.count("""/""" ) == 0: lowerCAmelCase = get_diffusers_versions() # cut ".dev0" lowerCAmelCase = """v""" + """.""".join(__version__.split(""".""" )[:3] ) # retrieve github version that matches if revision is None: lowerCAmelCase = latest_version if latest_version[1:] in available_versions else """main""" logger.info(f'''Defaulting to latest_version: {revision}.''' ) elif revision in available_versions: lowerCAmelCase = f'''v{revision}''' elif revision == "main": lowerCAmelCase = revision else: raise ValueError( f'''`custom_revision`: {revision} does not exist. 
Please make sure to choose one of''' f''' {', '.join(available_versions + ['main'] )}.''' ) # community pipeline on GitHub lowerCAmelCase = COMMUNITY_PIPELINES_URL.format(revision=snake_case__ , pipeline=snake_case__ ) try: lowerCAmelCase = cached_download( snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , ) lowerCAmelCase = """git""" lowerCAmelCase = pretrained_model_name_or_path + """.py""" except EnvironmentError: logger.error(f'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' ) raise else: try: # Load from URL or cache if already cached lowerCAmelCase = hf_hub_download( snake_case__ , snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , ) lowerCAmelCase = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) ) except EnvironmentError: logger.error(f'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' ) raise # Check we have all the requirements in our environment lowerCAmelCase = check_imports(snake_case__ ) # Now we move the module inside our cached dynamic modules. lowerCAmelCase = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(snake_case__ ) lowerCAmelCase = Path(snake_case__ ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(snake_case__ , submodule_path / module_file ) for module_needed in modules_needed: lowerCAmelCase = f'''{module_needed}.py''' shutil.copy(os.path.join(snake_case__ , snake_case__ ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(snake_case__ , snake_case__ ): lowerCAmelCase = use_auth_token elif use_auth_token is True: lowerCAmelCase = HfFolder.get_token() else: lowerCAmelCase = None lowerCAmelCase = model_info(snake_case__ , revision=snake_case__ , token=snake_case__ ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
lowerCAmelCase = submodule_path / commit_hash lowerCAmelCase = full_submodule + os.path.sep + commit_hash create_dynamic_module(snake_case__ ) if not (submodule_path / module_file).exists(): shutil.copy(snake_case__ , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( snake_case__ , f'''{module_needed}.py''' , cache_dir=snake_case__ , force_download=snake_case__ , resume_download=snake_case__ , proxies=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , local_files_only=snake_case__ , ) return os.path.join(snake_case__ , snake_case__ ) def lowercase (snake_case__ : Union[str, os.PathLike] , snake_case__ : str , snake_case__ : Optional[str] = None , snake_case__ : Optional[Union[str, os.PathLike]] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : Optional[Dict[str, str]] = None , snake_case__ : Optional[Union[bool, str]] = None , snake_case__ : Optional[str] = None , snake_case__ : bool = False , **snake_case__ : Any , ) -> int: '''simple docstring''' lowerCAmelCase = get_cached_module_file( snake_case__ , snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , resume_download=snake_case__ , proxies=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , local_files_only=snake_case__ , ) return get_class_in_module(snake_case__ , final_module.replace(""".py""" , """""" ) )
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
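# ---------------------------------------------------------------------------
# Usage sketch (added for illustration): because of the _LazyModule above, the
# heavy submodules are only imported on first attribute access, e.g.
#
#     from transformers.models.xmod import XmodConfig   # triggers the lazy load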
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums by rotating out the head and recursing."""
    result: list[list[int]] = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)  # restore nums for the next rotation
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums via in-place swaps and backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data produced by permute2
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
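A quick cross-check of the two strategies defined above (illustrative only; both should produce all 3! orderings of a three-element list):

assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
print(len(permute2([1, 2, 3])))  # 6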
"""simple docstring""" def lowerCAmelCase__ ( _UpperCamelCase : int ) -> list[int]: """simple docstring""" if length <= 0 or not isinstance(_UpperCamelCase , _UpperCamelCase ): raise ValueError('Length must be a positive integer.' ) return [n * (2 * n - 1) for n in range(_UpperCamelCase )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : """simple docstring""" def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_12 , lowerCAmelCase=16 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): """simple docstring""" snake_case = parent snake_case = batch_size snake_case = seq_length snake_case = is_training snake_case = use_input_mask snake_case = use_token_type_ids snake_case = use_labels snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = intermediate_size snake_case = hidden_act snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = type_sequence_label_size snake_case = initializer_range snake_case = num_labels snake_case = num_choices snake_case = scope def snake_case ( self ): """simple docstring""" snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case = None if self.use_input_mask: snake_case = random_attention_mask([self.batch_size, self.seq_length] ) snake_case = None if self.use_token_type_ids: snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case = None snake_case = None snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case = ids_tensor([self.batch_size] , self.num_choices ) snake_case = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self ): """simple docstring""" return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" snake_case = BioGptModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() snake_case = model(lowerCAmelCase , 
attention_mask=lowerCAmelCase ) snake_case = model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): """simple docstring""" snake_case = BioGptForCausalLM(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): """simple docstring""" snake_case = BioGptModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() # create attention mask snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase ) snake_case = self.seq_length // 2 snake_case = 0 # first forward pass snake_case ,snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase ).to_tuple() # create hypothetical next token and extent to next_input_ids snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids snake_case = ids_tensor((1,) , lowerCAmelCase ).item() + 1 snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) snake_case = random_other_next_tokens # append to next input_ids and attn_mask snake_case = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCAmelCase )] , dim=1 , ) # get two different outputs snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state'] snake_case = model(lowerCAmelCase , past_key_values=lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state'] # select random slice snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case = output_from_no_past[:, -1, random_slice_idx].detach() snake_case = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): """simple docstring""" snake_case = BioGptModel(config=lowerCAmelCase ).to(lowerCAmelCase ).eval() snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase ) # first forward pass snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase ) snake_case ,snake_case = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and snake_case = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state'] snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[ 'last_hidden_state' ] # select random slice snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case = output_from_no_past[:, -3:, 
random_slice_idx].detach() snake_case = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase , lowerCAmelCase=False ): """simple docstring""" snake_case = BioGptForCausalLM(lowerCAmelCase ) model.to(lowerCAmelCase ) if gradient_checkpointing: model.gradient_checkpointing_enable() snake_case = model(lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def snake_case ( self , lowerCAmelCase , *lowerCAmelCase ): """simple docstring""" snake_case = BioGptModel(lowerCAmelCase ) snake_case = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): """simple docstring""" snake_case = self.num_labels snake_case = BioGptForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self ): """simple docstring""" snake_case = self.prepare_config_and_inputs() ( ( snake_case ) ,( snake_case ) ,( snake_case ) ,( snake_case ) ,( snake_case ) ,( snake_case ) ,( snake_case ) , ) = config_and_inputs snake_case = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase : List[Any] = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) _lowerCAmelCase : str = (BioGptForCausalLM,) if is_torch_available() else () _lowerCAmelCase : str = ( { """feature-extraction""": BioGptModel, """text-classification""": BioGptForSequenceClassification, """text-generation""": BioGptForCausalLM, """token-classification""": BioGptForTokenClassification, """zero-shot""": BioGptForSequenceClassification, } if is_torch_available() else {} ) _lowerCAmelCase : List[str] = False def snake_case ( self ): """simple docstring""" snake_case = BioGptModelTester(self ) snake_case = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 ) def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def snake_case ( self ): """simple docstring""" snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case = type self.model_tester.create_and_check_model(*lowerCAmelCase ) def snake_case ( self ): """simple 
docstring""" snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*lowerCAmelCase , gradient_checkpointing=lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCAmelCase ) @slow def snake_case ( self ): """simple docstring""" snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(lowerCAmelCase ) snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) snake_case = 'left' # Define PAD Token = EOS Token = 50256 snake_case = tokenizer.eos_token snake_case = model.config.eos_token_id # use different length sentences to test batching snake_case = [ 'Hello, my dog is a little', 'Today, I', ] snake_case = tokenizer(lowerCAmelCase , return_tensors='pt' , padding=lowerCAmelCase ) snake_case = inputs['input_ids'].to(lowerCAmelCase ) snake_case = model.generate( input_ids=lowerCAmelCase , attention_mask=inputs['attention_mask'].to(lowerCAmelCase ) , ) snake_case = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(lowerCAmelCase ) snake_case = model.generate(input_ids=lowerCAmelCase ) snake_case = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item() snake_case = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(lowerCAmelCase ) snake_case = model.generate(input_ids=lowerCAmelCase , max_length=model.config.max_length - num_paddings ) snake_case = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase ) snake_case = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase ) snake_case = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase ) snake_case = [ 'Hello, my dog is a little bit bigger than a little bit.', 'Today, I have a good idea of how to use the information', ] self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , [non_padded_sentence, padded_sentence] ) @slow def snake_case ( self ): """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case = BioGptModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common() snake_case = 3 snake_case = input_dict['input_ids'] snake_case = input_ids.ne(1 ).to(lowerCAmelCase ) snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case = BioGptForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def snake_case ( self ): """simple docstring""" snake_case ,snake_case = 
self.model_tester.prepare_config_and_inputs_for_common() snake_case = 3 snake_case = 'multi_label_classification' snake_case = input_dict['input_ids'] snake_case = input_ids.ne(1 ).to(lowerCAmelCase ) snake_case = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) snake_case = BioGptForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple docstring""" snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) snake_case = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) snake_case = model(lowerCAmelCase )[0] snake_case = 4_23_84 snake_case = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , lowerCAmelCase ) snake_case = torch.tensor( [[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase , atol=1E-4 ) ) @slow def snake_case ( self ): """simple docstring""" snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(lowerCAmelCase ) torch.manual_seed(0 ) snake_case = tokenizer('COVID-19 is' , return_tensors='pt' ).to(lowerCAmelCase ) snake_case = model.generate( **lowerCAmelCase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=lowerCAmelCase , ) snake_case = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase ) snake_case = ( 'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the' ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and' ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),' ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and' ' more than 800,000 deaths.' ) self.assertEqual(lowerCAmelCase , lowerCAmelCase )
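Distilled from the slow tests above, a minimal standalone generation sketch (uses only the microsoft/biogpt checkpoint and calls already exercised by the tests; weights download on first use):

import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

inputs = tokenizer("COVID-19 is", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_length=50, num_beams=5, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))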
"""simple docstring""" import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class UpperCamelCase ( __a ): def a_ ( self) -> List[str]: snake_case_ = tempfile.mkdtemp() snake_case_ = 5 # Realm tok snake_case_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'test', 'question', 'this', 'is', 'the', 'first', 'second', 'third', 'fourth', 'fifth', 'record', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] snake_case_ = os.path.join(self.tmpdirname, 'realm_tokenizer') os.makedirs(lowerCAmelCase__, exist_ok=lowerCAmelCase__) snake_case_ = os.path.join(lowerCAmelCase__, VOCAB_FILES_NAMES['vocab_file']) with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens])) snake_case_ = os.path.join(self.tmpdirname, 'realm_block_records') os.makedirs(lowerCAmelCase__, exist_ok=lowerCAmelCase__) def a_ ( self) -> List[str]: return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'realm_tokenizer')) def a_ ( self) -> Optional[Any]: shutil.rmtree(self.tmpdirname) def a_ ( self) -> Tuple: snake_case_ = RealmConfig(num_block_records=self.num_block_records) return config def a_ ( self) -> Union[str, Any]: snake_case_ = Dataset.from_dict( { 'id': ['0', '1'], 'question': ['foo', 'bar'], 'answers': [['Foo', 'Bar'], ['Bar']], }) return dataset def a_ ( self) -> Union[str, Any]: snake_case_ = np.array( [ b'This is the first record', b'This is the second record', b'This is the third record', b'This is the fourth record', b'This is the fifth record', b'This is a longer longer longer record', ], dtype=lowerCAmelCase__, ) return block_records def a_ ( self) -> Dict: snake_case_ = RealmRetriever( block_records=self.get_dummy_block_records(), tokenizer=self.get_tokenizer(), ) return retriever def a_ ( self) -> List[Any]: snake_case_ = self.get_config() snake_case_ = self.get_dummy_retriever() snake_case_ = retriever.tokenizer snake_case_ = np.array([0, 3], dtype='long') snake_case_ = tokenizer(['Test question']).input_ids snake_case_ = tokenizer( ['the fourth'], add_special_tokens=lowerCAmelCase__, return_token_type_ids=lowerCAmelCase__, return_attention_mask=lowerCAmelCase__, ).input_ids snake_case_ = config.reader_seq_len snake_case_ , snake_case_ , snake_case_ , snake_case_ = retriever( lowerCAmelCase__, lowerCAmelCase__, answer_ids=lowerCAmelCase__, max_length=lowerCAmelCase__, return_tensors='np') self.assertEqual(len(lowerCAmelCase__), 2) self.assertEqual(len(lowerCAmelCase__), 2) self.assertEqual(len(lowerCAmelCase__), 2) self.assertEqual(concat_inputs.input_ids.shape, (2, 10)) self.assertEqual(concat_inputs.attention_mask.shape, (2, 10)) self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10)) self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10)) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]), ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'], ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]), ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'], ) def a_ ( self) -> str: snake_case_ = self.get_config() 
snake_case_ = self.get_dummy_retriever() snake_case_ = retriever.tokenizer snake_case_ = np.array([0, 3, 5], dtype='long') snake_case_ = tokenizer(['Test question']).input_ids snake_case_ = tokenizer( ['the fourth', 'longer longer'], add_special_tokens=lowerCAmelCase__, return_token_type_ids=lowerCAmelCase__, return_attention_mask=lowerCAmelCase__, ).input_ids snake_case_ = config.reader_seq_len snake_case_ , snake_case_ , snake_case_ , snake_case_ = retriever( lowerCAmelCase__, lowerCAmelCase__, answer_ids=lowerCAmelCase__, max_length=lowerCAmelCase__, return_tensors='np') self.assertEqual([False, True, True], lowerCAmelCase__) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], lowerCAmelCase__) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], lowerCAmelCase__) def a_ ( self) -> Optional[Any]: snake_case_ = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname, 'realm_block_records')) # Test local path snake_case_ = retriever.from_pretrained(os.path.join(self.tmpdirname, 'realm_block_records')) self.assertEqual(retriever.block_records[0], b'This is the first record') # Test mocked remote path with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download: snake_case_ = os.path.join( os.path.join(self.tmpdirname, 'realm_block_records'), _REALM_BLOCK_RECORDS_FILENAME) snake_case_ = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa') self.assertEqual(retriever.block_records[0], b'This is the first record')
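Beyond the mocked test, the same retriever can be loaded from the public checkpoint referenced above (a sketch; assumes network access so the block-records file can be downloaded):

from transformers.models.realm.retrieval_realm import RealmRetriever

retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
print(retriever.block_records[0])  # first retrieval passage, stored as bytes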
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # The frontiers have met: no shorter path can be found.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
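Run on the example graphs defined above, the two frontiers meet on the E -> G -> F path:

print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3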
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    for x in _object:
        if not isinstance(x, str):
            raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
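A toy run of viterbi on the classic healthy/fever HMM; the probabilities below are hypothetical illustration values, not from the source:

observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
initial = {"Healthy": 0.6, "Fever": 0.4}
transition = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emission = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}

print(viterbi(observations, states, initial, transition, emission))
# ['Healthy', 'Healthy', 'Fever'] for these numbers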
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowercase =logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase ) class __magic_name__ ( lowerCAmelCase ): def __init__( self , **snake_case) -> Optional[int]: '''simple docstring''' super().__init__(**snake_case) requires_backends(self , 'vision') self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING) def __call__( self , snake_case , **snake_case) -> str: '''simple docstring''' return super().__call__(snake_case , **snake_case) def lowerCAmelCase ( self , **snake_case) -> int: '''simple docstring''' _UpperCAmelCase : str ={} if "candidate_labels" in kwargs: _UpperCAmelCase : Union[str, Any] =kwargs['candidate_labels'] if "hypothesis_template" in kwargs: _UpperCAmelCase : List[Any] =kwargs['hypothesis_template'] return preprocess_params, {}, {} def lowerCAmelCase ( self , snake_case , snake_case=None , snake_case="This is a photo of {}.") -> Any: '''simple docstring''' _UpperCAmelCase : Optional[Any] =load_image(snake_case) _UpperCAmelCase : Union[str, Any] =self.image_processor(images=[image] , return_tensors=self.framework) _UpperCAmelCase : Union[str, Any] =candidate_labels _UpperCAmelCase : List[Any] =[hypothesis_template.format(snake_case) for x in candidate_labels] _UpperCAmelCase : str =self.tokenizer(snake_case , return_tensors=self.framework , padding=snake_case) _UpperCAmelCase : Any =[text_inputs] return inputs def lowerCAmelCase ( self , snake_case) -> str: '''simple docstring''' _UpperCAmelCase : List[str] =model_inputs.pop('candidate_labels') _UpperCAmelCase : Tuple =model_inputs.pop('text_inputs') if isinstance(text_inputs[0] , snake_case): _UpperCAmelCase : Any =text_inputs[0] else: # Batching case. _UpperCAmelCase : str =text_inputs[0][0] _UpperCAmelCase : Any =self.model(**snake_case , **snake_case) _UpperCAmelCase : List[str] ={ 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def lowerCAmelCase ( self , snake_case) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : str =model_outputs.pop('candidate_labels') _UpperCAmelCase : Union[str, Any] =model_outputs['logits'][0] if self.framework == "pt": _UpperCAmelCase : Dict =logits.softmax(dim=-1).squeeze(-1) _UpperCAmelCase : Union[str, Any] =probs.tolist() if not isinstance(snake_case , snake_case): _UpperCAmelCase : Union[str, Any] =[scores] elif self.framework == "tf": _UpperCAmelCase : Dict =stable_softmax(snake_case , axis=-1) _UpperCAmelCase : str =probs.numpy().tolist() else: raise ValueError(f"Unsupported framework: {self.framework}") _UpperCAmelCase : List[str] =[ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(snake_case , snake_case) , key=lambda snake_case: -x[0]) ] return result
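The pipeline above is normally reached through the pipeline factory; a hedged usage sketch (the CLIP checkpoint and image URL are assumptions — any CLIP-style model works):

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "a plane"],
)
print(predictions)  # list of {"score": ..., "label": ...}, highest score first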
import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__) class __lowerCAmelCase ( a ): """simple docstring""" _SCREAMING_SNAKE_CASE = ['input_values', 'attention_mask'] def __init__( self : Any , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 1_6_0_0_0 , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : bool = False , _lowerCAmelCase : int = 8_0 , _lowerCAmelCase : int = 1_6 , _lowerCAmelCase : int = 6_4 , _lowerCAmelCase : str = "hann_window" , _lowerCAmelCase : float = 1.0 , _lowerCAmelCase : float = 8_0 , _lowerCAmelCase : float = 7_6_0_0 , _lowerCAmelCase : float = 1e-10 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : bool = True , **_lowerCAmelCase : Dict , ) -> Optional[Any]: """simple docstring""" super().__init__(feature_size=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , padding_value=_lowerCAmelCase , **_lowerCAmelCase ) snake_case_ = do_normalize snake_case_ = return_attention_mask snake_case_ = num_mel_bins snake_case_ = hop_length snake_case_ = win_length snake_case_ = win_function snake_case_ = frame_signal_scale snake_case_ = fmin snake_case_ = fmax snake_case_ = mel_floor snake_case_ = reduction_factor snake_case_ = win_length * sampling_rate // 1_0_0_0 snake_case_ = hop_length * sampling_rate // 1_0_0_0 snake_case_ = optimal_fft_length(self.sample_size ) snake_case_ = (self.n_fft // 2) + 1 snake_case_ = window_function(window_length=self.sample_size , name=self.win_function , periodic=_lowerCAmelCase ) snake_case_ = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , ) if frame_signal_scale != 1.0: warnings.warn( "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , _lowerCAmelCase , ) if reduction_factor != 2.0: warnings.warn( "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , _lowerCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCAmelCase__ ( _lowerCAmelCase : List[np.ndarray] , _lowerCAmelCase : List[np.ndarray] , _lowerCAmelCase : float = 0.0 ) -> List[np.ndarray]: """simple docstring""" if attention_mask is not None: snake_case_ = np.array(_lowerCAmelCase , np.intaa ) snake_case_ = [] for vector, length in zip(_lowerCAmelCase , attention_mask.sum(-1 ) ): snake_case_ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: snake_case_ = padding_value normed_input_values.append(_lowerCAmelCase ) else: snake_case_ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : np.ndarray , ) -> np.ndarray: """simple docstring""" snake_case_ = spectrogram( _lowerCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , ) return log_mel_spec.T def __call__( 
self : str , _lowerCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _lowerCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , _lowerCAmelCase : Optional[int] = None , **_lowerCAmelCase : Any , ) -> BatchFeature: """simple docstring""" if audio is None and audio_target is None: raise ValueError("You must provide either `audio` or `audio_target` values." ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if audio is not None: snake_case_ = self._process_audio( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase , ) else: snake_case_ = None if audio_target is not None: snake_case_ = self._process_audio( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase , ) if inputs is None: return inputs_target else: snake_case_ = inputs_target["input_values"] snake_case_ = inputs_target.get("attention_mask" ) if decoder_attention_mask is not None: snake_case_ = decoder_attention_mask return inputs def lowerCAmelCase__ ( self : Union[str, Any] , _lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _lowerCAmelCase : bool = False , _lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , **_lowerCAmelCase : Optional[int] , ) -> BatchFeature: """simple docstring""" snake_case_ = isinstance(_lowerCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) snake_case_ = is_batched_numpy or ( isinstance(_lowerCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ = [np.asarray(_lowerCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_lowerCAmelCase , np.ndarray ): snake_case_ = np.asarray(_lowerCAmelCase , dtype=np.floataa ) elif isinstance(_lowerCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): snake_case_ = speech.astype(np.floataa ) # always return batch if not is_batched: snake_case_ = [speech] # needed to make pad() work on spectrogram inputs snake_case_ = self.feature_size # convert into correct format for padding if is_target: snake_case_ = [self._extract_mel_features(_lowerCAmelCase ) for waveform in speech] snake_case_ = BatchFeature({"input_values": features} ) 
snake_case_ = self.num_mel_bins else: snake_case_ = BatchFeature({"input_values": speech} ) snake_case_ = self.pad( _lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , **_lowerCAmelCase , ) snake_case_ = feature_size_hack # convert input values to correct format snake_case_ = padded_inputs["input_values"] if not isinstance(input_values[0] , np.ndarray ): snake_case_ = [np.asarray(_lowerCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not isinstance(_lowerCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): snake_case_ = [array.astype(np.floataa ) for array in input_values] elif isinstance(_lowerCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): snake_case_ = input_values.astype(np.floataa ) # convert attention_mask to correct format snake_case_ = padded_inputs.get("attention_mask" ) if attention_mask is not None: snake_case_ = [np.asarray(_lowerCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: snake_case_ = ( attention_mask if self._get_padding_strategies(_lowerCAmelCase , max_length=_lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) snake_case_ = self.zero_mean_unit_var_norm( padded_inputs["input_values"] , attention_mask=_lowerCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: snake_case_ = padded_inputs.convert_to_tensors(_lowerCAmelCase ) return padded_inputs def lowerCAmelCase__ ( self : Any ) -> Dict[str, Any]: """simple docstring""" snake_case_ = super().to_dict() # Don't serialize these as they are derived from the other properties. snake_case_ = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"] for name in names: if name in output: del output[name] return output
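A hedged usage sketch of the feature extractor above with default settings and a synthetic waveform (shapes are illustrative): passing audio yields raw waveform features, while audio_target yields log-mel frames:

import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.random.randn(16000).astype(np.float32)  # 1 s of fake 16 kHz audio

inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
print(inputs["input_values"].shape)   # raw waveform values
print(targets["input_values"].shape)  # log-mel spectrogram frames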
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowerCAmelCase : """simple docstring""" def __init__( self : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int=1_3 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Any=True , _lowerCAmelCase : str=True , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Any=2_2_4 , _lowerCAmelCase : Any=1_0_0_0 , _lowerCAmelCase : Any=[3, 3, 6, 4] , _lowerCAmelCase : Any=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> List[Any]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = num_channels snake_case_ = is_training snake_case_ = use_labels snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = num_labels snake_case_ = image_size snake_case_ = layer_depths snake_case_ = embed_dims def lowerCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.num_labels ) snake_case_ = self.get_config() return config, pixel_values, labels def lowerCAmelCase__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1e-5 , ) def lowerCAmelCase__ ( self : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict ) -> str: """simple docstring""" snake_case_ = SwiftFormerModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() snake_case_ = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def lowerCAmelCase__ ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : int ) -> List[Any]: """simple docstring""" snake_case_ = self.num_labels snake_case_ = SwiftFormerForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() snake_case_ = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) snake_case_ = SwiftFormerForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase__ ( 
self : int ) -> Any: """simple docstring""" ((snake_case_) , (snake_case_) , (snake_case_)) = self.prepare_config_and_inputs() snake_case_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( a , a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () _SCREAMING_SNAKE_CASE = ( {'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False def lowerCAmelCase__ ( self : Optional[int] ) -> Any: """simple docstring""" snake_case_ = SwiftFormerModelTester(self ) snake_case_ = ConfigTester( self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , ) def lowerCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="SwiftFormer does not use inputs_embeds" ) def lowerCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" pass def lowerCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(_lowerCAmelCase ) snake_case_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) ) def lowerCAmelCase__ ( self : List[str] ) -> Any: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(_lowerCAmelCase ) snake_case_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def lowerCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def lowerCAmelCase__ ( self : str ) -> Any: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def lowerCAmelCase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = SwiftFormerModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip(reason="SwiftFormer does not output attentions" ) def lowerCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" pass def lowerCAmelCase__ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" def check_hidden_states_output(_lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple ): snake_case_ = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) snake_case_ = outputs.hidden_states snake_case_ = 8 self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width 
and height being successively divided by 2, after every 2 blocks for i in range(len(_lowerCAmelCase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def lowerCAmelCase__ ( self : Any ) -> Optional[int]: """simple docstring""" def _config_zero_init(_lowerCAmelCase : List[str] ): snake_case_ = copy.deepcopy(_lowerCAmelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(_lowerCAmelCase , _lowerCAmelCase , 1e-10 ) if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ): snake_case_ = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) ) setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return configs_no_init snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = _config_zero_init(_lowerCAmelCase ) for model_class in self.all_model_classes: snake_case_ = model_class(config=_lowerCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def lowerCAmelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass def _lowerCAmelCase ( )->str: '''simple docstring''' snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCAmelCase__ ( self : int ) -> Optional[int]: """simple docstring""" return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None @slow def lowerCAmelCase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" snake_case_ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(_lowerCAmelCase ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): snake_case_ = model(**_lowerCAmelCase ) # verify the logits snake_case_ = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) snake_case_ = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
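Outside the test harness, the integration checkpoint above can be used directly; a minimal classification sketch (the image path reuses the test fixture, so it is an assumption outside this repo):

import torch
from PIL import Image
from transformers import SwiftFormerForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")

image = Image.open("tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])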
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = DiTPipeline lowerCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS lowerCamelCase = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } lowerCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS lowerCamelCase = False def snake_case__ ( self : Union[str, Any] )-> Any: '''simple docstring''' torch.manual_seed(0 ) A__ = TransformeraDModel( sample_size=1_6,num_layers=2,patch_size=4,attention_head_dim=8,num_attention_heads=2,in_channels=4,out_channels=8,attention_bias=lowercase_,activation_fn='gelu-approximate',num_embeds_ada_norm=1_0_0_0,norm_type='ada_norm_zero',norm_elementwise_affine=lowercase_,) A__ = AutoencoderKL() A__ = DDIMScheduler() A__ = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def snake_case__ ( self : Optional[int],lowercase_ : Any,lowercase_ : int=0 )-> int: '''simple docstring''' if str(lowercase_ ).startswith('mps' ): A__ = torch.manual_seed(lowercase_ ) else: A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) A__ = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' A__ = 'cpu' A__ = self.get_dummy_components() A__ = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) A__ = self.get_dummy_inputs(lowercase_ ) A__ = pipe(**lowercase_ ).images A__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape,(1, 1_6, 1_6, 3) ) A__ = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) A__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_,1E-3 ) def snake_case__ ( self : List[Any] )-> List[str]: '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=lowercase_,expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',) def snake_case__ ( self : Any )-> List[str]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : List[str] )-> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' A__ = torch.manual_seed(0 ) A__ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) A__ = ['vase', 'umbrella', 'white shark', 'white wolf'] A__ = pipe.get_label_ids(lowercase_ ) A__ = pipe(lowercase_,generator=lowercase_,num_inference_steps=4_0,output_type='np' ).images for word, image in zip(lowercase_,lowercase_ ): A__ = load_numpy( 
F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def snake_case__ ( self : str )-> str: '''simple docstring''' A__ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) A__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) A__ = ['vase', 'umbrella'] A__ = pipe.get_label_ids(lowercase_ ) A__ = torch.manual_seed(0 ) A__ = pipe(lowercase_,generator=lowercase_,num_inference_steps=2_5,output_type='np' ).images for word, image in zip(lowercase_,lowercase_ ): A__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' F'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
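Condensed from the slow tests above, a minimal class-conditional sampling sketch (requires a CUDA device and downloads the DiT-XL/2 weights on first use):

import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

class_ids = pipe.get_label_ids(["white shark", "umbrella"])
generator = torch.manual_seed(0)
images = pipe(class_labels=class_ids, generator=generator, num_inference_steps=25).images
images[0].save("white_shark.png")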
282
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
282
1
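# Aside: the nested-loop bbox fix-up in the Lilt tester above can be expressed as one
# vectorized operation. This is an illustrative sketch, not part of the original test
# file; `bbox` is assumed to be an integer tensor of shape (batch, seq_len, 4).
import torch


def normalize_bboxes(bbox: torch.Tensor) -> torch.Tensor:
    """Ensure x0 <= x1 and y0 <= y1 for every (x0, y0, x1, y1) box."""
    x0, y0, x1, y1 = bbox.unbind(-1)
    return torch.stack(
        [torch.minimum(x0, x1), torch.minimum(y0, y1), torch.maximum(x0, x1), torch.maximum(y0, y1)],
        dim=-1,
    )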
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
225
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
225
1
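# Usage sketch for the config class above (hedged: this relies on the upstream
# `transformers.Speech2TextConfig`, which the snippet mirrors). It exercises the
# conv-module validation: the number of kernel sizes must match `num_conv_layers`.
from transformers import Speech2TextConfig

config = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))  # consistent: OK
try:
    Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))  # mismatch
except ValueError as err:
    print(err)  # explains that len(conv_kernel_sizes) must equal num_conv_layers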
import torch

from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # encode the query and the flattened support examples with the same encoder
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
354
'''simple docstring''' import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ): __lowercase = CanineTokenizer __lowercase = False def lowerCamelCase ( self ): """simple docstring""" super().setUp() _snake_case = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCamelCase ( self ): """simple docstring""" return CanineTokenizer.from_pretrained('google/canine-s' ) def lowerCamelCase ( self , **lowerCAmelCase_ ): """simple docstring""" _snake_case = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) _snake_case = 10_24 return tokenizer @require_torch def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.canine_tokenizer _snake_case = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.'] # fmt: off _snake_case = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on _snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = list(batch.input_ids.numpy()[0] ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.canine_tokenizer _snake_case = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.'] _snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn('input_ids' , lowerCAmelCase_ ) self.assertIn('attention_mask' , lowerCAmelCase_ ) self.assertIn('token_type_ids' , lowerCAmelCase_ ) @require_torch def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.canine_tokenizer _snake_case = [ 'What\'s the weater?', 'It\'s about 25 degrees.', ] _snake_case = tokenizer( text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , truncation=lowerCAmelCase_ , return_tensors='pt' ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test _snake_case = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc _snake_case = tempfile.mkdtemp() _snake_case = ' He is very happy, UNwant\u00E9d,running' _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) tokenizer.save_pretrained(lowerCAmelCase_ ) _snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase_ ) _snake_case = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) shutil.rmtree(lowerCAmelCase_ ) 
_snake_case = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc _snake_case = tempfile.mkdtemp() _snake_case = ' He is very happy, UNwant\u00E9d,running' _snake_case = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: _snake_case = chr(0XE_0_0_7 ) additional_special_tokens.append(lowerCAmelCase_ ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) tokenizer.save_pretrained(lowerCAmelCase_ ) _snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase_ ) _snake_case = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertIn(lowerCAmelCase_ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) _snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase_ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): _snake_case , _snake_case = self.get_clean_sequence(lowerCAmelCase_ ) # a special token for Canine can be defined as follows: _snake_case = 0XE_0_0_5 _snake_case = chr(lowerCAmelCase_ ) tokenizer.add_special_tokens({'cls_token': special_token} ) _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertEqual(len(lowerCAmelCase_ ) , 1 ) _snake_case = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCAmelCase_ ) _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , input_encoded + special_token_id ) _snake_case = tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) self.assertTrue(special_token not in decoded ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): _snake_case = chr(0XE_0_0_5 ) _snake_case = chr(0XE_0_0_6 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCAmelCase_ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} ) _snake_case = tokenizer.tokenize(lowerCAmelCase_ ) _snake_case = tokenizer.tokenize(lowerCAmelCase_ ) self.assertEqual(len(lowerCAmelCase_ ) , 1 ) self.assertEqual(len(lowerCAmelCase_ ) , 1 ) self.assertEqual(token_a[0] , lowerCAmelCase_ ) self.assertEqual(token_a[0] , lowerCAmelCase_ ) @require_tokenizers def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # a special token for Canine can be defined as follows: _snake_case = 0XE_0_0_6 _snake_case = chr(lowerCAmelCase_ ) _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ ) tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(lowerCAmelCase_ ) tokenizer.from_pretrained(lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowerCAmelCase_ ) with open(os.path.join(lowerCAmelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: _snake_case = json.load(lowerCAmelCase_ ) with open(os.path.join(lowerCAmelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: _snake_case = json.load(lowerCAmelCase_ ) # a special token for Canine can be defined as follows: _snake_case = 0XE_0_0_6 _snake_case = chr(lowerCAmelCase_ ) _snake_case = [new_token_a] _snake_case = [new_token_a] with open(os.path.join(lowerCAmelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) with open(os.path.join(lowerCAmelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _snake_case = tokenizer_class.from_pretrained(lowerCAmelCase_ , extra_ids=0 ) self.assertIn(lowerCAmelCase_ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) _snake_case = 0XE_0_0_7 _snake_case = chr(lowerCAmelCase_ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _snake_case = [AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ )] _snake_case = tokenizer_class.from_pretrained( lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , extra_ids=0 ) self.assertIn(lowerCAmelCase_ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): _snake_case = 'hello world' if self.space_between_special_tokens: _snake_case = '[CLS] hello world [SEP]' else: _snake_case = input _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer.decode(lowerCAmelCase_ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(lowerCAmelCase_ , [output, output.lower()] ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): _snake_case = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] _snake_case = 'a' _snake_case = ord(lowerCAmelCase_ ) for attr in attributes_list: setattr(lowerCAmelCase_ , attr + '_id' , lowerCAmelCase_ ) self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ ) self.assertEqual(getattr(lowerCAmelCase_ , attr + '_id' ) , lowerCAmelCase_ ) setattr(lowerCAmelCase_ , attr + '_id' , lowerCAmelCase_ ) self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ ) self.assertEqual(getattr(lowerCAmelCase_ , attr + '_id' ) , lowerCAmelCase_ ) setattr(lowerCAmelCase_ , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens_ids' ) , [] ) _snake_case = 0XE_0_0_6 _snake_case = chr(lowerCAmelCase_ ) setattr(lowerCAmelCase_ , 'additional_special_tokens_ids' , [additional_special_token_id] ) self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens' ) , [additional_special_token] ) self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens_ids' ) , [additional_special_token_id] ) def lowerCamelCase ( self ): """simple docstring""" pass def lowerCamelCase ( self ): """simple docstring""" pass def lowerCamelCase ( self ): """simple docstring""" pass def lowerCamelCase ( self ): """simple docstring""" pass def lowerCamelCase ( self ): """simple docstring""" pass def 
lowerCamelCase ( self ): """simple docstring""" pass def lowerCamelCase ( self ): """simple docstring""" pass def lowerCamelCase ( self ): """simple docstring""" pass
160
0
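# Illustration of the scoring pattern used in the FSNER model above: token-level
# similarities between query tokens and entity-boundary support tokens are summed
# and softmax-normalised into a start (or end) distribution over the query sequence.
# Shapes below are assumptions chosen for the sketch.
import torch

q = torch.randn(7, 768)        # query tokens (seq_len, hidden)
s_start = torch.randn(3, 768)  # support tokens tagged as entity starts

p_start = torch.matmul(q, s_start.T).sum(1).softmax(0)  # (seq_len,) probability distribution
assert torch.isclose(p_start.sum(), torch.tensor(1.0))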
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _lowerCamelCase : List[Any] = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['pixel_values'] def __init__( self : str , UpperCAmelCase__ : List[str] = True , UpperCAmelCase__ : str = None , UpperCAmelCase__ : Dict = PILImageResampling.BILINEAR , UpperCAmelCase__ : Union[str, Any] = True , UpperCAmelCase__ : List[Any] = None , UpperCAmelCase__ : List[str] = True , UpperCAmelCase__ : List[Any] = 1 / 255 , UpperCAmelCase__ : Union[str, Any] = True , UpperCAmelCase__ : Tuple = None , UpperCAmelCase__ : List[str] = None , **UpperCAmelCase__ : Tuple , ) ->None: '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE) A__ = size if size is not None else {"""shortest_edge""": 256} A__ = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE) A__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} A__ = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''') A__ = do_resize A__ = size A__ = resample A__ = do_center_crop A__ = crop_size A__ = do_rescale A__ = rescale_factor A__ = do_normalize A__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] = PILImageResampling.BICUBIC , UpperCAmelCase__ : List[Any] = None , **UpperCAmelCase__ : int , ) ->np.ndarray: '''simple docstring''' A__ = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""") A__ = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=_SCREAMING_SNAKE_CASE) return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] = None , **UpperCAmelCase__ : int , ) ->np.ndarray: '''simple docstring''' A__ = get_size_dict(_SCREAMING_SNAKE_CASE) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}""") return center_crop(_SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple = None , **UpperCAmelCase__ : List[str]) ->np.ndarray: '''simple docstring''' return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] = None , **UpperCAmelCase__ : str , ) ->np.ndarray: '''simple docstring''' return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int = None , UpperCAmelCase__ : Any = None , UpperCAmelCase__ : Any = None , UpperCAmelCase__ : Union[str, Any] = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : Any = None , UpperCAmelCase__ : Optional[Any] = None , UpperCAmelCase__ : str = None , UpperCAmelCase__ : Any = None , UpperCAmelCase__ : Union[str, Any] = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : Any = ChannelDimension.FIRST , **UpperCAmelCase__ : Any , ) ->Optional[Any]: '''simple docstring''' A__ = do_resize if do_resize is not None else self.do_resize A__ = size if size is not None else self.size A__ = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE) A__ = resample if resample is not None else self.resample A__ = do_center_crop if do_center_crop is not None else self.do_center_crop A__ = crop_size if crop_size is not None else self.crop_size A__ = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''') A__ = do_rescale if do_rescale is not None else self.do_rescale A__ = rescale_factor if rescale_factor is not None else self.rescale_factor A__ = do_normalize if do_normalize is not None else self.do_normalize A__ = image_mean if image_mean is not None else self.image_mean A__ = image_std if image_std is not None else self.image_std A__ = make_list_of_images(_SCREAMING_SNAKE_CASE) if not valid_images(_SCREAMING_SNAKE_CASE): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''') # All transformations expect numpy arrays. 
A__ = [to_numpy_array(_SCREAMING_SNAKE_CASE) for image in images] if do_resize: A__ = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE) for image in images] if do_center_crop: A__ = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE) for image in images] if do_rescale: A__ = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE) for image in images] if do_normalize: A__ = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE) for image in images] A__ = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) for image in images] A__ = {"""pixel_values""": images} return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any = None) ->List[Any]: '''simple docstring''' A__ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_SCREAMING_SNAKE_CASE) != len(_SCREAMING_SNAKE_CASE): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''') if is_torch_tensor(_SCREAMING_SNAKE_CASE): A__ = target_sizes.numpy() A__ = [] for idx in range(len(_SCREAMING_SNAKE_CASE)): A__ = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_SCREAMING_SNAKE_CASE) A__ = resized_logits[0].argmax(dim=0) semantic_segmentation.append(_SCREAMING_SNAKE_CASE) else: A__ = logits.argmax(dim=1) A__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation
14
"""simple docstring""" A: int = range(2, 2_0 + 1) A: Any = [1_0**k for k in range(ks[-1] + 1)] A: dict[int, dict[int, list[list[int]]]] = {} def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : int ): UpperCAmelCase : List[str] = sum(a_i[j] for j in range(UpperCamelCase , len(UpperCamelCase ) ) ) UpperCAmelCase : str = sum(a_i[j] * base[j] for j in range(min(len(UpperCamelCase ) , UpperCamelCase ) ) ) UpperCAmelCase , UpperCAmelCase : str = 0, 0 UpperCAmelCase : Optional[Any] = n - i UpperCAmelCase : Optional[int] = memo.get(UpperCamelCase ) if sub_memo is not None: UpperCAmelCase : str = sub_memo.get(UpperCamelCase ) if jumps is not None and len(UpperCamelCase ) > 0: # find and make the largest jump without going over UpperCAmelCase : Tuple = -1 for _k in range(len(UpperCamelCase ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: UpperCAmelCase : int = _k break if max_jump >= 0: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = jumps[max_jump] # since the difference between jumps is cached, add c UpperCAmelCase : List[str] = diff + c for j in range(min(UpperCamelCase , len(UpperCamelCase ) ) ): UpperCAmelCase , UpperCAmelCase : Union[str, Any] = divmod(UpperCamelCase , 10 ) if new_c > 0: add(UpperCamelCase , UpperCamelCase , UpperCamelCase ) else: UpperCAmelCase : int = [] else: UpperCAmelCase : List[str] = {c: []} UpperCAmelCase : str = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps UpperCAmelCase , UpperCAmelCase : List[str] = next_term(UpperCamelCase , k - 1 , i + dn , UpperCamelCase ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead UpperCAmelCase , UpperCAmelCase : int = compute(UpperCamelCase , UpperCamelCase , i + dn , UpperCamelCase ) diff += _diff dn += terms_jumped UpperCAmelCase : Dict = sub_memo[c] # keep jumps sorted by # of terms skipped UpperCAmelCase : str = 0 while j < len(UpperCamelCase ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(UpperCamelCase , (diff, dn, k) ) return (diff, dn) def _snake_case ( UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any ): if i >= n: return 0, i if k > len(UpperCamelCase ): a_i.extend([0 for _ in range(k - len(UpperCamelCase ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) UpperCAmelCase : List[str] = i UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = 0, 0, 0 for j in range(len(UpperCamelCase ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 UpperCAmelCase : Optional[int] = ds_c + ds_b diff += addend UpperCAmelCase : str = 0 for j in range(UpperCamelCase ): UpperCAmelCase : Any = a_i[j] + addend UpperCAmelCase , UpperCAmelCase : Any = divmod(UpperCamelCase , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return diff, i - start_i def _snake_case ( UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[int] ): for j in range(UpperCamelCase , len(UpperCamelCase ) ): UpperCAmelCase : Optional[int] = digits[j] + addend if s >= 10: UpperCAmelCase , UpperCAmelCase : int = divmod(UpperCamelCase , 10 ) UpperCAmelCase : str = addend // 10 + quotient else: UpperCAmelCase : Any = s UpperCAmelCase : Union[str, Any] = addend // 
10 if addend == 0: break while addend > 0: UpperCAmelCase , UpperCAmelCase : Any = divmod(UpperCamelCase , 10 ) digits.append(UpperCamelCase ) def _snake_case ( UpperCamelCase : int = 10**15 ): UpperCAmelCase : Dict = [1] UpperCAmelCase : int = 1 UpperCAmelCase : Tuple = 0 while True: UpperCAmelCase , UpperCAmelCase : Tuple = next_term(UpperCamelCase , 20 , i + dn , UpperCamelCase ) dn += terms_jumped if dn == n - i: break UpperCAmelCase : Any = 0 for j in range(len(UpperCamelCase ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(f"""{solution() = }""")
109
0
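# Minimal sketch of the semantic-segmentation post-processing performed by the image
# processor above: upsample the class logits to the target image size, then take the
# per-pixel argmax over classes. Tensor shapes are illustrative assumptions.
import torch

logits = torch.randn(1, 21, 64, 64)  # (batch, num_classes, h, w)
target_size = (480, 640)

resized = torch.nn.functional.interpolate(
    logits, size=target_size, mode="bilinear", align_corners=False
)
segmentation_map = resized.argmax(dim=1)[0]  # (480, 640) map of class ids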
import re


def dna(dna: str) -> str:
    """Return the complementary strand of a DNA sequence (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
124
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split


data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` with the k-nearest-neighbours algorithm."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
124
1
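# Worked example for the k-nearest-neighbours classifier above (it reuses the
# `classifier` function defined in that snippet): with k=1 the prediction is simply
# the label of the single closest training point. The points below are made up.
import numpy as np

train = [np.array([0.0, 0.0]), np.array([10.0, 10.0])]
labels = [0, 1]
names = ["setosa", "versicolor"]

# A point near (0, 0) picks up label 0 -> "setosa".
print(classifier(train, labels, names, [0.5, 0.2], k=1))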
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""


DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = (
            "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n"
            "    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        )
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = (
            "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
            "from ..utils import DummyObject, requires_backends\n\n\n"
            "CONSTANT = None\n\n\n"
            "def function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\n"
            "class FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n"
            "    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        )
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
138
import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class __A : @staticmethod def lowercase__ ( *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ): pass def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str: '''simple docstring''' lowerCAmelCase : List[Any] = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict: '''simple docstring''' lowerCAmelCase : Tuple = np.array(_UpperCAmelCase ) lowerCAmelCase : Dict = npimg.shape return {"hash": hashimage(_UpperCAmelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class __A ( unittest.TestCase ): lowerCAmelCase_ : Dict = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) lowerCAmelCase_ : Any = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ): lowerCAmelCase : List[str] = MaskGenerationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowercase__ ( self : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ): pass @require_tf @unittest.skip('Image segmentation not implemented in TF' ) def lowercase__ ( self : Dict ): pass @slow @require_torch def lowercase__ ( self : str ): lowerCAmelCase : Optional[int] = pipeline('mask-generation' , model='facebook/sam-vit-huge' ) lowerCAmelCase : Union[str, Any] = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 ) # Shortening by hashing lowerCAmelCase : List[str] = [] for i, o in enumerate(outputs['masks'] ): new_outupt += [{"mask": mask_to_test_readable(UpperCAmelCase_ ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44}, {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_21}, {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67}, {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32}, {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53}, {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.99_67}, {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_93}, {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.99_09}, {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.98_79}, {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.98_34}, {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.97_16}, {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.96_12}, {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.95_99}, {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.95_52}, {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.95_32}, {'mask': 
{'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.95_16}, {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.94_99}, {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.94_83}, {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.94_64}, {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_43}, {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_43}, {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.94_08}, {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.93_35}, {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.93_26}, {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.92_62}, {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.89_99}, {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.89_86}, {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.89_84}, {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.88_73}, {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.88_71} ] , ) # fmt: on @require_torch @slow def lowercase__ ( self : List[Any] ): lowerCAmelCase : Union[str, Any] = 'facebook/sam-vit-huge' lowerCAmelCase : str = pipeline('mask-generation' , model=UpperCAmelCase_ ) lowerCAmelCase : int = image_segmenter( 'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing lowerCAmelCase : Optional[int] = [] for i, o in enumerate(outputs['masks'] ): new_outupt += [{"mask": mask_to_test_readable(UpperCAmelCase_ ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44}, {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.02_10}, {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67}, {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32}, {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53}, ] , )
138
1
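# The mask-generation pipeline test above shortens each predicted mask to a stable
# fingerprint before comparing against references. A self-contained sketch of that
# fingerprinting pattern, built on `hashlib.md5` over the raw image bytes:
import hashlib

import numpy as np
from PIL import Image

mask = Image.fromarray((np.random.rand(480, 640) > 0.5).astype("uint8") * 255)
fingerprint = hashlib.md5(mask.tobytes()).hexdigest()[:10]  # short, deterministic id
print(fingerprint, np.array(mask).shape)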
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""", """BridgeTower/bridgetower-base-itm-mlm""": ( """https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json""" ), } class UpperCamelCase__ ( A__ ): '''simple docstring''' __snake_case : Dict = "bridgetower_vision_model" def __init__( self : List[str] ,lowerCamelCase__ : Optional[int]=768 ,lowerCamelCase__ : int=12 ,lowerCamelCase__ : Optional[int]=3 ,lowerCamelCase__ : str=16 ,lowerCamelCase__ : Optional[int]=288 ,lowerCamelCase__ : List[Any]=1 ,lowerCamelCase__ : List[str]=1e-0_5 ,lowerCamelCase__ : Dict=False ,lowerCamelCase__ : str=True ,lowerCamelCase__ : Optional[int]=False ,**lowerCamelCase__ : str ,) -> List[str]: '''simple docstring''' super().__init__(**__snake_case ) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = initializer_factor SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = stop_gradient SCREAMING_SNAKE_CASE = share_layernorm SCREAMING_SNAKE_CASE = remove_last_layer @classmethod def SCREAMING_SNAKE_CASE__ ( cls : int ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : Optional[Any] ) -> "PretrainedConfig": '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(__snake_case ,**__snake_case ) if config_dict.get("""model_type""" ) == "bridgetower": SCREAMING_SNAKE_CASE = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__snake_case ,**__snake_case ) class UpperCamelCase__ ( A__ ): '''simple docstring''' __snake_case : List[Any] = "bridgetower_text_model" def __init__( self : int ,lowerCamelCase__ : Dict=50265 ,lowerCamelCase__ : List[Any]=768 ,lowerCamelCase__ : Any=12 ,lowerCamelCase__ : Any=12 ,lowerCamelCase__ : str=1 ,lowerCamelCase__ : int=3072 ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : Optional[int]=0.1 ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=514 ,lowerCamelCase__ : Tuple=1 ,lowerCamelCase__ : Tuple=1e-0_5 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : str=0 ,lowerCamelCase__ : List[str]=2 ,lowerCamelCase__ : Optional[int]="absolute" ,lowerCamelCase__ : int=True ,**lowerCamelCase__ : Dict ,) -> List[Any]: '''simple docstring''' super().__init__(**__snake_case ) SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = initializer_factor SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = pad_token_id SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id @classmethod def SCREAMING_SNAKE_CASE__ ( cls : int ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : Dict ) -> "PretrainedConfig": '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(__snake_case ,**__snake_case ) if config_dict.get("""model_type""" ) == "bridgetower": SCREAMING_SNAKE_CASE = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__snake_case ,**__snake_case ) class UpperCamelCase__ ( A__ ): '''simple docstring''' __snake_case : Any = "bridgetower" def __init__( self : List[str] ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : str="gelu" ,lowerCamelCase__ : Union[str, Any]=768 ,lowerCamelCase__ : Any=1 ,lowerCamelCase__ : Any=1e-0_5 ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : List[str]="add" ,lowerCamelCase__ : Tuple=12 ,lowerCamelCase__ : Union[str, Any]=6 ,lowerCamelCase__ : Optional[Any]=False ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Any=None ,lowerCamelCase__ : List[Any]=None ,**lowerCamelCase__ : List[Any] ,) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = kwargs.pop("""text_config_dict""" ,__snake_case ) SCREAMING_SNAKE_CASE = kwargs.pop("""vision_config_dict""" ,__snake_case ) super().__init__(**__snake_case ) SCREAMING_SNAKE_CASE = share_cross_modal_transformer_layers SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = initializer_factor SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = share_link_tower_layers SCREAMING_SNAKE_CASE = link_tower_type SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = tie_word_embeddings SCREAMING_SNAKE_CASE = init_layernorm_from_vision_encoder if text_config is None: SCREAMING_SNAKE_CASE = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: SCREAMING_SNAKE_CASE = {} logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" ) SCREAMING_SNAKE_CASE = BridgeTowerTextConfig(**__snake_case ) SCREAMING_SNAKE_CASE = BridgeTowerVisionConfig(**__snake_case ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : int ,lowerCamelCase__ : BridgeTowerTextConfig ,lowerCamelCase__ : BridgeTowerVisionConfig ,**lowerCamelCase__ : Tuple ) -> str: '''simple docstring''' return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**__snake_case ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE = self.text_config.to_dict() SCREAMING_SNAKE_CASE = self.vision_config.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output
353
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
193
0
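# Editorial note: the `__init__.py` record above follows the transformers lazy-import
# pattern. The core idea can be sketched without the `_LazyModule` helper using a
# module-level `__getattr__` (PEP 562); the module and symbol names below are
# placeholders, not part of the record above.
import importlib

_import_structure = {"configuration_foo": ["FooConfig"]}  # hypothetical submodule/symbols


def __getattr__(name):
    # Import the submodule that defines `name` only on first attribute access.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")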
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
11
"""simple docstring""" from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def __a ( ): UpperCAmelCase_ : List[Any] = { "repo_name": ["test_repo1", "test_repo2", "test_repo3"], "path": ["test_1.py", "test_2.py", "unit_test.py"], "content": ["a " * 20, "a " * 30, "b " * 7], } UpperCAmelCase_ : Optional[int] = Dataset.from_dict(__lowerCamelCase ) return dataset class A_ (lowercase__ ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = get_dataset() UpperCAmelCase_ : Any = make_duplicate_clusters(lowercase_ , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = get_dataset() UpperCAmelCase_ , UpperCAmelCase_ : List[str] = deduplicate_dataset(lowercase_ ) self.assertEqual(len(lowercase_ ) , 2 ) print(lowercase_ ) self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 ) self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowercase_ )
61
0
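# Editorial note: the MinHash deduplication test above ultimately measures overlap of
# token shingles. A minimal pure-Python Jaccard sketch of that idea follows; the
# shingle size and threshold are illustrative, not the values the tested module uses.
def shingles(text: str, n: int = 3) -> set:
    tokens = text.split()
    return {" ".join(tokens[i : i + n]) for i in range(max(len(tokens) - n + 1, 1))}


def jaccard(a: str, b: str) -> float:
    sa, sb = shingles(a), shingles(b)
    return len(sa & sb) / len(sa | sb) if sa | sb else 0.0


# Two documents count as near-duplicates above some threshold, e.g. 0.85.
print(jaccard("a " * 20, "a " * 30))  # 1.0, identical shingle sets
print(jaccard("a " * 20, "b " * 7))   # 0.0, disjoint shingle sets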
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
356
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer: maps each character of the input to its vocab id."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
19
0
"""simple docstring""" def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = current_set.copy() for row_index, row in enumerate(lowercase_ ): UpperCAmelCase = row[0] for column_index, column in enumerate(lowercase_ ): if magnitude == 0: UpperCAmelCase = column continue UpperCAmelCase = column / magnitude # Subtract to cancel term UpperCAmelCase = current_set[0] UpperCAmelCase = [first_row] UpperCAmelCase = current_set[1::] for row in current_set: UpperCAmelCase = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(lowercase_ ) continue for column_index in range(len(lowercase_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(lowercase_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: UpperCAmelCase = final_set[0] UpperCAmelCase = [] UpperCAmelCase = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) UpperCAmelCase = simplify(lowercase_ ) for i in range(len(lowercase_ ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , lowercase_ ) UpperCAmelCase = resultant return final_set def _lowerCAmelCase ( lowercase_ ): if len(lowercase_ ) == 0: raise IndexError('solve_simultaneous() requires n lists of length n+1' ) UpperCAmelCase = len(lowercase_ ) + 1 if any(len(lowercase_ ) != _length for item in equations ): raise IndexError('solve_simultaneous() requires n lists of length n+1' ) for row in equations: if any(not isinstance(lowercase_ , (int, float) ) for column in row ): raise ValueError('solve_simultaneous() requires lists of integers' ) if len(lowercase_ ) == 1: return [equations[0][-1] / equations[0][0]] UpperCAmelCase = equations.copy() if any(0 in row for row in data_set ): UpperCAmelCase = data_set.copy() UpperCAmelCase = [] for row_index, row in enumerate(lowercase_ ): if 0 not in row: UpperCAmelCase = data_set.pop(lowercase_ ) break if not full_row: raise ValueError('solve_simultaneous() requires at least 1 full equation' ) data_set.insert(0 , lowercase_ ) UpperCAmelCase = data_set.copy() UpperCAmelCase = simplify(lowercase_ ) UpperCAmelCase = simplified[::-1] UpperCAmelCase = [] for row in simplified: UpperCAmelCase = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue UpperCAmelCase = row.copy()[: len(lowercase_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(lowercase_ ) == 0: solutions.append(0 ) continue UpperCAmelCase = temp_row[1::] UpperCAmelCase = temp_row[::-1] for column_index, column in enumerate(lowercase_ ): current_solution -= column * solutions[column_index] solutions.append(lowercase_ ) UpperCAmelCase = [] for item in solutions: final.append(float(round(lowercase_ , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() snake_case_ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
78
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
70
0
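# Editorial note: an elimination-based solver like `solve_simultaneous` above is easy
# to validate against an independent reference. The sketch below splits the augmented
# matrix [A | b] and cross-checks with numpy, which is used here purely for the check.
import numpy as np

eq = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
a = np.array([row[:-1] for row in eq], dtype=float)  # coefficient matrix A
b = np.array([row[-1] for row in eq], dtype=float)   # right-hand side b
print(np.linalg.solve(a, b))  # should match solve_simultaneous(eq) up to rounding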
import os
from argparse import ArgumentParser, Namespace

from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand


if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
368
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each term adds roughly 14 digits
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
142
0
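# Editorial note: an arbitrary-precision pi routine like the one above is best
# sanity-checked against an independent reference. The sketch below assumes mpmath
# is available (it is not a dependency of the record above) and compares prefixes.
from mpmath import mp


def check_pi(pi_func, digits: int = 30) -> bool:
    mp.dps = digits + 10                     # working precision with guard digits
    reference = mp.nstr(mp.pi, digits + 5)   # e.g. '3.14159...'
    return pi_func(digits)[:digits] == reference[:digits]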
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if iterating the list revisits a node, i.e. the list is cyclic."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
149
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
149
1
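# Editorial note: the `has_loop` check above keeps a visited list, which costs O(n)
# extra space. Floyd's tortoise-and-hare detects the same loops in constant space;
# a minimal sketch against the same `Node` shape (attribute `next_node`) follows.
def has_loop_floyd(head) -> bool:
    # Two pointers at different speeds meet if and only if the list is cyclic.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advances one node per step
        fast = fast.next_node.next_node  # advances two nodes per step
        if slow is fast:
            return True
    return False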
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
207
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch

from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
207
1
"""simple docstring""" import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() _A = logging.get_logger(__name__) _A = """The Nymphenburg Palace is a beautiful palace in Munich!""" def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: lowerCAmelCase__ : str = { """attention_cell""": """multi_head""", """num_layers""": 4, """units""": 1024, """hidden_size""": 768, """max_length""": 512, """num_heads""": 8, """scaled""": True, """dropout""": 0.1, """use_residual""": True, """embed_size""": 1024, """embed_dropout""": 0.1, """word_embed""": None, """layer_norm_eps""": 1E-5, """token_type_vocab_size""": 2, } lowerCAmelCase__ : Dict = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py lowerCAmelCase__ : Optional[int] = BERTEncoder( attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=__UpperCAmelCase , output_all_encodings=__UpperCAmelCase , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , __UpperCAmelCase ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later lowerCAmelCase__ : Dict = """openwebtext_ccnews_stories_books_cased""" # Specify download folder to Gluonnlp's vocab lowerCAmelCase__ : Dict = os.path.join(get_home_dir() , """models""" ) lowerCAmelCase__ : Optional[int] = _load_vocab(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , cls=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = nlp.model.BERTModel( __UpperCAmelCase , len(__UpperCAmelCase ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=__UpperCAmelCase , use_token_type_embed=__UpperCAmelCase , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=__UpperCAmelCase , use_decoder=__UpperCAmelCase , ) original_bort.load_parameters(__UpperCAmelCase , cast_dtype=__UpperCAmelCase , ignore_extra=__UpperCAmelCase ) lowerCAmelCase__ : List[str] = original_bort._collect_params_with_prefix() # Build our config 🤗 lowerCAmelCase__ : List[Any] = { """architectures""": ["""BertForMaskedLM"""], """attention_probs_dropout_prob""": predefined_args["""dropout"""], 
"""hidden_act""": """gelu""", """hidden_dropout_prob""": predefined_args["""dropout"""], """hidden_size""": predefined_args["""embed_size"""], """initializer_range""": 0.02, """intermediate_size""": predefined_args["""hidden_size"""], """layer_norm_eps""": predefined_args["""layer_norm_eps"""], """max_position_embeddings""": predefined_args["""max_length"""], """model_type""": """bort""", """num_attention_heads""": predefined_args["""num_heads"""], """num_hidden_layers""": predefined_args["""num_layers"""], """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa """vocab_size""": len(__UpperCAmelCase ), } lowerCAmelCase__ : Dict = BertConfig.from_dict(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = BertForMaskedLM(__UpperCAmelCase ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(__UpperCAmelCase ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Tuple = hf_param.shape lowerCAmelCase__ : int = to_torch(params[gluon_param] ) lowerCAmelCase__ : Dict = gluon_param.shape assert ( shape_hf == shape_gluon ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape 
{shape_hf} for Transformers""" return gluon_param lowerCAmelCase__ : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" ) lowerCAmelCase__ : int = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" ) lowerCAmelCase__ : Optional[Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" ) lowerCAmelCase__ : Optional[Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) lowerCAmelCase__ : Optional[Any] = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): lowerCAmelCase__ : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention lowerCAmelCase__ : BertSelfAttention = layer.attention.self lowerCAmelCase__ : Optional[Any] = check_and_map_params( self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" ) lowerCAmelCase__ : Any = check_and_map_params( self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" ) lowerCAmelCase__ : Any = check_and_map_params( self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" ) lowerCAmelCase__ : Any = check_and_map_params( self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" ) lowerCAmelCase__ : List[Any] = check_and_map_params( self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" ) lowerCAmelCase__ : Dict = check_and_map_params( self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" ) # self attention output lowerCAmelCase__ : BertSelfOutput = layer.attention.output lowerCAmelCase__ : int = check_and_map_params( self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" ) lowerCAmelCase__ : Union[str, Any] = check_and_map_params( self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" ) lowerCAmelCase__ : int = check_and_map_params( self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" ) lowerCAmelCase__ : Any = check_and_map_params( self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" ) # intermediate lowerCAmelCase__ : BertIntermediate = layer.intermediate lowerCAmelCase__ : Union[str, Any] = check_and_map_params( intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" ) lowerCAmelCase__ : str = check_and_map_params( intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" ) # output lowerCAmelCase__ : BertOutput = layer.output lowerCAmelCase__ : List[str] = check_and_map_params( bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" ) lowerCAmelCase__ : Optional[int] = check_and_map_params( bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" ) lowerCAmelCase__ : Tuple = check_and_map_params( bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" ) lowerCAmelCase__ : Any = check_and_map_params( bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models lowerCAmelCase__ : Dict = 
RobertaTokenizer.from_pretrained("""roberta-base""" ) lowerCAmelCase__ : Optional[int] = tokenizer.encode_plus(__UpperCAmelCase )["""input_ids"""] # Get gluon output lowerCAmelCase__ : Union[str, Any] = mx.nd.array([input_ids] ) lowerCAmelCase__ : Dict = original_bort(inputs=__UpperCAmelCase , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Any = BertModel.from_pretrained(__UpperCAmelCase ) hf_bort_model.eval() lowerCAmelCase__ : Optional[int] = tokenizer.encode_plus(__UpperCAmelCase , return_tensors="""pt""" ) lowerCAmelCase__ : Optional[int] = hf_bort_model(**__UpperCAmelCase )[0] lowerCAmelCase__ : Optional[int] = output_gluon[0].asnumpy() lowerCAmelCase__ : Union[str, Any] = output_hf[0].detach().numpy() lowerCAmelCase__ : List[str] = np.max(np.abs(hf_layer - gluon_layer ) ).item() lowerCAmelCase__ : int = np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) if success: print("""✔️ Both model do output the same tensors""" ) else: print("""❌ Both model do **NOT** output the same tensors""" ) print("""Absolute difference is:""" , __UpperCAmelCase ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _A = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
242
"""simple docstring""" from itertools import product def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> list[int]: lowerCAmelCase__ : Union[str, Any] = sides_number lowerCAmelCase__ : Optional[int] = max_face_number * dice_number lowerCAmelCase__ : List[str] = [0] * (max_total + 1) lowerCAmelCase__ : Union[str, Any] = 1 lowerCAmelCase__ : Optional[int] = range(__UpperCAmelCase , max_face_number + 1 ) for dice_numbers in product(__UpperCAmelCase , repeat=__UpperCAmelCase ): lowerCAmelCase__ : str = sum(__UpperCAmelCase ) totals_frequencies[total] += 1 return totals_frequencies def lowercase_ ( ) -> float: lowerCAmelCase__ : Union[str, Any] = total_frequency_distribution( sides_number=4 , dice_number=9 ) lowerCAmelCase__ : Tuple = total_frequency_distribution( sides_number=6 , dice_number=6 ) lowerCAmelCase__ : str = 0 lowerCAmelCase__ : int = 9 lowerCAmelCase__ : Tuple = 4 * 9 lowerCAmelCase__ : Optional[int] = 6 for peter_total in range(__UpperCAmelCase , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) lowerCAmelCase__ : Tuple = (4**9) * (6**6) lowerCAmelCase__ : Union[str, Any] = peter_wins_count / total_games_number lowerCAmelCase__ : Optional[int] = round(__UpperCAmelCase , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(f"""{solution() = }""")
242
1
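# Editorial note: the exhaustive enumeration above can be spot-checked with a quick
# Monte Carlo simulation. The sample count below is arbitrary, so agreement with the
# exact answer (about 0.5731441) is only approximate.
import random


def estimate_peter_win_probability(trials: int = 100_000) -> float:
    # Peter: nine 4-sided dice; Colin: six 6-sided dice.
    wins = 0
    for _ in range(trials):
        peter = sum(random.randint(1, 4) for _ in range(9))
        colin = sum(random.randint(1, 6) for _ in range(6))
        wins += peter > colin
    return wins / trials


print(estimate_peter_win_probability())  # roughly 0.573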
"""simple docstring""" from __future__ import annotations import requests a__ : List[str] = set( '''approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports'''.split() ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "new" , lowerCAmelCase_ = None ): '''simple docstring''' __SCREAMING_SNAKE_CASE = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(lowerCAmelCase_ ) - valid_terms ) ): __SCREAMING_SNAKE_CASE = f"""Invalid search term: {invalid_search_terms}""" raise ValueError(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = requests.get( f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={"User-agent": "A random string"} , ) if response.status_code == 429: raise requests.HTTPError __SCREAMING_SNAKE_CASE = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(lowerCAmelCase_ )} __SCREAMING_SNAKE_CASE = {} for id_ in range(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
195
"""simple docstring""" import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging a__ : Optional[Any] = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : int = CLIPConfig snake_case__ : str = ["CLIPEncoderLayer"] def __init__( self : Optional[int] , UpperCAmelCase__ : CLIPConfig ) -> Dict: super().__init__(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = CLIPVisionModelWithProjection(config.vision_config ) __SCREAMING_SNAKE_CASE = nn.Linear(config.vision_config.projection_dim , 1 ) __SCREAMING_SNAKE_CASE = nn.Linear(config.vision_config.projection_dim , 1 ) @torch.no_grad() def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : int=0.5 , UpperCAmelCase__ : Optional[int]=0.5 ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.vision_model(UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = self.p_head(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = nsfw_detected.flatten() __SCREAMING_SNAKE_CASE = nsfw_detected > p_threshold __SCREAMING_SNAKE_CASE = nsfw_detected.tolist() if any(UpperCAmelCase__ ): logger.warning( "Potential NSFW content was detected in one or more images. A black image will be returned instead." " Try again with a different prompt and/or seed." ) for idx, nsfw_detected_ in enumerate(UpperCAmelCase__ ): if nsfw_detected_: __SCREAMING_SNAKE_CASE = np.zeros(images[idx].shape ) __SCREAMING_SNAKE_CASE = self.w_head(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = watermark_detected.flatten() __SCREAMING_SNAKE_CASE = watermark_detected > w_threshold __SCREAMING_SNAKE_CASE = watermark_detected.tolist() if any(UpperCAmelCase__ ): logger.warning( "Potential watermarked content was detected in one or more images. A black image will be returned instead." " Try again with a different prompt and/or seed." ) for idx, watermark_detected_ in enumerate(UpperCAmelCase__ ): if watermark_detected_: __SCREAMING_SNAKE_CASE = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
195
1
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
282
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to its binary equivalent, returned as an int of 0/1 digits."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
282
1
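# Editorial note: CPython's built-ins give the same hex-to-binary conversion in a
# couple of lines, which makes a convenient property test for the manual digit loop
# above. The helper name is illustrative.
def hex_to_bin_builtin(hex_num: str) -> int:
    # int(..., 16) parses the (possibly signed) hex string; format(..., "b") renders
    # the binary digits, which are then re-read as a decimal int to match the manual
    # routine's return convention.
    value = int(hex_num.strip(), 16)
    return int(format(abs(value), "b")) * (-1 if value < 0 else 1)


assert hex_to_bin_builtin("AC") == 10101100
assert hex_to_bin_builtin("-fFfF") == -1111111111111111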
def longest_common_substring(text1: str, text2: str) -> str:
    """Find the longest common substring of two strings via dynamic programming."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
270
import gc
import unittest

from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
270
1
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
58
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor A = logging.get_logger(__name__) class __lowercase ( _UpperCamelCase ): '''simple docstring''' def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ): warnings.warn( '''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use DeformableDetrImageProcessor instead.''' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
160
0
"""simple docstring""" import numpy as np def lowercase__( __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : float = 1E-12 , __SCREAMING_SNAKE_CASE : int = 1_00 , ): assert np.shape(__SCREAMING_SNAKE_CASE )[0] == np.shape(__SCREAMING_SNAKE_CASE )[1] # Ensure proper dimensionality. assert np.shape(__SCREAMING_SNAKE_CASE )[0] == np.shape(__SCREAMING_SNAKE_CASE )[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(__SCREAMING_SNAKE_CASE ) == np.iscomplexobj(__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = np.iscomplexobj(__SCREAMING_SNAKE_CASE ) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(__SCREAMING_SNAKE_CASE , input_matrix.conj().T ) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. lowercase_ : Union[str, Any] = False lowercase_ : int = 0 lowercase_ : Dict = 0 lowercase_ : Tuple = 1E12 while not convergence: # Multiple matrix by the vector. lowercase_ : str = np.dot(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Normalize the resulting output vector. lowercase_ : Union[str, Any] = w / np.linalg.norm(__SCREAMING_SNAKE_CASE ) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) lowercase_ : Union[str, Any] = vector.conj().T if is_complex else vector.T lowercase_ : Union[str, Any] = np.dot(__SCREAMING_SNAKE_CASE , np.dot(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) # Check convergence. lowercase_ : Optional[int] = np.abs(lambda_ - lambda_previous ) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: lowercase_ : Union[str, Any] = True lowercase_ : Optional[int] = lambda_ if is_complex: lowercase_ : List[Any] = np.real(lambda_ ) return lambda_, vector def lowercase__( ): lowercase_ : Tuple = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] ) lowercase_ : List[str] = np.array([41, 4, 20] ) lowercase_ : Any = real_input_matrix.astype(np.complexaaa ) lowercase_ : Union[str, Any] = np.triu(1J * complex_input_matrix , 1 ) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T lowercase_ : str = np.array([41, 4, 20] ).astype(np.complexaaa ) for problem_type in ["real", "complex"]: if problem_type == "real": lowercase_ : Optional[int] = real_input_matrix lowercase_ : str = real_vector elif problem_type == "complex": lowercase_ : Optional[int] = complex_input_matrix lowercase_ : Optional[Any] = complex_vector # Our implementation. lowercase_ , lowercase_ : Optional[Any] = power_iteration(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or hermetian matrices). lowercase_ , lowercase_ : List[Any] = np.linalg.eigh(__SCREAMING_SNAKE_CASE ) # Last eigenvalue is the maximum one. lowercase_ : List[Any] = eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. lowercase_ : Optional[int] = eigen_vectors[:, -1] # Check our implementation and numpy gives close answers. assert np.abs(eigen_value - eigen_value_max ) <= 1E-6 # Take absolute values element wise of each eigenvector. # as they are only unique to a minus sign. assert np.linalg.norm(np.abs(__SCREAMING_SNAKE_CASE ) - np.abs(__SCREAMING_SNAKE_CASE ) ) <= 1E-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
321
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
321
1
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a pattern from REPLACE_PATTERNS."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links from the main doc to the stable doc in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
124
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
# Grids used to sanity-check the counting functions; all are sorted in
# decreasing order along both rows and columns.
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasing array via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with one binary search per row, shrinking the bound as we go."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
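# A minimal usage sketch for count_negatives_binary_search (not part of the
# original file); the grid is the first entry of test_grids above, which
# contains 8 negative numbers.
#
#   >>> count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]])
#   8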
124
1
"""simple docstring""" from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class a_ ( nn.Module ): '''simple docstring''' def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=0.0, lowerCamelCase_ = None, lowerCamelCase_ = "geglu", lowerCamelCase_ = None, lowerCamelCase_ = False, lowerCamelCase_ = False, lowerCamelCase_ = False, lowerCamelCase_ = False, lowerCamelCase_ = True, lowerCamelCase_ = "layer_norm", lowerCamelCase_ = False, ): '''simple docstring''' super().__init__() lowerCamelCase__ : Union[str, Any] = only_cross_attention lowerCamelCase__ : Dict = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero' lowerCamelCase__ : Tuple = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: lowerCamelCase__ : Optional[int] = AdaLayerNorm(lowerCamelCase_, lowerCamelCase_ ) elif self.use_ada_layer_norm_zero: lowerCamelCase__ : Optional[int] = AdaLayerNormZero(lowerCamelCase_, lowerCamelCase_ ) else: lowerCamelCase__ : List[Any] = nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_ ) lowerCamelCase__ : List[str] = Attention( query_dim=lowerCamelCase_, heads=lowerCamelCase_, dim_head=lowerCamelCase_, dropout=lowerCamelCase_, bias=lowerCamelCase_, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=lowerCamelCase_, ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. lowerCamelCase__ : Optional[int] = ( AdaLayerNorm(lowerCamelCase_, lowerCamelCase_ ) if self.use_ada_layer_norm else nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_ ) ) lowerCamelCase__ : List[Any] = Attention( query_dim=lowerCamelCase_, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=lowerCamelCase_, dim_head=lowerCamelCase_, dropout=lowerCamelCase_, bias=lowerCamelCase_, upcast_attention=lowerCamelCase_, ) # is self-attn if encoder_hidden_states is none else: lowerCamelCase__ : Optional[Any] = None lowerCamelCase__ : Dict = None # 3. 
Feed-forward lowerCamelCase__ : str = nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_ ) lowerCamelCase__ : List[Any] = FeedForward(lowerCamelCase_, dropout=lowerCamelCase_, activation_fn=lowerCamelCase_, final_dropout=lowerCamelCase_ ) # let chunk size default to None lowerCamelCase__ : Optional[Any] = None lowerCamelCase__ : str = 0 def a__ (self, lowerCamelCase_, lowerCamelCase_ ): '''simple docstring''' lowerCamelCase__ : int = chunk_size lowerCamelCase__ : Optional[Any] = dim def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, ): '''simple docstring''' if self.use_ada_layer_norm: lowerCamelCase__ : Optional[int] = self.norma(lowerCamelCase_, lowerCamelCase_ ) elif self.use_ada_layer_norm_zero: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = self.norma( lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, hidden_dtype=hidden_states.dtype ) else: lowerCamelCase__ : int = self.norma(lowerCamelCase_ ) lowerCamelCase__ : Any = cross_attention_kwargs if cross_attention_kwargs is not None else {} lowerCamelCase__ : str = self.attna( lowerCamelCase_, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=lowerCamelCase_, **lowerCamelCase_, ) if self.use_ada_layer_norm_zero: lowerCamelCase__ : int = gate_msa.unsqueeze(1 ) * attn_output lowerCamelCase__ : Union[str, Any] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: lowerCamelCase__ : Tuple = ( self.norma(lowerCamelCase_, lowerCamelCase_ ) if self.use_ada_layer_norm else self.norma(lowerCamelCase_ ) ) lowerCamelCase__ : Dict = self.attna( lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, attention_mask=lowerCamelCase_, **lowerCamelCase_, ) lowerCamelCase__ : str = attn_output + hidden_states # 3. Feed-forward lowerCamelCase__ : Optional[int] = self.norma(lowerCamelCase_ ) if self.use_ada_layer_norm_zero: lowerCamelCase__ : Optional[Any] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) lowerCamelCase__ : int = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size lowerCamelCase__ : Optional[Any] = torch.cat( [self.ff(lowerCamelCase_ ) for hid_slice in norm_hidden_states.chunk(lowerCamelCase_, dim=self._chunk_dim )], dim=self._chunk_dim, ) else: lowerCamelCase__ : List[str] = self.ff(lowerCamelCase_ ) if self.use_ada_layer_norm_zero: lowerCamelCase__ : Optional[Any] = gate_mlp.unsqueeze(1 ) * ff_output lowerCamelCase__ : int = ff_output + hidden_states return hidden_states class a_ ( nn.Module ): '''simple docstring''' def __init__(self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = 4, lowerCamelCase_ = 0.0, lowerCamelCase_ = "geglu", lowerCamelCase_ = False, ): '''simple docstring''' super().__init__() lowerCamelCase__ : Tuple = int(dim * mult ) lowerCamelCase__ : List[Any] = dim_out if dim_out is not None else dim if activation_fn == "gelu": lowerCamelCase__ : Optional[Any] = GELU(lowerCamelCase_, lowerCamelCase_ ) if activation_fn == "gelu-approximate": lowerCamelCase__ : Dict = GELU(lowerCamelCase_, lowerCamelCase_, approximate='tanh' ) elif activation_fn == "geglu": lowerCamelCase__ : str = GEGLU(lowerCamelCase_, lowerCamelCase_ ) elif activation_fn == "geglu-approximate": lowerCamelCase__ : Dict = ApproximateGELU(lowerCamelCase_, lowerCamelCase_ ) lowerCamelCase__ : Optional[Any] = nn.ModuleList([] ) # project in self.net.append(lowerCamelCase_ ) # project dropout self.net.append(nn.Dropout(lowerCamelCase_ ) ) # project out self.net.append(nn.Linear(lowerCamelCase_, lowerCamelCase_ ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(lowerCamelCase_ ) ) def a__ (self, lowerCamelCase_ ): '''simple docstring''' for module in self.net: lowerCamelCase__ : Optional[Any] = module(lowerCamelCase_ ) return hidden_states class a_ ( nn.Module ): '''simple docstring''' def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = "none" ): '''simple docstring''' super().__init__() lowerCamelCase__ : Optional[int] = nn.Linear(lowerCamelCase_, lowerCamelCase_ ) lowerCamelCase__ : str = approximate def a__ (self, lowerCamelCase_ ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(lowerCamelCase_, approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ), approximate=self.approximate ).to(dtype=gate.dtype ) def a__ (self, lowerCamelCase_ ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.proj(lowerCamelCase_ ) lowerCamelCase__ : List[str] = self.gelu(lowerCamelCase_ ) return hidden_states class a_ ( nn.Module ): '''simple docstring''' def __init__(self, lowerCamelCase_, lowerCamelCase_ ): '''simple docstring''' super().__init__() lowerCamelCase__ : Union[str, Any] = nn.Linear(lowerCamelCase_, dim_out * 2 ) def a__ (self, lowerCamelCase_ ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(lowerCamelCase_ ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def a__ (self, lowerCamelCase_ ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ : List[str] = self.proj(lowerCamelCase_ ).chunk(2, dim=-1 ) return hidden_states * self.gelu(lowerCamelCase_ ) class a_ ( nn.Module ): '''simple docstring''' def __init__(self, lowerCamelCase_, lowerCamelCase_ ): '''simple docstring''' super().__init__() lowerCamelCase__ : Any = 
nn.Linear(lowerCamelCase_, lowerCamelCase_ ) def a__ (self, lowerCamelCase_ ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.proj(lowerCamelCase_ ) return x * torch.sigmoid(1.702 * x ) class a_ ( nn.Module ): '''simple docstring''' def __init__(self, lowerCamelCase_, lowerCamelCase_ ): '''simple docstring''' super().__init__() lowerCamelCase__ : Optional[int] = nn.Embedding(lowerCamelCase_, lowerCamelCase_ ) lowerCamelCase__ : str = nn.SiLU() lowerCamelCase__ : int = nn.Linear(lowerCamelCase_, embedding_dim * 2 ) lowerCamelCase__ : Tuple = nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_ ) def a__ (self, lowerCamelCase_, lowerCamelCase_ ): '''simple docstring''' lowerCamelCase__ : Any = self.linear(self.silu(self.emb(lowerCamelCase_ ) ) ) lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = torch.chunk(lowerCamelCase_, 2 ) lowerCamelCase__ : Optional[int] = self.norm(lowerCamelCase_ ) * (1 + scale) + shift return x class a_ ( nn.Module ): '''simple docstring''' def __init__(self, lowerCamelCase_, lowerCamelCase_ ): '''simple docstring''' super().__init__() lowerCamelCase__ : Tuple = CombinedTimestepLabelEmbeddings(lowerCamelCase_, lowerCamelCase_ ) lowerCamelCase__ : List[Any] = nn.SiLU() lowerCamelCase__ : List[Any] = nn.Linear(lowerCamelCase_, 6 * embedding_dim, bias=lowerCamelCase_ ) lowerCamelCase__ : int = nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_, eps=1e-6 ) def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None ): '''simple docstring''' lowerCamelCase__ : Tuple = self.linear(self.silu(self.emb(lowerCamelCase_, lowerCamelCase_, hidden_dtype=lowerCamelCase_ ) ) ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = emb.chunk(6, dim=1 ) lowerCamelCase__ : List[str] = self.norm(lowerCamelCase_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class a_ ( nn.Module ): '''simple docstring''' def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = 1e-5 ): '''simple docstring''' super().__init__() lowerCamelCase__ : Optional[Any] = num_groups lowerCamelCase__ : List[Any] = eps if act_fn is None: lowerCamelCase__ : int = None else: lowerCamelCase__ : Optional[int] = get_activation(lowerCamelCase_ ) lowerCamelCase__ : List[str] = nn.Linear(lowerCamelCase_, out_dim * 2 ) def a__ (self, lowerCamelCase_, lowerCamelCase_ ): '''simple docstring''' if self.act: lowerCamelCase__ : List[Any] = self.act(lowerCamelCase_ ) lowerCamelCase__ : List[str] = self.linear(lowerCamelCase_ ) lowerCamelCase__ : List[Any] = emb[:, :, None, None] lowerCamelCase__ , lowerCamelCase__ : List[str] = emb.chunk(2, dim=1 ) lowerCamelCase__ : int = F.group_norm(lowerCamelCase_, self.num_groups, eps=self.eps ) lowerCamelCase__ : Optional[int] = x * (1 + scale) + shift return x
316
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Dict = ["EncoderDecoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[Any] = ["TFEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[Any] = ["FlaxEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
316
1
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() a__: Optional[int] = logging.get_logger(__name__) a__: Optional[Any] = ['model.decoder.embed_positions.weights'] def UpperCamelCase__( UpperCamelCase__ : Union[str, Any] )->int: if "emb" in name: A__ = name.replace('''emb''' , '''model.decoder.embed_tokens''' ) if "transformer" in name: A__ = name.replace('''transformer''' , '''model.decoder''' ) if "cross_attention" in name: A__ = name.replace('''cross_attention''' , '''encoder_attn''' ) if "linear1" in name: A__ = name.replace('''linear1''' , '''fc1''' ) if "linear2" in name: A__ = name.replace('''linear2''' , '''fc2''' ) if "norm1" in name: A__ = name.replace('''norm1''' , '''self_attn_layer_norm''' ) if "norm_cross" in name: A__ = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' ) if "norm2" in name: A__ = name.replace('''norm2''' , '''final_layer_norm''' ) if "out_norm" in name: A__ = name.replace('''out_norm''' , '''model.decoder.layer_norm''' ) if "linears" in name: A__ = name.replace('''linears''' , '''lm_heads''' ) if "condition_provider.conditioners.description.output_proj" in name: A__ = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' ) return name def UpperCamelCase__( UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int )->Tuple[Dict, Dict]: A__ = list(state_dict.keys() ) A__ = {} for key in keys: A__ = state_dict.pop(UpperCamelCase__ ) A__ = rename_keys(UpperCamelCase__ ) if "in_proj_weight" in key: # split fused qkv proj A__ = val[:hidden_size, :] A__ = val[hidden_size : 2 * hidden_size, :] A__ = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: A__ = val else: A__ = val return state_dict, enc_dec_proj_state_dict def UpperCamelCase__( UpperCamelCase__ : str )->MusicgenDecoderConfig: if checkpoint == "small": # default config values A__ = 10_24 A__ = 24 A__ = 16 elif checkpoint == "medium": A__ = 15_36 A__ = 48 A__ = 24 elif checkpoint == "large": A__ = 20_48 A__ = 48 A__ = 32 else: raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." 
) A__ = MusicgenDecoderConfig( hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , ) return config @torch.no_grad() def UpperCamelCase__( UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Any="cpu" )->Dict: A__ = MusicGen.get_pretrained(UpperCamelCase__ , device=UpperCamelCase__ ) A__ = decoder_config_from_checkpoint(UpperCamelCase__ ) A__ = fairseq_model.lm.state_dict() A__ , A__ = rename_state_dict( UpperCamelCase__ , hidden_size=decoder_config.hidden_size ) A__ = TaEncoderModel.from_pretrained('''t5-base''' ) A__ = EncodecModel.from_pretrained('''facebook/encodec_32khz''' ) A__ = MusicgenForCausalLM(UpperCamelCase__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection A__ , A__ = decoder.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) for key in missing_keys.copy(): if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" ) if len(UpperCamelCase__ ) > 0: raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model A__ = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase__ , audio_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(UpperCamelCase__ ) # check we can do a forward pass A__ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) A__ = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): A__ = model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits if logits.shape != (8, 1, 20_48): raise ValueError('''Incorrect shape for logits''' ) # now construct the processor A__ = AutoTokenizer.from_pretrained('''t5-base''' ) A__ = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' ) A__ = MusicgenProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) # set the appropriate bos/pad token ids A__ = 20_48 A__ = 20_48 # set other default generation config params A__ = int(30 * audio_encoder.config.frame_rate ) A__ = True A__ = 3.0 if pytorch_dump_folder is not None: Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if repo_id: logger.info(f"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(UpperCamelCase__ ) processor.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": a__: Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint', default='small', type=str, help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.', ) parser.add_argument( '--pytorch_dump_folder', required=True, default=None, type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) parser.add_argument( '--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.' 
) a__: Optional[int] = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
193
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE__ : def __init__( self,__lowerCamelCase,__lowerCamelCase=2,__lowerCamelCase=True,__lowerCamelCase=False,__lowerCamelCase=10,__lowerCamelCase=3,__lowerCamelCase=32 * 8,__lowerCamelCase=32 * 8,__lowerCamelCase=4,__lowerCamelCase=64,): A__ = parent A__ = batch_size A__ = is_training A__ = use_auxiliary_loss A__ = num_queries A__ = num_channels A__ = min_size A__ = max_size A__ = num_labels A__ = hidden_dim A__ = hidden_dim def UpperCamelCase ( self ): A__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCamelCase ) A__ = torch.ones([self.batch_size, self.min_size, self.max_size],device=__lowerCamelCase ) A__ = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size],device=__lowerCamelCase ) > 0.5 ).float() A__ = (torch.rand((self.batch_size, self.num_labels),device=__lowerCamelCase ) > 0.5).long() A__ = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase ( self ): A__ = MaskaFormerConfig( hidden_size=self.hidden_dim,) A__ = self.num_queries A__ = self.num_labels A__ = [1, 1, 1, 1] A__ = self.num_channels A__ = 64 A__ = 128 A__ = self.hidden_dim A__ = self.hidden_dim A__ = self.hidden_dim return config def UpperCamelCase ( self ): A__ , A__ , A__ , A__ , A__ = self.prepare_config_and_inputs() A__ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ): A__ = output.encoder_hidden_states A__ = output.pixel_decoder_hidden_states A__ = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCamelCase ),len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCamelCase ),len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCamelCase ),config.decoder_layers ) def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase=False ): with torch.no_grad(): A__ = MaskaFormerModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A__ = model(pixel_values=__lowerCamelCase,pixel_mask=__lowerCamelCase ) A__ = model(__lowerCamelCase,output_hidden_states=__lowerCamelCase ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape,(self.batch_size, self.num_queries, self.hidden_dim),) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCamelCase,__lowerCamelCase ) def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ): A__ = 
MaskaFormerForUniversalSegmentation(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() def comm_check_on_output(__lowerCamelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): A__ = model(pixel_values=__lowerCamelCase,pixel_mask=__lowerCamelCase ) A__ = model(__lowerCamelCase ) comm_check_on_output(__lowerCamelCase ) A__ = model( pixel_values=__lowerCamelCase,pixel_mask=__lowerCamelCase,mask_labels=__lowerCamelCase,class_labels=__lowerCamelCase ) comm_check_on_output(__lowerCamelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape,torch.Size([1] ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __SCREAMING_SNAKE_CASE = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self ): A__ = MaskaFormerModelTester(self ) A__ = ConfigTester(self,config_class=__lowerCamelCase,has_text_modality=__lowerCamelCase ) def UpperCamelCase ( self ): self.config_tester.run_common_tests() def UpperCamelCase ( self ): A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(__lowerCamelCase,**__lowerCamelCase,output_hidden_states=__lowerCamelCase ) def UpperCamelCase ( self ): A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__lowerCamelCase ) @unittest.skip(reason='''Mask2Former does not use inputs_embeds''' ) def UpperCamelCase ( self ): pass @unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' ) def UpperCamelCase ( self ): pass @unittest.skip(reason='''Mask2Former is not a generative model''' ) def UpperCamelCase ( self ): pass @unittest.skip(reason='''Mask2Former does not use token embeddings''' ) def UpperCamelCase ( self ): pass @require_torch_multi_gpu @unittest.skip( reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def UpperCamelCase ( self ): pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCamelCase ( self ): pass def UpperCamelCase ( self ): A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(__lowerCamelCase ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1],__lowerCamelCase ) @slow def UpperCamelCase ( self ): for 
model_name in ["facebook/mask2former-swin-small-coco-instance"]: A__ = MaskaFormerModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCamelCase ( self ): A__ = (self.model_tester.min_size,) * 2 A__ = { '''pixel_values''': torch.randn((2, 3, *size),device=__lowerCamelCase ), '''mask_labels''': torch.randn((2, 10, *size),device=__lowerCamelCase ), '''class_labels''': torch.zeros(2,10,device=__lowerCamelCase ).long(), } A__ = self.model_tester.get_config() A__ = MaskaFormerForUniversalSegmentation(__lowerCamelCase ).to(__lowerCamelCase ) A__ = model(**__lowerCamelCase ) self.assertTrue(outputs.loss is not None ) def UpperCamelCase ( self ): A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(__lowerCamelCase,**__lowerCamelCase,output_hidden_states=__lowerCamelCase ) def UpperCamelCase ( self ): A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(__lowerCamelCase ).to(__lowerCamelCase ) A__ = model(**__lowerCamelCase,output_attentions=__lowerCamelCase ) self.assertTrue(outputs.attentions is not None ) def UpperCamelCase ( self ): if not self.model_tester.is_training: return A__ = self.all_model_classes[1] A__ , A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() A__ = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A__ = model(__lowerCamelCase,mask_labels=__lowerCamelCase,class_labels=__lowerCamelCase ).loss loss.backward() def UpperCamelCase ( self ): A__ = self.all_model_classes[1] A__ , A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() A__ = True A__ = True A__ = model_class(__lowerCamelCase ).to(__lowerCamelCase ) model.train() A__ = model(__lowerCamelCase,mask_labels=__lowerCamelCase,class_labels=__lowerCamelCase ) A__ = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() A__ = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() A__ = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() A__ = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowerCamelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) a__: str = 1e-4 def UpperCamelCase__( )->Union[str, Any]: A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def UpperCamelCase ( self ): return "facebook/mask2former-swin-small-coco-instance" @cached_property def UpperCamelCase ( self ): return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def UpperCamelCase ( self ): A__ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__lowerCamelCase ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(__lowerCamelCase,return_tensors='''pt''' ).to(__lowerCamelCase ) A__ = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCamelCase,(1, 3, 384, 384) ) with torch.no_grad(): A__ = model(**__lowerCamelCase ) A__ = torch.tensor( [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], 
[-0.5832, 0.1971, -0.0197]] ).to(__lowerCamelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3],__lowerCamelCase,atol=__lowerCamelCase ) ) A__ = torch.tensor( [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__lowerCamelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3],__lowerCamelCase,atol=__lowerCamelCase ) ) A__ = torch.tensor( [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__lowerCamelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3],__lowerCamelCase,atol=__lowerCamelCase ) ) def UpperCamelCase ( self ): A__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__lowerCamelCase ).eval() A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(__lowerCamelCase,return_tensors='''pt''' ).to(__lowerCamelCase ) A__ = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCamelCase,(1, 3, 384, 384) ) with torch.no_grad(): A__ = model(**__lowerCamelCase ) # masks_queries_logits A__ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) A__ = [ [-8.7839, -9.0056, -8.8121], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3427, -6.4675], ] A__ = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3],__lowerCamelCase,atol=__lowerCamelCase ) ) # class_queries_logits A__ = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape,(1, model.config.num_queries, model.config.num_labels + 1) ) A__ = torch.tensor( [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, -7.7293, -3.0275], ] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3],__lowerCamelCase,atol=__lowerCamelCase ) ) def UpperCamelCase ( self ): A__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__lowerCamelCase ).eval() A__ = self.default_image_processor A__ = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )],segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )],return_tensors='''pt''',) A__ = inputs['''pixel_values'''].to(__lowerCamelCase ) A__ = [el.to(__lowerCamelCase ) for el in inputs['''mask_labels''']] A__ = [el.to(__lowerCamelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): A__ = model(**__lowerCamelCase ) self.assertTrue(outputs.loss is not None )
193
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
214
'''simple docstring''' import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a: List[str] = logging.get_logger(__name__) __a: int = """▁""" __a: Optional[int] = {"""vocab_file""": """prophetnet.tokenizer"""} __a: Optional[int] = { """vocab_file""": { """microsoft/xprophetnet-large-wiki100-cased""": ( """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer""" ), } } __a: List[str] = { """microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False}, } __a: Tuple = { """microsoft/xprophetnet-large-wiki100-cased""": 5_12, } def __UpperCamelCase ( UpperCAmelCase ): lowercase__ : Dict = collections.OrderedDict() with open(UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as reader: lowercase__ : List[Any] = reader.readlines() for index, token in enumerate(UpperCAmelCase ): lowercase__ : List[Any] = token.rstrip('''\n''' ) lowercase__ : Tuple = index return vocab class UpperCAmelCase ( a__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] def __init__( self , __lowerCAmelCase , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[UNK]" , __lowerCAmelCase="[PAD]" , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[MASK]" , __lowerCAmelCase = None , **__lowerCAmelCase , ) -> None: lowercase__ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise lowercase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) lowercase__ : Optional[int] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab lowercase__ : str = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4} for i in range(10 ): lowercase__ : Tuple = F"""[unused{i}]""" lowercase__ : List[Any] = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab lowercase__ : Optional[Any] = 12 lowercase__ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(__lowerCAmelCase ) def __getstate__( self ) -> Union[str, Any]: lowercase__ : Dict = self.__dict__.copy() lowercase__ : Optional[Any] = None return state def __setstate__( self , __lowerCAmelCase ) -> Dict: lowercase__ : Any = d try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase__ : List[Any] = {} lowercase__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return ([0] * len(__lowerCAmelCase )) + [1] return ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1] def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> List[int]: lowercase__ : List[str] = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase( self ) -> List[Any]: return len(self.sp_model ) + self.fairseq_offset def _lowerCAmelCase( self ) -> List[str]: lowercase__ : str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase( self , __lowerCAmelCase ) -> str: return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def _lowerCAmelCase( self , __lowerCAmelCase ) -> Optional[Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase__ : Tuple = self.sp_model.PieceToId(__lowerCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCAmelCase( self , __lowerCAmelCase ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCAmelCase( self , __lowerCAmelCase ) -> Union[str, Any]: lowercase__ : Optional[Any] = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip() return out_string def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ : str = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and 
os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: lowercase__ : List[Any] = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.sep_token_id] lowercase__ : str = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
214
1
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
67
import math


def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two base/power pairs from input and typecast them to int using map.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
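The identity this script leans on, log10(x**y) = y*log10(x), is easy to sanity-check on powers too large to want to materialize (a quick standalone check, not part of the original script):

import math

# Compare 2**1000 with 10**300 in log space: 1000*log10(2) ~ 301.03 > 300.
big, small = 1000 * math.log10(2), 300 * math.log10(10)
assert big > small
assert 2**1000 > 10**300  # the exact integer comparison agrees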
19
0
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase ) ->List[Any]: """simple docstring""" if not head: return True # split the list to two parts a_ , a_ = head.next, head while fast and fast.next: a_ = fast.next.next a_ = slow.next a_ = slow.next a_ = None # Don't forget here! But forget still works! # reverse the second part a_ = None while second: a_ = second.next a_ = node a_ = second a_ = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False a_ = node.next a_ = head.next return True def UpperCamelCase ( UpperCAmelCase ) ->str: """simple docstring""" if not head or not head.next: return True # 1. Get the midpoint (slow) a_ = a_ = a_ = head while fast and fast.next: a_ , a_ = fast.next.next, slow.next # 2. Push the second half into the stack a_ = [slow.val] while slow.next: a_ = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False a_ = cur.next return True def UpperCamelCase ( UpperCAmelCase ) ->str: """simple docstring""" if not head or not head.next: return True a_ = {} a_ = 0 while head: if head.val in d: d[head.val].append(UpperCAmelCase ) else: a_ = [pos] a_ = head.next pos += 1 a_ = pos - 1 a_ = 0 for v in d.values(): if len(UpperCAmelCase ) % 2 != 0: middle += 1 else: a_ = 0 for i in range(0 , len(UpperCAmelCase ) ): if v[i] + v[len(UpperCAmelCase ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
365
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json' ), } class snake_case ( SCREAMING_SNAKE_CASE_ ): a_ : str = """xlm-roberta""" def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]: super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase) a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = hidden_act a_ = intermediate_size a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = initializer_range a_ = layer_norm_eps a_ = position_embedding_type a_ = use_cache a_ = classifier_dropout class snake_case ( SCREAMING_SNAKE_CASE_ ): @property def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ = {0: "batch", 1: "choice", 2: "sequence"} else: a_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ])
303
0
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from packaging import version from .. import __version__ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, try_to_load_from_cache, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available, is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_bfaa_cpu_available, 
is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends, torch_only_method, ) SCREAMING_SNAKE_CASE :List[str] = 'pytorch_model.bin' SCREAMING_SNAKE_CASE :str = 'pytorch_model.bin.index.json' SCREAMING_SNAKE_CASE :Optional[int] = 'adapter_config.json' SCREAMING_SNAKE_CASE :Dict = 'adapter_model.bin' SCREAMING_SNAKE_CASE :Dict = 'adapter_model.safetensors' SCREAMING_SNAKE_CASE :str = 'tf_model.h5' SCREAMING_SNAKE_CASE :List[Any] = 'tf_model.h5.index.json' SCREAMING_SNAKE_CASE :str = 'model.ckpt' SCREAMING_SNAKE_CASE :List[Any] = 'flax_model.msgpack' SCREAMING_SNAKE_CASE :Optional[int] = 'flax_model.msgpack.index.json' SCREAMING_SNAKE_CASE :Tuple = 'model.safetensors' SCREAMING_SNAKE_CASE :List[Any] = 'model.safetensors.index.json' SCREAMING_SNAKE_CASE :str = 'config.json' SCREAMING_SNAKE_CASE :int = 'preprocessor_config.json' SCREAMING_SNAKE_CASE :Optional[Any] = FEATURE_EXTRACTOR_NAME SCREAMING_SNAKE_CASE :Optional[int] = 'generation_config.json' SCREAMING_SNAKE_CASE :List[str] = 'modelcard.json' SCREAMING_SNAKE_CASE :Optional[int] = '▁' SCREAMING_SNAKE_CASE :Optional[Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility SCREAMING_SNAKE_CASE :str = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. SCREAMING_SNAKE_CASE :Optional[Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] SCREAMING_SNAKE_CASE :List[Any] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def UpperCAmelCase ( a_ ) -> Dict: """simple docstring""" if version.parse(a_ ) < version.parse(a_ ): if "dev" in min_version: __A = ( "This example requires a source install from HuggingFace Transformers (see " "`https://huggingface.co/docs/transformers/installation#install-from-source`)," ) else: __A = F'''This example requires a minimum version of {min_version},''' error_message += F''' but the version found is {__version__}.\n''' raise ImportError( error_message + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other " "versions of HuggingFace Transformers." )
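The version gate at the bottom of this block reduces to a packaging.version comparison against transformers.__version__; a hedged standalone equivalent (function name and the explicit second argument are ours):

from packaging import version

def check_min_version(min_version: str, current_version: str) -> None:
    # Same comparison the helper above performs against the installed __version__.
    if version.parse(current_version) < version.parse(min_version):
        raise ImportError(f"This example requires at least {min_version}, but found {current_version}.")

check_min_version("4.21.0", "4.30.2")  # passes silently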
15
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the three headline counters from the Worldometers page."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
142
0
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path: Path) -> bool:
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
261
from math import pow, sqrt


def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_1 / molar_mass_2), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_1 / molar_mass_2), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_1 / molar_mass_2), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must greater than 0.")
    )
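A worked number makes Graham's law concrete: with molar masses 32 g/mol (oxygen) and 2 g/mol (hydrogen), sqrt(32/2) = 4, i.e. the lighter gas effuses four times as fast. A quick check against the helpers above (argument order as reconstructed here):

assert effusion_ratio(32.0, 2.0) == 4.0          # sqrt(32/2), rounded to 6 places
assert first_molar_mass(2.0, 4.0, 1.0) == 0.125  # recover a molar mass from the rate ratio
print(effusion_ratio(31.998, 2.016))             # ~3.98 with real molar masses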
261
1
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase__ ( self : Union[str, Any] ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowercase__ ( self : Any ): '''simple docstring''' lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) lowercase__ = '''xvjiarui/stable-diffusion-2-inpainting''' lowercase__ , lowercase__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase, safety_checker=lowerCamelCase ) lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench''' lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = num_samples * [init_image] lowercase__ = num_samples * [mask_image] lowercase__ , lowercase__ , lowercase__ = pipeline.prepare_inputs(lowerCamelCase, lowerCamelCase, lowerCamelCase ) # shard inputs and rng lowercase__ = replicate(lowerCamelCase ) lowercase__ = jax.random.split(lowerCamelCase, jax.device_count() ) lowercase__ = shard(lowerCamelCase ) lowercase__ = shard(lowerCamelCase ) lowercase__ = shard(lowerCamelCase ) lowercase__ = pipeline( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ) lowercase__ = output.images.reshape(lowerCamelCase, 512, 512, 3 ) lowercase__ = images[0, 253:256, 253:256, -1] lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(F"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
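The replicate/shard dance in the test above is the standard flax multi-device input prep; in miniature (a sketch independent of the pipeline, showing what shard() does to the batch axis):

import jax
import jax.numpy as jnp

# shard() folds the batch axis into (num_devices, per_device_batch, ...).
num_devices = jax.local_device_count()
batch = jnp.zeros((num_devices * 2, 77, 768))
sharded = batch.reshape(num_devices, -1, 77, 768)
print(sharded.shape)  # (num_devices, 2, 77, 768): one leading slice per device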
207
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _UpperCAmelCase ( A__ ): """simple docstring""" lowercase__ = ["""image_processor""", """tokenizer"""] lowercase__ = """BlipImageProcessor""" lowercase__ = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : Dict, lowerCamelCase : Dict, lowerCamelCase : str ): '''simple docstring''' lowercase__ = False super().__init__(lowerCamelCase, lowerCamelCase ) lowercase__ = self.image_processor def __call__( self : int, lowerCamelCase : ImageInput = None, lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, lowerCamelCase : bool = True, lowerCamelCase : Union[bool, str, PaddingStrategy] = False, lowerCamelCase : Union[bool, str, TruncationStrategy] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : int = 0, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = True, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : Any, ): '''simple docstring''' if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None: lowercase__ = self.tokenizer lowercase__ = self.tokenizer( text=lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, stride=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_attention_mask=lowerCamelCase, return_overflowing_tokens=lowerCamelCase, return_special_tokens_mask=lowerCamelCase, return_offsets_mapping=lowerCamelCase, return_token_type_ids=lowerCamelCase, return_length=lowerCamelCase, verbose=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase, ) return text_encoding # add pixel_values lowercase__ = self.image_processor(lowerCamelCase, return_tensors=lowerCamelCase ) if text is not None: lowercase__ = self.tokenizer( text=lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, stride=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_attention_mask=lowerCamelCase, return_overflowing_tokens=lowerCamelCase, return_special_tokens_mask=lowerCamelCase, return_offsets_mapping=lowerCamelCase, return_token_type_ids=lowerCamelCase, return_length=lowerCamelCase, verbose=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase, ) else: lowercase__ = None if text_encoding is not None: encoding_image_processor.update(lowerCamelCase ) return encoding_image_processor def lowercase__ ( self : Tuple, *lowerCamelCase : Union[str, Any], **lowerCamelCase : Optional[int] ): '''simple docstring''' return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase ) def lowercase__ ( self : List[str], *lowerCamelCase : int, **lowerCamelCase : List[str] ): '''simple docstring''' return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase ) @property def lowercase__ ( self : List[str] ): '''simple docstring''' lowercase__ = self.tokenizer.model_input_names lowercase__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + 
image_processor_input_names ) )
207
1
import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml _A : Any = logging.get_logger(__name__) def _a ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: """simple docstring""" def run_func(UpperCAmelCase ): @wraps(UpperCAmelCase ) def run_in_eager_mode(*UpperCAmelCase , **UpperCAmelCase ): return func(*UpperCAmelCase , **UpperCAmelCase ) @wraps(UpperCAmelCase ) @tf.function(experimental_compile=UpperCAmelCase ) def run_in_graph_mode(*UpperCAmelCase , **UpperCAmelCase ): return func(*UpperCAmelCase , **UpperCAmelCase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' ) return run_in_eager_mode else: return run_in_graph_mode return run_func def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> ["tf.Tensor"]: """simple docstring""" lowerCamelCase__ : List[Any] = random.Random() lowerCamelCase__ : str = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(UpperCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): _UpperCAmelCase : TensorFlowBenchmarkArguments _UpperCAmelCase : PretrainedConfig _UpperCAmelCase : str = "TensorFlow" @property def __lowerCamelCase ( self : int ) ->Optional[int]: return tf.__version__ def __lowerCamelCase ( self : Optional[int] , A : str , A : int , A : int ) ->float: # initialize GPU on separate process lowerCamelCase__ : Dict = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase__ : int = self._prepare_inference_func(A , A , A ) return self._measure_speed(_inference ) def __lowerCamelCase ( self : str , A : str , A : int , A : int ) ->float: lowerCamelCase__ : Optional[int] = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase__ : List[Any] = self._prepare_train_func(A , A , A ) return self._measure_speed(_train ) def __lowerCamelCase ( self : int , A : str , A : int , A : int ) ->[Memory, Optional[MemorySummary]]: # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A ) lowerCamelCase__ : int = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase__ : str = self._prepare_inference_func(A , A , A ) return self._measure_memory(_inference ) def __lowerCamelCase ( self : List[str] , A : str , A : int , A : int ) ->[Memory, Optional[MemorySummary]]: if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A ) lowerCamelCase__ : List[Any] = self.args.strategy if strategy is None: raise ValueError('''A device 
strategy has to be initialized before using TensorFlow.''' ) lowerCamelCase__ : str = self._prepare_train_func(A , A , A ) return self._measure_memory(_train ) def __lowerCamelCase ( self : Dict , A : str , A : int , A : int ) ->Callable[[], None]: lowerCamelCase__ : Tuple = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) lowerCamelCase__ : Tuple = ( hasattr(A , '''architectures''' ) and isinstance(config.architectures , A ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCamelCase__ : Any = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model lowerCamelCase__ : List[Any] = __import__('''transformers''' , fromlist=[model_class] ) lowerCamelCase__ : int = getattr(A , A ) lowerCamelCase__ : int = model_cls(A ) except ImportError: raise ImportError( F"{model_class} does not exist. If you just want to test the pretrained model, you might want to" ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: lowerCamelCase__ : Union[str, Any] = TF_MODEL_MAPPING[config.__class__](A ) # encoder-decoder has vocab size saved differently lowerCamelCase__ : Tuple = config.vocab_size if hasattr(A , '''vocab_size''' ) else config.encoder.vocab_size lowerCamelCase__ : Optional[Any] = random_input_ids(A , A , A ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(A , decoder_input_ids=A , training=A ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(A , training=A ) lowerCamelCase__ : int = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def __lowerCamelCase ( self : List[str] , A : str , A : int , A : int ) ->Callable[[], None]: lowerCamelCase__ : Tuple = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' ) if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) lowerCamelCase__ : Optional[int] = ( hasattr(A , '''architectures''' ) and isinstance(config.architectures , A ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCamelCase__ : Any = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model lowerCamelCase__ : List[str] = __import__('''transformers''' , fromlist=[model_class] ) lowerCamelCase__ : Optional[int] = getattr(A , A ) lowerCamelCase__ : Optional[Any] = model_cls(A ) except ImportError: raise ImportError( F"{model_class} does not exist. 
If you just want to test the pretrained model, you might want to" ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: lowerCamelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](A ) # encoder-decoder has vocab size saved differently lowerCamelCase__ : Optional[int] = config.vocab_size if hasattr(A , '''vocab_size''' ) else config.encoder.vocab_size lowerCamelCase__ : Dict = random_input_ids(A , A , A ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): lowerCamelCase__ : int = model(A , decoder_input_ids=A , labels=A , training=A )[0] lowerCamelCase__ : List[Any] = tf.gradients(A , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): lowerCamelCase__ : Optional[int] = model(A , labels=A , training=A )[0] lowerCamelCase__ : List[str] = tf.gradients(A , model.trainable_variables ) return gradients lowerCamelCase__ : Tuple = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def __lowerCamelCase ( self : Tuple , A : Any ) ->float: with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' ) timeit.repeat(A , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average lowerCamelCase__ : Optional[Any] = timeit.repeat( A , repeat=self.args.repeat , number=1_0 , ) return min(A ) / 10.0 except ResourceExhaustedError as e: self.print_fn(F"Doesn't fit on GPU. {e}" ) def __lowerCamelCase ( self : List[Any] , A : Callable[[], None] ) ->[Memory, MemorySummary]: logger.info( '''Note that TensorFlow allocates more memory than ''' '''it might need to speed up computation. ''' '''The memory reported here corresponds to the memory ''' '''reported by `nvidia-smi`, which can vary depending ''' '''on total available memory on the GPU that is used.''' ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory''' ''' consumption line by line.''' ) lowerCamelCase__ : Union[str, Any] = start_memory_tracing('''transformers''' ) if self.args.is_tpu: # tpu raise NotImplementedError( '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking''' ''' with `args.memory=False`''' ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( '''py3nvml not installed, we won\'t log GPU memory usage. ''' '''Install py3nvml (pip install py3nvml) to log information about GPU.''' ) lowerCamelCase__ : Union[str, Any] = '''N/A''' else: logger.info( '''Measuring total GPU usage on GPU device. 
Make sure to not have additional processes''' ''' running on the same GPU.''' ) # init nvml nvml.nvmlInit() func() lowerCamelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) lowerCamelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(A ) lowerCamelCase__ : List[Any] = meminfo.used lowerCamelCase__ : Union[str, Any] = Memory(A ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in''' ''' TensorFlow.''' ) lowerCamelCase__ : Tuple = None else: lowerCamelCase__ : Dict = measure_peak_memory_cpu(A ) lowerCamelCase__ : Optional[Any] = Memory(A ) if isinstance(A , A ) else memory_bytes if self.args.trace_memory_line_by_line: lowerCamelCase__ : Union[str, Any] = stop_memory_tracing(A ) if memory is None: lowerCamelCase__ : Dict = summary.total else: lowerCamelCase__ : Optional[int] = None return memory, summary except ResourceExhaustedError as e: self.print_fn(F"Doesn't fit on GPU. {e}" ) return "N/A", None
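The run_with_tf_optimizations decorator in this benchmark boils down to choosing between an eager callable and a tf.function-compiled one; a sketch of the same dispatch (note: experimental_compile is the legacy spelling, jit_compile in current TF):

import tensorflow as tf

def make_runner(fn, eager_mode: bool, use_xla: bool):
    # Mirrors run_with_tf_optimizations: eager passthrough vs. graph/XLA compile.
    if eager_mode:
        return fn
    return tf.function(fn, jit_compile=use_xla)

runner = make_runner(lambda: tf.reduce_sum(tf.range(10)), eager_mode=False, use_xla=False)
print(int(runner()))  # 45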
265
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder _A : List[Any] = 'base_with_context' def _a ( UpperCAmelCase , UpperCAmelCase ) -> Tuple: """simple docstring""" lowerCamelCase__ : Dict = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) ) lowerCamelCase__ : List[str] = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCAmelCase ) for lyr_num, lyr in enumerate(model.encoders ): lowerCamelCase__ : Any = weights[f"layers_{lyr_num}"] lowerCamelCase__ : List[Any] = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) lowerCamelCase__ : int = ly_weight['''attention'''] lowerCamelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def _a ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: """simple docstring""" lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) ) lowerCamelCase__ : Union[str, Any] = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCAmelCase ) for lyr_num, lyr in enumerate(model.encoders ): lowerCamelCase__ : Tuple = weights[f"layers_{lyr_num}"] lowerCamelCase__ : str = ly_weight['''attention'''] lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) lowerCamelCase__ : int = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) lowerCamelCase__ : Tuple = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) lowerCamelCase__ : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) lowerCamelCase__ : 
Any = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def _a ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: """simple docstring""" lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) ) lowerCamelCase__ : int = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) ) lowerCamelCase__ : str = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCAmelCase ) lowerCamelCase__ : Tuple = nn.Parameter( torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) ) for lyr_num, lyr in enumerate(model.decoders ): lowerCamelCase__ : List[Any] = weights[f"layers_{lyr_num}"] lowerCamelCase__ : Optional[Any] = nn.Parameter( torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) ) lowerCamelCase__ : Any = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) ) lowerCamelCase__ : Optional[Any] = ly_weight['''self_attention'''] lowerCamelCase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) lowerCamelCase__ : Dict = ly_weight['''MultiHeadDotProductAttention_0'''] lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) lowerCamelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) lowerCamelCase__ : int = nn.Parameter( torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) ) lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) lowerCamelCase__ : Any = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) ) lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) lowerCamelCase__ : int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) lowerCamelCase__ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) ) lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) ) return model def _a ( UpperCAmelCase ) -> Optional[Any]: """simple docstring""" lowerCamelCase__ : Tuple = checkpoints.load_tax_checkpoint(args.checkpoint_path ) lowerCamelCase__ : Optional[int] = jnp.tree_util.tree_map(onp.array , UpperCAmelCase ) lowerCamelCase__ : List[str] = [ '''from __gin__ import dynamic_registration''', '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''', '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''', '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''', 
] lowerCamelCase__ : List[Any] = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' ) lowerCamelCase__ : Optional[Any] = inference.parse_training_gin_file(UpperCAmelCase , UpperCAmelCase ) lowerCamelCase__ : Tuple = inference.InferenceModel(args.checkpoint_path , UpperCAmelCase ) lowerCamelCase__ : int = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' ) lowerCamelCase__ : str = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) lowerCamelCase__ : int = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) lowerCamelCase__ : Optional[int] = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) lowerCamelCase__ : Optional[int] = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , UpperCAmelCase ) lowerCamelCase__ : int = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , UpperCAmelCase ) lowerCamelCase__ : List[str] = load_decoder(ta_checkpoint['''target''']['''decoder'''] , UpperCAmelCase ) lowerCamelCase__ : List[str] = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' ) lowerCamelCase__ : List[Any] = SpectrogramDiffusionPipeline( notes_encoder=UpperCAmelCase , continuous_encoder=UpperCAmelCase , decoder=UpperCAmelCase , scheduler=UpperCAmelCase , melgan=UpperCAmelCase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": _A : int = argparse.ArgumentParser() parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument( '--checkpoint_path', default=F'''{MODEL}/checkpoint_500000''', type=str, required=False, help='Path to the original jax model checkpoint.', ) _A : Tuple = parser.parse_args() main(args)
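Every `['kernel'].T` in the port above exists because Flax Dense kernels are stored (in_features, out_features) while torch.nn.Linear weights are (out_features, in_features); a minimal self-contained check of that convention:

import numpy as np
import torch
import torch.nn as nn

flax_kernel = np.random.randn(16, 32).astype("float32")  # Flax layout: (in, out)
linear = nn.Linear(16, 32, bias=False)
linear.weight = nn.Parameter(torch.from_numpy(flax_kernel.T))  # torch layout: (out, in)

x = np.random.randn(4, 16).astype("float32")
torch_out = linear(torch.from_numpy(x)).detach().numpy()
flax_out = x @ flax_kernel  # what flax.linen.Dense would compute
assert np.allclose(torch_out, flax_out, atol=1e-5)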
265
1
def mf_knapsack(i, wt, val, j):
    """Memoized 0/1 knapsack: best value using the first i items within capacity j."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = f"All weights must be integers but got weight of type {type(wt[i])} at index {i}"
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
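A quick end-to-end check of the solution-reconstruction helper on a fresh instance, run alongside the definitions above (values chosen by hand): weights [1, 4, 3, 5] and values [1, 3, 5, 7] under capacity 7 give value 8 via items 2 and 3 (1-indexed).

optimal_value, optimal_items = knapsack_with_example_solution(7, [1, 4, 3, 5], [1, 3, 5, 7])
assert optimal_value == 8        # items 2 and 3: weight 4 + 3 = 7, value 3 + 5 = 8
assert optimal_items == {2, 3}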
195
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class A_ ( __lowerCamelCase ): '''simple docstring''' def __init__( self ): lowercase = [] def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ): self.events.append('on_init_end' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ): self.events.append('on_train_begin' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ): self.events.append('on_train_end' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ): self.events.append('on_epoch_begin' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ): self.events.append('on_epoch_end' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ): self.events.append('on_step_begin' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ): self.events.append('on_step_end' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ): self.events.append('on_evaluate' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ): self.events.append('on_predict' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ): self.events.append('on_save' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ): self.events.append('on_log' ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ): self.events.append('on_prediction_step' ) @require_torch class A_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self ): lowercase = tempfile.mkdtemp() def SCREAMING_SNAKE_CASE__ ( self ): shutil.rmtree(self.output_dir ) def SCREAMING_SNAKE_CASE__ ( self , snake_case=0 , snake_case=0 , snake_case=64 , snake_case=64 , snake_case=None , snake_case=False , **snake_case ): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
lowercase = RegressionDataset(length=snake_case ) lowercase = RegressionDataset(length=snake_case ) lowercase = RegressionModelConfig(a=snake_case , b=snake_case ) lowercase = RegressionPreTrainedModel(snake_case ) lowercase = TrainingArguments(self.output_dir , disable_tqdm=snake_case , report_to=[] , **snake_case ) return Trainer( snake_case , snake_case , train_dataset=snake_case , eval_dataset=snake_case , callbacks=snake_case , ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ): self.assertEqual(len(snake_case ) , len(snake_case ) ) # Order doesn't matter lowercase = sorted(snake_case , key=lambda snake_case : cb.__name__ if isinstance(snake_case , snake_case ) else cb.__class__.__name__ ) lowercase = sorted(snake_case , key=lambda snake_case : cb.__name__ if isinstance(snake_case , snake_case ) else cb.__class__.__name__ ) for cba, cba in zip(snake_case , snake_case ): if isinstance(snake_case , snake_case ) and isinstance(snake_case , snake_case ): self.assertEqual(snake_case , snake_case ) elif isinstance(snake_case , snake_case ) and not isinstance(snake_case , snake_case ): self.assertEqual(snake_case , cba.__class__ ) elif not isinstance(snake_case , snake_case ) and isinstance(snake_case , snake_case ): self.assertEqual(cba.__class__ , snake_case ) else: self.assertEqual(snake_case , snake_case ) def SCREAMING_SNAKE_CASE__ ( self , snake_case ): lowercase = ['on_init_end', 'on_train_begin'] lowercase = 0 lowercase = len(trainer.get_eval_dataloader() ) lowercase = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate'] for _ in range(trainer.state.num_train_epochs ): expected_events.append('on_epoch_begin' ) for _ in range(snake_case ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('on_log' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('on_save' ) expected_events.append('on_epoch_end' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.get_trainer() lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) # Callbacks passed at init are added to the default callbacks lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(snake_case ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback lowercase = self.get_trainer(disable_tqdm=snake_case ) lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] lowercase = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(snake_case ) expected_callbacks.remove(snake_case ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) lowercase = self.get_trainer() lowercase = trainer.pop_callback(snake_case ) self.assertEqual(cb.__class__ , snake_case ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) 
trainer.add_callback(snake_case ) expected_callbacks.insert(0 , snake_case ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) # We can also add, pop, or remove by instance lowercase = self.get_trainer() lowercase = trainer.callback_handler.callbacks[0] trainer.remove_callback(snake_case ) expected_callbacks.remove(snake_case ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) lowercase = self.get_trainer() lowercase = trainer.callback_handler.callbacks[0] lowercase = trainer.pop_callback(snake_case ) self.assertEqual(snake_case , snake_case ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) trainer.add_callback(snake_case ) expected_callbacks.insert(0 , snake_case ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='ignore' , category=snake_case ) lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case , self.get_expected_events(snake_case ) ) # Independent log/save/eval lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case , self.get_expected_events(snake_case ) ) lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case , self.get_expected_events(snake_case ) ) lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' ) trainer.train() lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case , self.get_expected_events(snake_case ) ) lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' ) trainer.train() lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case , self.get_expected_events(snake_case ) ) # A bit of everything lowercase = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , ) trainer.train() lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case , self.get_expected_events(snake_case ) ) # warning should be emitted for duplicated callbacks with patch('transformers.trainer_callback.logger.warning' ) as warn_mock: lowercase = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(snake_case ) in warn_mock.call_args[0][0]
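The events recorded by MyTestTrainerCallback are the same hooks a production callback overrides; a minimal real-world one (illustrative, assuming transformers is installed; the class name is ours):

from transformers import TrainerCallback

class LossPrinterCallback(TrainerCallback):
    # on_log fires on the same schedule the tests above assert on ("on_log" events).
    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

# Attach it like any other callback: Trainer(..., callbacks=[LossPrinterCallback()])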
195
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
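The usual entry point is the classmethod, which sets the decoder flags before composing the two configs (a short usage sketch against the public transformers API):

from transformers import BertConfig, EncoderDecoderConfig

encoder = BertConfig(num_hidden_layers=4)
decoder = BertConfig(num_hidden_layers=4)
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
assert config.decoder.is_decoder and config.decoder.add_cross_attention
assert config.is_encoder_decoder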
308
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"

        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
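# Offline sketch of the kpi-extraction step above, using a pandas DataFrame
# shaped like the `TrainingJobAnalytics(...).dataframe()` output (columns
# assumed: "metric_name", "value"); the metric values here are made up.
import pandas as pd

result_metrics_df = pd.DataFrame(
    {"metric_name": ["eval_accuracy", "eval_loss", "eval_accuracy"], "value": [0.74, 0.55, 0.72]}
)
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
assert all(acc >= 0.7 for acc in eval_accuracy)
assert all(loss <= 0.6 for loss in eval_loss)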
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
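# Shape-level sketch of the forward pass above with dummy tensors standing in
# for real Transformer2DModel outputs; all dimensions here are illustrative.
import torch

mix_ratio = 0.5
condition_lengths = [77, 257]
input_states = torch.randn(2, 16, 64)  # (batch, tokens, features)
encoder_hidden_states = torch.randn(2, sum(condition_lengths), 64)

# pretend each transformer already returned its (encoded - input) residual
encoded_states = [torch.randn_like(input_states), torch.randn_like(input_states)]
output_states = encoded_states[0] * mix_ratio + encoded_states[1] * (1 - mix_ratio)
output_states = output_states + input_states
print(output_states.shape)  # torch.Size([2, 16, 64])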
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
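# Usage sketch: the lazy module above defers the heavy torch imports until
# first attribute access, so user code just imports normally (requires the
# installed `transformers` package); the config values are illustrative.
from transformers import WavLMConfig

config = WavLMConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4)
print(config.model_type)  # "wavlm"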
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
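# End-to-end sketch of the same CSV loader through the public API (assuming
# the `datasets` library is installed and a local file.csv like the first
# fixture above exists).
from datasets import load_dataset

dataset = load_dataset("csv", data_files={"train": "file.csv"})["train"]
print(dataset.column_names)  # ["header1", "header2"] for the fixture above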
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
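# Minimal usage sketch of the inverse scheduler above (assuming `diffusers`
# and `torch` are installed): a random tensor stands in for the UNet noise
# prediction at each step.
import torch

scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

sample = torch.randn(1, 4, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for unet(sample, t)
    sample = scheduler.step(model_output, int(t), sample).prev_sample
print(sample.shape)  # torch.Size([1, 4, 8, 8])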
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.

    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.

        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
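# Quick usage sketch for power_iteration above: the dominant eigenpair of a
# small symmetric matrix (largest eigenvalue of this matrix is 3).
import numpy as np

matrix = np.array([[2.0, 1.0], [1.0, 2.0]])
eigen_value, eigen_vector = power_iteration(matrix, np.array([1.0, 0.0]))
print(round(eigen_value, 6))  # ~3.0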
def solution(n: int = 2000000) -> int:
    # Sieve of Eratosthenes: 0 marks "still possibly prime", 1 marks composite.
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i

    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
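# Sanity-check sketch for solution() above: compare against a brute-force
# primality sum for a small bound (primes below 10 are 2+3+5+7 = 17).
def is_prime(k: int) -> bool:
    return k >= 2 and all(k % d for d in range(2, int(k**0.5) + 1))

assert solution(10) == sum(k for k in range(10) if is_prime(k))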
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def _lowerCamelCase ( lowerCamelCase_ : Union[str, Any] ): """simple docstring""" if isinstance(_A , collections.abc.Iterable ): return x return (x, x) @require_flax class __SCREAMING_SNAKE_CASE : '''simple docstring''' def _UpperCamelCase ( self , snake_case_ , snake_case_ ): '''simple docstring''' pass def _UpperCamelCase ( self ): '''simple docstring''' pass def _UpperCamelCase ( self ): '''simple docstring''' pass def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' UpperCAmelCase_ : str = np.abs((a - b) ).max() self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , **snake_case_ ): '''simple docstring''' UpperCAmelCase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) ) def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , **snake_case_ ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = {'vision_model': vision_model, 'text_model': text_model} UpperCAmelCase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , **snake_case_ ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) 
UpperCAmelCase_ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model} UpperCAmelCase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = after_output[0] UpperCAmelCase_ : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 ) def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , **snake_case_ ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : str = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = {'vision_model': vision_model, 'text_model': text_model} UpperCAmelCase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = model( input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = output.vision_model_output.attentions self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ : Tuple = to_atuple(vision_model.config.image_size ) UpperCAmelCase_ : List[Any] = to_atuple(vision_model.config.patch_size ) UpperCAmelCase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) UpperCAmelCase_ : List[Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) UpperCAmelCase_ : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' pt_model.to(_SCREAMING_SNAKE_CASE ) pt_model.eval() # prepare inputs UpperCAmelCase_ : List[Any] = inputs_dict UpperCAmelCase_ : Optional[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): UpperCAmelCase_ : Dict = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple() UpperCAmelCase_ : int = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 
len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = VisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE ) pt_model_loaded.to(_SCREAMING_SNAKE_CASE ) pt_model_loaded.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4E-2 ) def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' UpperCAmelCase_ : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = fx_state self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params ) self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : str = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE ) def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE ) def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs() self.check_save_load(**_SCREAMING_SNAKE_CASE ) def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : int = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE ) @is_pt_flax_cross_test def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs() UpperCAmelCase_ : Optional[int] = config_inputs_dict.pop('vision_config' ) UpperCAmelCase_ : Dict = config_inputs_dict.pop('text_config' ) UpperCAmelCase_ : int = config_inputs_dict self.check_equivalence_pt_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.check_equivalence_flax_to_pt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : str = 
self.get_pretrained_model_and_inputs() UpperCAmelCase_ : Optional[Any] = model_a(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = model_a(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = after_outputs[0] UpperCAmelCase_ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 ) @require_flax class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , ) UpperCAmelCase_ : Optional[Any] = 1_3 UpperCAmelCase_ : List[str] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) UpperCAmelCase_ : Optional[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) UpperCAmelCase_ : int = random_attention_mask([batch_size, 4] ) UpperCAmelCase_ : List[str] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def _UpperCamelCase ( self , snake_case_ , snake_case_ ): '''simple docstring''' UpperCAmelCase_ : Dict = FlaxViTModel(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = FlaxBertModel(_SCREAMING_SNAKE_CASE ) return vision_model, text_model def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : List[str] = FlaxViTModelTester(self ) UpperCAmelCase_ : List[Any] = FlaxBertModelTester(self ) UpperCAmelCase_ : Union[str, Any] = vit_model_tester.prepare_config_and_inputs() UpperCAmelCase_ : List[str] = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ : int = vision_config_and_inputs UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , ) UpperCAmelCase_ : int = 1_3 UpperCAmelCase_ : int = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) UpperCAmelCase_ : Optional[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) UpperCAmelCase_ : str = random_attention_mask([batch_size, 4] ) UpperCAmelCase_ : Union[str, Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def _UpperCamelCase ( self , snake_case_ , snake_case_ ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = FlaxCLIPVisionModel(_SCREAMING_SNAKE_CASE ) 
UpperCAmelCase_ : int = FlaxBertModel(_SCREAMING_SNAKE_CASE ) return vision_model, text_model def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : Tuple = FlaxCLIPVisionModelTester(self ) UpperCAmelCase_ : List[Any] = FlaxBertModelTester(self ) UpperCAmelCase_ : List[str] = clip_model_tester.prepare_config_and_inputs() UpperCAmelCase_ : List[Any] = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ : Any = vision_config_and_inputs UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : str = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 ) UpperCAmelCase_ : int = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' ) UpperCAmelCase_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) UpperCAmelCase_ : Optional[Any] = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='np' ) UpperCAmelCase_ : List[Any] = model(**_SCREAMING_SNAKE_CASE ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) UpperCAmelCase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
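# Hedged end-to-end sketch mirroring the integration test above (requires
# `transformers` with Flax, network access for the model download, and the
# COCO fixture image path used by the test suite).
import numpy as np
from PIL import Image
from transformers import FlaxVisionTextDualEncoderModel, VisionTextDualEncoderProcessor

model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(
    text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
)
outputs = model(**inputs)
print(np.asarray(outputs.logits_per_image))  # one similarity score per text for the single image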
'''simple docstring''' from manim import * class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ): '''simple docstring''' def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 ) UpperCAmelCase_ : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) UpperCAmelCase_ : List[str] = Rectangle(height=0.25 , width=0.25 ) UpperCAmelCase_ : Any = [mem.copy() for i in range(6 )] UpperCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )] UpperCAmelCase_ : Optional[int] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : Any = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : str = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : Any = Text('CPU' , font_size=2_4 ) UpperCAmelCase_ : Tuple = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(snake_case_ ) UpperCAmelCase_ : str = [mem.copy() for i in range(4 )] UpperCAmelCase_ : Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : List[str] = Text('GPU' , font_size=2_4 ) UpperCAmelCase_ : Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) gpu.move_to([-1, -1, 0] ) self.add(snake_case_ ) UpperCAmelCase_ : str = [mem.copy() for i in range(6 )] UpperCAmelCase_ : Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : str = Text('Model' , font_size=2_4 ) UpperCAmelCase_ : Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) model.move_to([3, -1.0, 0] ) self.add(snake_case_ ) UpperCAmelCase_ : str = [] UpperCAmelCase_ : Optional[Any] = [] for i, rect in enumerate(snake_case_ ): UpperCAmelCase_ : str = fill.copy().set_fill(snake_case_ , opacity=0.8 ) target.move_to(snake_case_ ) model_arr.append(snake_case_ ) UpperCAmelCase_ : int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(snake_case_ ) self.add(*snake_case_ , *snake_case_ ) UpperCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )] UpperCAmelCase_ : List[str] = [meta_mem.copy() for i in range(6 )] UpperCAmelCase_ : Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : Optional[Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : Tuple = Text('Disk' , font_size=2_4 ) UpperCAmelCase_ : Union[str, Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) disk.move_to([-4, -1.25, 0] ) self.add(snake_case_ , snake_case_ ) UpperCAmelCase_ : List[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCAmelCase_ : Any = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) self.add(snake_case_ , snake_case_ ) UpperCAmelCase_ : Dict = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , ) blue_text.next_to(snake_case_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(snake_case_ ) UpperCAmelCase_ : Optional[Any] = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=2_4 , ) step_a.move_to([2, 2, 0] ) self.play(Write(snake_case_ ) ) 
UpperCAmelCase_ : Tuple = Square(0.3 ) input.set_fill(snake_case_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , snake_case_ , buff=0.5 ) self.play(Write(snake_case_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=snake_case_ , buff=0.02 ) self.play(MoveToTarget(snake_case_ ) ) self.play(FadeOut(snake_case_ ) ) UpperCAmelCase_ : Any = Arrow(start=snake_case_ , end=snake_case_ , color=snake_case_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , snake_case_ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) UpperCAmelCase_ : List[str] = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=2_4 , ) step_a.move_to([2, 2, 0] ) self.play(Write(snake_case_ , run_time=3 ) ) UpperCAmelCase_ : List[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(snake_case_ ) , Circumscribe(model_arr[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(model_cpu_arr[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) UpperCAmelCase_ : Union[str, Any] = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , snake_case_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) UpperCAmelCase_ : Tuple = AnimationGroup( FadeOut(snake_case_ , run_time=0.5 ) , MoveToTarget(snake_case_ , run_time=0.5 ) , FadeIn(snake_case_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(snake_case_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: UpperCAmelCase_ : Any = 0.7 self.play( Circumscribe(model_arr[i] , **snake_case_ ) , Circumscribe(cpu_left_col_base[i] , **snake_case_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(model_arr[i + 1] , color=snake_case_ , **snake_case_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=snake_case_ , **snake_case_ ) , Circumscribe(cpu_left_col_base[-1] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) UpperCAmelCase_ : Any = a_c UpperCAmelCase_ : int = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(snake_case_ ) , FadeOut(snake_case_ , run_time=0.5 ) , ) UpperCAmelCase_ : Optional[Any] = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=2_4 ) step_a.move_to([2, 2, 0] ) self.play(Write(snake_case_ , run_time=3 ) , MoveToTarget(snake_case_ ) ) self.wait()
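# Hedged rendering sketch for the Manim scene above (assuming the
# manim-community package is installed): a Scene subclass is usually rendered
# from the CLI (`manim -pql file.py SceneName`) or programmatically as below.
# The tiny scene here is illustrative, not the one defined above.
from manim import Create, Scene, Square

class DemoScene(Scene):
    def construct(self):
        self.play(Create(Square()))

DemoScene().render()  # writes the animation under ./media/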
"""simple docstring""" from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0.0 , __UpperCAmelCase = None , __UpperCAmelCase = "geglu" , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = "layer_norm" , __UpperCAmelCase = False , ): '''simple docstring''' super().__init__() __UpperCamelCase = only_cross_attention __UpperCamelCase = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero' __UpperCamelCase = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to' F' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: __UpperCamelCase = AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase ) elif self.use_ada_layer_norm_zero: __UpperCamelCase = AdaLayerNormZero(__UpperCAmelCase , __UpperCAmelCase ) else: __UpperCamelCase = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase ) __UpperCamelCase = Attention( query_dim=__UpperCAmelCase , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__UpperCAmelCase , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. __UpperCamelCase = ( AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase ) if self.use_ada_layer_norm else nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase ) ) __UpperCamelCase = Attention( query_dim=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , upcast_attention=__UpperCAmelCase , ) # is self-attn if encoder_hidden_states is none else: __UpperCamelCase = None __UpperCamelCase = None # 3. 
Feed-forward __UpperCamelCase = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase ) __UpperCamelCase = FeedForward(__UpperCAmelCase , dropout=__UpperCAmelCase , activation_fn=__UpperCAmelCase , final_dropout=__UpperCAmelCase ) # let chunk size default to None __UpperCamelCase = None __UpperCamelCase = 0 def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = chunk_size __UpperCamelCase = dim def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ): '''simple docstring''' if self.use_ada_layer_norm: __UpperCamelCase = self.norma(__UpperCAmelCase , __UpperCAmelCase ) elif self.use_ada_layer_norm_zero: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.norma( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hidden_dtype=hidden_states.dtype ) else: __UpperCamelCase = self.norma(__UpperCAmelCase ) __UpperCamelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {} __UpperCamelCase = self.attna( __UpperCAmelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) if self.use_ada_layer_norm_zero: __UpperCamelCase = gate_msa.unsqueeze(1 ) * attn_output __UpperCamelCase = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: __UpperCamelCase = ( self.norma(__UpperCAmelCase , __UpperCAmelCase ) if self.use_ada_layer_norm else self.norma(__UpperCAmelCase ) ) __UpperCamelCase = self.attna( __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCamelCase = attn_output + hidden_states # 3. Feed-forward __UpperCamelCase = self.norma(__UpperCAmelCase ) if self.use_ada_layer_norm_zero: __UpperCamelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' 
) __UpperCamelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size __UpperCamelCase = torch.cat( [self.ff(__UpperCAmelCase ) for hid_slice in norm_hidden_states.chunk(__UpperCAmelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: __UpperCamelCase = self.ff(__UpperCAmelCase ) if self.use_ada_layer_norm_zero: __UpperCamelCase = gate_mlp.unsqueeze(1 ) * ff_output __UpperCamelCase = ff_output + hidden_states return hidden_states class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 4 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = "geglu" , __UpperCAmelCase = False , ): '''simple docstring''' super().__init__() __UpperCamelCase = int(dim * mult ) __UpperCamelCase = dim_out if dim_out is not None else dim if activation_fn == "gelu": __UpperCamelCase = GELU(__UpperCAmelCase , __UpperCAmelCase ) if activation_fn == "gelu-approximate": __UpperCamelCase = GELU(__UpperCAmelCase , __UpperCAmelCase , approximate='tanh' ) elif activation_fn == "geglu": __UpperCamelCase = GEGLU(__UpperCAmelCase , __UpperCAmelCase ) elif activation_fn == "geglu-approximate": __UpperCamelCase = ApproximateGELU(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = nn.ModuleList([] ) # project in self.net.append(__UpperCAmelCase ) # project dropout self.net.append(nn.Dropout(__UpperCAmelCase ) ) # project out self.net.append(nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(__UpperCAmelCase ) ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' for module in self.net: __UpperCamelCase = module(__UpperCAmelCase ) return hidden_states class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = "none" ): '''simple docstring''' super().__init__() __UpperCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = approximate def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(__UpperCAmelCase , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.proj(__UpperCAmelCase ) __UpperCamelCase = self.gelu(__UpperCAmelCase ) return hidden_states class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __UpperCamelCase = nn.Linear(__UpperCAmelCase , dim_out * 2 ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(__UpperCAmelCase ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.proj(__UpperCAmelCase ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(__UpperCAmelCase ) class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __UpperCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.proj(__UpperCAmelCase ) return x * torch.sigmoid(1.7_0_2 * x ) class 
__lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __UpperCamelCase = nn.Embedding(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = nn.SiLU() __UpperCamelCase = nn.Linear(__UpperCAmelCase , embedding_dim * 2 ) __UpperCamelCase = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.linear(self.silu(self.emb(__UpperCAmelCase ) ) ) __UpperCamelCase , __UpperCamelCase = torch.chunk(__UpperCAmelCase , 2 ) __UpperCamelCase = self.norm(__UpperCAmelCase ) * (1 + scale) + shift return x class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __UpperCamelCase = CombinedTimestepLabelEmbeddings(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = nn.SiLU() __UpperCamelCase = nn.Linear(__UpperCAmelCase , 6 * embedding_dim , bias=__UpperCAmelCase ) __UpperCamelCase = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase , eps=1E-6 ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ): '''simple docstring''' __UpperCamelCase = self.linear(self.silu(self.emb(__UpperCAmelCase , __UpperCAmelCase , hidden_dtype=__UpperCAmelCase ) ) ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = emb.chunk(6 , dim=1 ) __UpperCamelCase = self.norm(__UpperCAmelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 1E-5 ): '''simple docstring''' super().__init__() __UpperCamelCase = num_groups __UpperCamelCase = eps if act_fn is None: __UpperCamelCase = None else: __UpperCamelCase = get_activation(__UpperCAmelCase ) __UpperCamelCase = nn.Linear(__UpperCAmelCase , out_dim * 2 ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self.act: __UpperCamelCase = self.act(__UpperCAmelCase ) __UpperCamelCase = self.linear(__UpperCAmelCase ) __UpperCamelCase = emb[:, :, None, None] __UpperCamelCase , __UpperCamelCase = emb.chunk(2 , dim=1 ) __UpperCamelCase = F.group_norm(__UpperCAmelCase , self.num_groups , eps=self.eps ) __UpperCamelCase = x * (1 + scale) + shift return x
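# Minimal usage sketch for the transformer-block family above; upstream this
# is diffusers.models.attention.BasicTransformerBlock, so the released
# `diffusers` package is used here and the dimensions are illustrative.
import torch
from diffusers.models.attention import BasicTransformerBlock

block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
hidden_states = torch.randn(2, 10, 64)  # (batch, sequence, dim)
out = block(hidden_states)  # self-attention only: no cross_attention_dim configured
print(out.shape)  # torch.Size([2, 10, 64])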
"""simple docstring""" def A ( snake_case :int ) -> list: # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError('The given input must be positive' ) # get the generated string sequence __UpperCamelCase = gray_code_sequence_string(snake_case ) # # convert them to integers for i in range(len(snake_case ) ): __UpperCamelCase = int(sequence[i] , 2 ) return sequence def A ( snake_case :int ) -> list: # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __UpperCamelCase = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __UpperCamelCase = gray_code_sequence_string(bit_count - 1 ) __UpperCamelCase = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __UpperCamelCase = '0' + smaller_sequence[i] sequence.append(snake_case ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __UpperCamelCase = '1' + smaller_sequence[i] sequence.append(snake_case ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
from __future__ import annotations

import unittest

from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.distilbert.modeling_tf_distilbert import (
        TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFDistilBertForMaskedLM,
        TFDistilBertForMultipleChoice,
        TFDistilBertForQuestionAnswering,
        TFDistilBertForSequenceClassification,
        TFDistilBertForTokenClassification,
        TFDistilBertModel,
    )


class TFDistilBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
357
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide number_of_bytes into the given number of byte-range partitions."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
140
0
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Find the day of the week for a date given as mm-dd-yyyy or mm/dd/yyyy."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
214
import argparse
import shlex

import runhouse as rh


if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
214
1
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _snake_case : Union[str, Any] = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class A ( _a ,unittest.TestCase ): lowercase_ = DebertaVaTokenizer lowercase_ = DebertaVaTokenizerFast lowercase_ = True lowercase_ = True def __lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _a = DebertaVaTokenizer(lowerCAmelCase_ , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : str ) -> int: """simple docstring""" _a = '''this is a test''' _a = '''this is a test''' return input_text, output_text def __lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" _a = '''<pad>''' _a = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" _a = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(lowerCAmelCase_ ) , 3_00_01 ) def __lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 ) def __lowerCAmelCase ( self : str ) -> str: """simple docstring""" _a = ''' \tHeLLo!how \n Are yoU? ''' _a = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on _a = DebertaVaTokenizer(lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ ) _a = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = DebertaVaTokenizerFast(lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ ) _a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def __lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def __lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" pass def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]: """simple docstring""" _a = '''I was born in 92000, and this is falsé.''' _a = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on _a = DebertaVaTokenizer(lowerCAmelCase_ , split_by_punct=lowerCAmelCase_ ) _a = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = DebertaVaTokenizerFast(lowerCAmelCase_ , split_by_punct=lowerCAmelCase_ ) _a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) 
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" _a = '''I was born in 92000, and this is falsé.''' _a = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on _a = DebertaVaTokenizer(lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , split_by_punct=lowerCAmelCase_ ) _a = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = DebertaVaTokenizerFast(lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , split_by_punct=lowerCAmelCase_ ) _a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] ) -> int: """simple docstring""" _a = '''I was born in 92000, and this is falsé.''' _a = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on _a = DebertaVaTokenizer(lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , split_by_punct=lowerCAmelCase_ ) _a = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = DebertaVaTokenizerFast(lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , split_by_punct=lowerCAmelCase_ ) _a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] ) -> Tuple: """simple docstring""" _a = '''I was born in 92000, and this is falsé.''' _a = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on _a = DebertaVaTokenizer(lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , split_by_punct=lowerCAmelCase_ ) _a = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = DebertaVaTokenizerFast(lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , split_by_punct=lowerCAmelCase_ ) _a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" _a = ''' \tHeLLo!how \n Are yoU? 
''' _a = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on _a = DebertaVaTokenizer(lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , split_by_punct=lowerCAmelCase_ ) _a = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = DebertaVaTokenizerFast(lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , split_by_punct=lowerCAmelCase_ ) _a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" _a = self.get_tokenizer() _a = self.get_rust_tokenizer() _a = '''I was born in 92000, and this is falsé.''' _a = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) _a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _a = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = self.get_rust_tokenizer() _a = tokenizer.encode(lowerCAmelCase_ ) _a = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> str: """simple docstring""" _a = '''This is a test''' _a = [13, 1, 43_98, 25, 21, 12_89] _a = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] _a = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] _a = DebertaVaTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ ) _a = DebertaVaTokenizerFast(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ ) _a = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # fmt: off _a = '''I was born in 92000, and this is falsé.''' _a = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] _a = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] _a = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on _a = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) 
_a = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _a = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" _a = DebertaVaTokenizer(lowerCAmelCase_ ) _a = tokenizer.encode('''sequence builders''' ) _a = tokenizer.encode('''multi-sequence build''' ) _a = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) _a = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , lowerCAmelCase_ ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , lowerCAmelCase_ , ) @slow def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" _a = {'''input_ids''': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase_ , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
358
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
179
0
import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
225
import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset

from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}


def sanity_checks(args):
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
303
0
def solution(n: int = 1000) -> int:
    """Return the largest product a * b * c for a Pythagorean triplet with a + b + c = n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
365
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
22
0
"""simple docstring""" def _lowerCamelCase( a , a , a ): __a = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def _lowerCamelCase( ): print(sum_of_series(1 , 1 , 1_0 ) ) if __name__ == "__main__": import doctest doctest.testmod()
261
"""simple docstring""" import copy import re class snake_case__ : _snake_case : Dict = """hp""" _snake_case : List[str] = {} _snake_case : int = None @classmethod def a__ ( cls , lowerCamelCase , lowerCamelCase ): __a = prefix __a = defaults cls.build_naming_info() @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): if len(lowerCamelCase ) == 0: return "" __a = None if any(char.isdigit() for char in word ): raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(lowerCamelCase ) + 1 ): __a = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: __a = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCamelCase ): __a = "" while integer != 0: __a = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s __a = 0 while True: __a = word + "#" + int_to_alphabetic(lowerCamelCase ) if sword in info["reverse_short_word"]: continue else: __a = sword break __a = short_word __a = word return short_word @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = param_name.split("_" ) __a = [TrialShortNamer.shortname_for_word(lowerCamelCase , lowerCamelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name __a = ["", "_"] for separator in separators: __a = separator.join(lowerCamelCase ) if shortname not in info["reverse_short_param"]: __a = shortname __a = param_name return shortname return param_name @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = TrialShortNamer.shortname_for_key(lowerCamelCase , lowerCamelCase ) __a = short_name __a = param_name @classmethod def a__ ( cls ): if cls.NAMING_INFO is not None: return __a = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } __a = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(lowerCamelCase , lowerCamelCase ) __a = info @classmethod def a__ ( cls , lowerCamelCase ): cls.build_naming_info() assert cls.PREFIX is not None __a = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F"You should provide a default value for the param name {k} with value {v}" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue __a = cls.NAMING_INFO["short_param"][k] if isinstance(lowerCamelCase , lowerCamelCase ): __a = 1 if v else 0 __a = "" if isinstance(lowerCamelCase , (int, float) ) else "-" __a = F"{key}{sep}{v}" name.append(lowerCamelCase ) return "_".join(lowerCamelCase ) @classmethod def a__ ( cls , lowerCamelCase ): __a = repr[len(cls.PREFIX ) + 1 :] if repr == "": __a = [] else: __a = repr.split("_" ) __a = {} for value in values: if "-" in value: __a , __a = value.split("-" ) else: __a = re.sub("[0-9.]" , "" , lowerCamelCase ) __a = float(re.sub("[^0-9.]" , "" , lowerCamelCase ) ) __a = cls.NAMING_INFO["reverse_short_param"][p_k] __a = p_v for k in cls.DEFAULTS: if k not in parameters: __a = cls.DEFAULTS[k] return parameters
261
1
"""simple docstring""" # This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests lowerCAmelCase__ = open # noqa: we just need to have a builtin inside this module to test it properly
133
"""simple docstring""" def a__ ( SCREAMING_SNAKE_CASE : list[int] ): '''simple docstring''' lowerCAmelCase : str = len(SCREAMING_SNAKE_CASE ) for i in range(SCREAMING_SNAKE_CASE ): for j in range(i + 1 , SCREAMING_SNAKE_CASE ): if numbers[j] < numbers[i]: lowerCAmelCase , lowerCAmelCase : Any = numbers[j], numbers[i] return numbers if __name__ == "__main__": lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip() lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')] print(exchange_sort(unsorted))
133
1
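Quick sanity checks for exchange_sort (illustrative only); the algorithm compares every pair, so it is O(n^2) and suits only small inputs:

assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([]) == []
assert exchange_sort([1, 1, 0]) == [0, 1, 1]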
'''simple docstring''' from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name a : Optional[Any] = """ Examples: ```py >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline >>> import torch >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\") >>> pipe_prior.to(\"cuda\") >>> prompt = \"red cat, 4k photo\" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> negative_image_emb = out.negative_image_embeds >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\") >>> pipe.to(\"cuda\") >>> image = pipe( ... prompt, ... image_embeds=image_emb, ... negative_image_embeds=negative_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... ).images >>> image[0].save(\"cat.png\") ``` """ def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=8 ) -> List[str]: UpperCAmelCase : List[Any] = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 UpperCAmelCase : int = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class UpperCamelCase_ ( __magic_name__ ): def __init__( self , A , A , A , A , A , ) -> List[str]: super().__init__() self.register_modules( text_encoder=A , tokenizer=A , unet=A , scheduler=A , movq=A , ) UpperCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _lowercase( self , A , A , A , A , A , A ) -> List[Any]: if latents is None: UpperCAmelCase : List[Any] = randn_tensor(A , generator=A , device=A , dtype=A ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) UpperCAmelCase : List[str] = latents.to(A ) UpperCAmelCase : Dict = latents * scheduler.init_noise_sigma return latents def _lowercase( self , A , A , A , A , A=None , ) -> Union[str, Any]: UpperCAmelCase : Any = len(A ) if isinstance(A , A ) else 1 # get prompt text embeddings UpperCAmelCase : str = self.tokenizer( A , padding="""max_length""" , truncation=A , max_length=77 , return_attention_mask=A , add_special_tokens=A , return_tensors="""pt""" , ) UpperCAmelCase : List[str] = text_inputs.input_ids UpperCAmelCase : Tuple = self.tokenizer(A , padding="""longest""" , return_tensors="""pt""" ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(A , A ): UpperCAmelCase : Tuple = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Tuple = text_input_ids.to(A ) UpperCAmelCase : Union[str, Any] = text_inputs.attention_mask.to(A ) UpperCAmelCase , UpperCAmelCase : Optional[int] = self.text_encoder( input_ids=A , attention_mask=A ) UpperCAmelCase : List[Any] = prompt_embeds.repeat_interleave(A , dim=0 ) UpperCAmelCase : int = text_encoder_hidden_states.repeat_interleave(A , dim=0 ) UpperCAmelCase : 
Tuple = text_mask.repeat_interleave(A , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase : List[str] if negative_prompt is None: UpperCAmelCase : Optional[Any] = [""""""] * batch_size elif type(A ) is not type(A ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !=''' f''' {type(A )}.''' ) elif isinstance(A , A ): UpperCAmelCase : List[str] = [negative_prompt] elif batch_size != len(A ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: UpperCAmelCase : Union[str, Any] = negative_prompt UpperCAmelCase : Union[str, Any] = self.tokenizer( A , padding="""max_length""" , max_length=77 , truncation=A , return_attention_mask=A , add_special_tokens=A , return_tensors="""pt""" , ) UpperCAmelCase : Dict = uncond_input.input_ids.to(A ) UpperCAmelCase : Union[str, Any] = uncond_input.attention_mask.to(A ) UpperCAmelCase , UpperCAmelCase : Dict = self.text_encoder( input_ids=A , attention_mask=A ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : Any = negative_prompt_embeds.shape[1] UpperCAmelCase : List[Any] = negative_prompt_embeds.repeat(1 , A ) UpperCAmelCase : str = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A ) UpperCAmelCase : Union[str, Any] = uncond_text_encoder_hidden_states.shape[1] UpperCAmelCase : int = uncond_text_encoder_hidden_states.repeat(1 , A , 1 ) UpperCAmelCase : List[Any] = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt , A , -1 ) UpperCAmelCase : int = uncond_text_mask.repeat_interleave(A , dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : str = torch.cat([negative_prompt_embeds, prompt_embeds] ) UpperCAmelCase : int = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) UpperCAmelCase : Dict = torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def _lowercase( self , A=0 ) -> Tuple: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) UpperCAmelCase : Tuple = torch.device(f'''cuda:{gpu_id}''' ) UpperCAmelCase : Optional[Any] = [ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(A , A ) def _lowercase( self , A=0 ) -> Union[str, Any]: if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) UpperCAmelCase : List[str] = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=A ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase : List[str] = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: UpperCAmelCase , UpperCAmelCase : Any = cpu_offload_with_hook(A , A , prev_module_hook=A ) if self.safety_checker is not None: UpperCAmelCase , UpperCAmelCase : Tuple = cpu_offload_with_hook(self.safety_checker , A , prev_module_hook=A ) # We'll offload the last model manually. UpperCAmelCase : Optional[Any] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowercase( self ) -> Optional[Any]: if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(A , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(A ) def __call__( self , A , A , A , A = None , A = 512 , A = 512 , A = 100 , A = 4.0 , A = 1 , A = None , A = None , A = "pil" , A = True , ) -> Dict: if isinstance(A , A ): UpperCAmelCase : Optional[int] = 1 elif isinstance(A , A ): UpperCAmelCase : List[Any] = len(A ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' ) UpperCAmelCase : int = self._execution_device UpperCAmelCase : Optional[Any] = batch_size * num_images_per_prompt UpperCAmelCase : List[Any] = guidance_scale > 1.0 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self._encode_prompt( A , A , A , A , A ) if isinstance(A , A ): UpperCAmelCase : str = torch.cat(A , dim=0 ) if isinstance(A , A ): UpperCAmelCase : str = torch.cat(A , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase : Optional[Any] = image_embeds.repeat_interleave(A , dim=0 ) UpperCAmelCase : List[Any] = negative_image_embeds.repeat_interleave(A , dim=0 ) UpperCAmelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to( dtype=prompt_embeds.dtype , device=A ) self.scheduler.set_timesteps(A , device=A ) UpperCAmelCase : Tuple = self.scheduler.timesteps UpperCAmelCase : Union[str, Any] = 
self.unet.config.in_channels UpperCAmelCase , UpperCAmelCase : Any = get_new_h_w(A , A , self.movq_scale_factor ) # create initial latent UpperCAmelCase : Any = self.prepare_latents( (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , A , A , A , self.scheduler , ) for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase : List[str] = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds} UpperCAmelCase : Any = self.unet( sample=A , timestep=A , encoder_hidden_states=A , added_cond_kwargs=A , return_dict=A , )[0] if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase , UpperCAmelCase : Optional[int] = noise_pred.chunk(2 ) UpperCAmelCase , UpperCAmelCase : Any = variance_pred.chunk(2 ) UpperCAmelCase : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase : str = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase , UpperCAmelCase : Tuple = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : List[Any] = self.scheduler.step( A , A , A , generator=A , ).prev_sample # post-processing UpperCAmelCase : List[str] = self.movq.decode(A , force_not_quantize=A )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: UpperCAmelCase : List[Any] = image * 0.5 + 0.5 UpperCAmelCase : Union[str, Any] = image.clamp(0 , 1 ) UpperCAmelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase : Optional[int] = self.numpy_to_pil(A ) if not return_dict: return (image,) return ImagePipelineOutput(images=A )
265
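The heart of the pipeline's sampling loop above is the classifier-free guidance update: the batch holds [unconditional, conditional] halves, and the final prediction moves the unconditional estimate toward the text-conditioned one. A self-contained sketch of just that step:

import torch


def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # split the doubled batch back into its unconditional and conditional halves
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)


# e.g. with guidance_scale=4.0, the pipeline's default in __call__
guided = apply_cfg(torch.randn(2, 4, 64, 64), guidance_scale=4.0)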
def actual_power(a: int, b: int) -> int:
    """Compute a**b for b >= 0 by exponentiation by squaring."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)
    if b % 2 == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Extend actual_power to negative exponents."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
265
1
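An equivalent iterative formulation of exponentiation by squaring, shown as a sketch for comparison (not part of the original file); it also uses O(log b) multiplications but no recursion:

def power_iterative(a: float, b: int) -> float:
    negative = b < 0
    b = abs(b)
    result = 1.0
    while b:
        if b & 1:  # multiply in the current square when the bit is set
            result *= a
        a *= a
        b >>= 1
    return 1 / result if negative else result


assert power_iterative(-2, -3) == -0.125  # matches power(-2, -3)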
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class __magic_name__ ( __lowerCAmelCase): A: Tuple = "char" A: List[str] = "bpe" A: Any = "wp" __UpperCamelCase : Tuple = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class __magic_name__ ( __lowerCAmelCase): A: str = ["image_processor", "char_tokenizer"] A: Tuple = "ViTImageProcessor" A: int = "MgpstrTokenizer" def __init__( self : Tuple , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : Optional[Any]=None , **lowerCamelCase__ : Tuple ) -> Tuple: '''simple docstring''' UpperCamelCase__ : Optional[Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , lowerCamelCase__ , ) UpperCamelCase__ : Optional[Any] = kwargs.pop('''feature_extractor''' ) UpperCamelCase__ : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) UpperCamelCase__ : List[Any] = tokenizer UpperCamelCase__ : Tuple = AutoTokenizer.from_pretrained('''gpt2''' ) UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(lowerCamelCase__ , lowerCamelCase__ ) def __call__( self : str , lowerCamelCase__ : int=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : str=None , **lowerCamelCase__ : Union[str, Any] ) -> str: '''simple docstring''' if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: UpperCamelCase__ : List[str] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ) if text is not None: UpperCamelCase__ : Tuple = self.char_tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ) if text is None: return inputs elif images is None: return encodings else: UpperCamelCase__ : str = encodings['''input_ids'''] return inputs def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : str ) -> List[str]: '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = sequences UpperCamelCase__ : Any = char_preds.size(0 ) UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = self._decode_helper(lowerCamelCase__ , '''char''' ) UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self._decode_helper(lowerCamelCase__ , '''bpe''' ) UpperCamelCase__ , UpperCamelCase__ : Optional[int] = self._decode_helper(lowerCamelCase__ , '''wp''' ) UpperCamelCase__ : List[str] = [] UpperCamelCase__ : int = [] for i in range(lowerCamelCase__ ): UpperCamelCase__ : Any = [char_scores[i], bpe_scores[i], wp_scores[i]] UpperCamelCase__ : Union[str, Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] UpperCamelCase__ : Optional[Any] = scores.index(max(lowerCamelCase__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) UpperCamelCase__ : List[Any] = {} UpperCamelCase__ : str = final_strs UpperCamelCase__ : List[str] = final_scores UpperCamelCase__ : List[Any] = char_strs UpperCamelCase__ : Optional[Any] = bpe_strs UpperCamelCase__ : Tuple = wp_strs return out def UpperCAmelCase__ ( self : 
List[str] , lowerCamelCase__ : str , lowerCamelCase__ : Dict ) -> Optional[Any]: '''simple docstring''' if format == DecodeType.CHARACTER: UpperCamelCase__ : Union[str, Any] = self.char_decode UpperCamelCase__ : Optional[int] = 1 UpperCamelCase__ : Optional[Any] = '''[s]''' elif format == DecodeType.BPE: UpperCamelCase__ : List[Any] = self.bpe_decode UpperCamelCase__ : Optional[int] = 2 UpperCamelCase__ : str = '''#''' elif format == DecodeType.WORDPIECE: UpperCamelCase__ : Any = self.wp_decode UpperCamelCase__ : int = 102 UpperCamelCase__ : int = '''[SEP]''' else: raise ValueError(F"Format {format} is not supported." ) UpperCamelCase__ , UpperCamelCase__ : Optional[int] = [], [] UpperCamelCase__ : Tuple = pred_logits.size(0 ) UpperCamelCase__ : Optional[Any] = pred_logits.size(1 ) UpperCamelCase__ , UpperCamelCase__ : Tuple = pred_logits.topk(1 , dim=-1 , largest=lowerCamelCase__ , sorted=lowerCamelCase__ ) UpperCamelCase__ : List[str] = preds_index.view(-1 , lowerCamelCase__ )[:, 1:] UpperCamelCase__ : str = decoder(lowerCamelCase__ ) UpperCamelCase__ , UpperCamelCase__ : Optional[int] = torch.nn.functional.softmax(lowerCamelCase__ , dim=2 ).max(dim=2 ) UpperCamelCase__ : Any = preds_max_prob[:, 1:] for index in range(lowerCamelCase__ ): UpperCamelCase__ : int = preds_str[index].find(lowerCamelCase__ ) UpperCamelCase__ : str = preds_str[index][:pred_eos] UpperCamelCase__ : Dict = preds_index[index].cpu().tolist() UpperCamelCase__ : Dict = pred_index.index(lowerCamelCase__ ) if eos_token in pred_index else -1 UpperCamelCase__ : Optional[Any] = preds_max_prob[index][: pred_eos_index + 1] UpperCamelCase__ : Tuple = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(lowerCamelCase__ ) conf_scores.append(lowerCamelCase__ ) return dec_strs, conf_scores def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Any ) -> List[str]: '''simple docstring''' UpperCamelCase__ : Dict = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(lowerCamelCase__ )] return decode_strs def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Any ) -> Dict: '''simple docstring''' return self.bpe_tokenizer.batch_decode(lowerCamelCase__ ) def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Any ) -> str: '''simple docstring''' UpperCamelCase__ : int = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(lowerCamelCase__ )] return decode_strs
51
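The processor's batch_decode picks, per sample, the decoding head (character, BPE, or wordpiece) with the highest confidence. That selection logic in isolation, with made-up values:

candidates = {
    "char": ("ticket", 0.91),
    "bpe": ("tickel", 0.47),
    "wp": ("ticket", 0.88),
}
best_head = max(candidates, key=lambda k: candidates[k][1])
final_str, final_score = candidates[best_head]
assert final_str == "ticket" and best_head == "char"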
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset __UpperCamelCase : Union[str, Any] = "bert-base-cased" __UpperCamelCase : Tuple = "google/pegasus-xsum" __UpperCamelCase : Union[str, Any] = [" Sam ate lunch today.", "Sams lunch ingredients."] __UpperCamelCase : Union[str, Any] = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] __UpperCamelCase : Any = "patrickvonplaten/t5-tiny-random" __UpperCamelCase : List[Any] = "sshleifer/bart-tiny-random" __UpperCamelCase : Any = "sshleifer/tiny-mbart" __UpperCamelCase : Optional[Any] = "sshleifer/tiny-marian-en-de" def _a ( SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : list ): """simple docstring""" UpperCamelCase__ : Optional[Any] = '''\n'''.join(SCREAMING_SNAKE_CASE ) Path(SCREAMING_SNAKE_CASE ).open('''w''' ).writelines(SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" for split in ["train", "val", "test"]: _dump_articles(os.path.join(SCREAMING_SNAKE_CASE , F"{split}.source" ) , SCREAMING_SNAKE_CASE ) _dump_articles(os.path.join(SCREAMING_SNAKE_CASE , F"{split}.target" ) , SCREAMING_SNAKE_CASE ) return tmp_dir class __magic_name__ ( __lowerCAmelCase): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : Dict ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ : Dict = AutoTokenizer.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ : Optional[int] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) UpperCamelCase__ : Tuple = max(len(tokenizer.encode(lowerCamelCase__ ) ) for a in ARTICLES ) UpperCamelCase__ : List[str] = max(len(tokenizer.encode(lowerCamelCase__ ) ) for a in SUMMARIES ) UpperCamelCase__ : int = 4 UpperCamelCase__ : Union[str, Any] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error. UpperCamelCase__ : List[str] = SeqaSeqDataset( lowerCamelCase__ , data_dir=lowerCamelCase__ , type_path='''train''' , max_source_length=lowerCamelCase__ , max_target_length=lowerCamelCase__ , src_lang=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , ) UpperCamelCase__ : Dict = DataLoader(lowerCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place UpperCamelCase__ : Dict = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Tuple ) -> List[Any]: '''simple docstring''' UpperCamelCase__ : Dict = AutoTokenizer.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) UpperCamelCase__ : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase__ ) ) for a in ARTICLES ) UpperCamelCase__ : str = max(len(tokenizer.encode(lowerCamelCase__ ) ) for a in SUMMARIES ) UpperCamelCase__ : Union[str, Any] = 4 UpperCamelCase__ : Optional[int] = LegacySeqaSeqDataset( lowerCamelCase__ , data_dir=lowerCamelCase__ , type_path='''train''' , max_source_length=20 , max_target_length=lowerCamelCase__ , ) UpperCamelCase__ : List[str] = DataLoader(lowerCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def UpperCAmelCase__ ( self : List[str] ) -> Dict: '''simple docstring''' UpperCamelCase__ : Dict = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' ) UpperCamelCase__ : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) UpperCamelCase__ : int = tmp_dir.joinpath('''train.source''' ).open().readlines() UpperCamelCase__ : Tuple = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(lowerCamelCase__ , lowerCamelCase__ , 128 , lowerCamelCase__ ) UpperCamelCase__ : str = {x.name for x in tmp_dir.iterdir()} UpperCamelCase__ : Optional[int] = {x.name for x in save_dir.iterdir()} UpperCamelCase__ : Dict = save_dir.joinpath('''train.source''' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(lowerCamelCase__ ) < len(lowerCamelCase__ ) assert len(lowerCamelCase__ ) == 1 assert len(packed_examples[0] ) == sum(len(lowerCamelCase__ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' ) def UpperCAmelCase__ ( self : str ) -> Optional[Any]: '''simple docstring''' if not FAIRSEQ_AVAILABLE: return UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Tuple = self._get_dataset(max_len=64 ) UpperCamelCase__ : List[str] = 64 UpperCamelCase__ : Optional[int] = ds.make_dynamic_sampler(lowerCamelCase__ , required_batch_size_multiple=lowerCamelCase__ ) UpperCamelCase__ : Union[str, Any] = [len(lowerCamelCase__ ) for x in batch_sampler] assert 
len(set(lowerCamelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(lowerCamelCase__ ) == len(lowerCamelCase__ ) # no dropped or added examples UpperCamelCase__ : Any = DataLoader(lowerCamelCase__ , batch_sampler=lowerCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 ) UpperCamelCase__ : int = [] UpperCamelCase__ : Tuple = [] for batch in data_loader: UpperCamelCase__ : int = batch['''input_ids'''].shape UpperCamelCase__ : Any = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple UpperCamelCase__ : Tuple = np.product(batch['''input_ids'''].shape ) num_src_per_batch.append(lowerCamelCase__ ) if num_src_tokens > (max_tokens * 1.1): failures.append(lowerCamelCase__ ) assert num_src_per_batch[0] == max(lowerCamelCase__ ) if failures: raise AssertionError(F"too many tokens in {len(lowerCamelCase__ )} batches" ) def UpperCAmelCase__ ( self : str ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Tuple = self._get_dataset(max_len=512 ) UpperCamelCase__ : Union[str, Any] = 2 UpperCamelCase__ : Optional[int] = ds.make_sortish_sampler(lowerCamelCase__ , shuffle=lowerCamelCase__ ) UpperCamelCase__ : List[Any] = DataLoader(lowerCamelCase__ , batch_size=lowerCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 ) UpperCamelCase__ : str = DataLoader(lowerCamelCase__ , batch_size=lowerCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase__ ) UpperCamelCase__ : Optional[Any] = tokenizer.pad_token_id def count_pad_tokens(lowerCamelCase__ : int , lowerCamelCase__ : List[Any]="input_ids" ): return [batch[k].eq(lowerCamelCase__ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(lowerCamelCase__ , k='''labels''' ) ) < sum(count_pad_tokens(lowerCamelCase__ , k='''labels''' ) ) assert sum(count_pad_tokens(lowerCamelCase__ ) ) < sum(count_pad_tokens(lowerCamelCase__ ) ) assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ) def UpperCAmelCase__ ( self : int , lowerCamelCase__ : List[Any]=1000 , lowerCamelCase__ : Tuple=128 ) -> str: '''simple docstring''' if os.getenv('''USE_REAL_DATA''' , lowerCamelCase__ ): UpperCamelCase__ : List[str] = '''examples/seq2seq/wmt_en_ro''' UpperCamelCase__ : int = max_len * 2 * 64 if not Path(lowerCamelCase__ ).joinpath('''train.len''' ).exists(): save_len_file(lowerCamelCase__ , lowerCamelCase__ ) else: UpperCamelCase__ : Optional[Any] = '''examples/seq2seq/test_data/wmt_en_ro''' UpperCamelCase__ : Optional[Any] = max_len * 4 save_len_file(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ : Any = SeqaSeqDataset( lowerCamelCase__ , data_dir=lowerCamelCase__ , type_path='''train''' , max_source_length=lowerCamelCase__ , max_target_length=lowerCamelCase__ , n_obs=lowerCamelCase__ , ) return ds, max_tokens, tokenizer def UpperCAmelCase__ ( self : int ) -> str: '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[Any] = self._get_dataset() UpperCamelCase__ : Any = set(DistributedSortishSampler(lowerCamelCase__ , 256 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase__ ) ) UpperCamelCase__ : str = set(DistributedSortishSampler(lowerCamelCase__ , 256 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase__ ) ) assert idsa.intersection(lowerCamelCase__ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def UpperCAmelCase__ ( 
self : Any , lowerCamelCase__ : Tuple ) -> str: '''simple docstring''' UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase__ , use_fast=lowerCamelCase__ ) if tok_name == MBART_TINY: UpperCamelCase__ : Dict = SeqaSeqDataset( lowerCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , ) UpperCamelCase__ : List[Any] = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: UpperCamelCase__ : List[Any] = SeqaSeqDataset( lowerCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , ) UpperCamelCase__ : Optional[Any] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(lowerCamelCase__ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase__ ) == 0
51
1
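The dynamic sampler under test packs examples so each batch stays under a token budget. A toy version of that idea (a sketch, not the repo's DistributedSortishSampler):

def token_budget_batches(lengths, max_tokens):
    # sort longest-first so padding waste per batch stays small
    batches, batch, batch_max = [], [], 0
    for idx, n in sorted(enumerate(lengths), key=lambda x: -x[1]):
        if batch and (len(batch) + 1) * max(batch_max, n) > max_tokens:
            batches.append(batch)
            batch, batch_max = [], 0
        batch.append(idx)
        batch_max = max(batch_max, n)
    if batch:
        batches.append(batch)
    return batches


print(token_budget_batches([5, 9, 3, 8, 2], max_tokens=16))  # [[1], [3, 0], [2, 4]]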
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
308
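A usage sketch for the config class above; it assumes the bert-base-uncased config is reachable (locally cached or via the Hub):

from transformers import AutoConfig, EncoderDecoderConfig

encoder_config = AutoConfig.from_pretrained("bert-base-uncased")
decoder_config = AutoConfig.from_pretrained("bert-base-uncased")

config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
# the classmethod flags the decoder for cross-attention, as its log message says
assert config.decoder.is_decoder and config.decoder.add_cross_attention
# to_dict() nests both sub-configs under the composite model type
assert config.to_dict()["model_type"] == "encoder-decoder"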
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any: """simple docstring""" lowercase : str = parent lowercase : Optional[Any] = batch_size lowercase : Union[str, Any] = seq_length lowercase : str = is_training lowercase : str = use_input_lengths lowercase : List[Any] = use_token_type_ids lowercase : Union[str, Any] = use_labels lowercase : Tuple = gelu_activation lowercase : Dict = sinusoidal_embeddings lowercase : Any = causal lowercase : str = asm lowercase : Optional[Any] = n_langs lowercase : Dict = vocab_size lowercase : Dict = n_special lowercase : List[Any] = hidden_size lowercase : str = num_hidden_layers lowercase : int = num_attention_heads lowercase : str = hidden_dropout_prob lowercase : Dict = attention_probs_dropout_prob lowercase : List[Any] = max_position_embeddings lowercase : Optional[int] = type_sequence_label_size lowercase : List[str] = initializer_range lowercase : List[str] = num_labels lowercase : int = num_choices lowercase : int = summary_type lowercase : Tuple = use_proj lowercase : Union[str, Any] = scope lowercase : List[str] = bos_token_id def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase : str = None if self.use_input_lengths: lowercase : int = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase : Union[str, Any] = None if self.use_token_type_ids: lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase : Union[str, Any] = None lowercase : List[str] = None lowercase : Optional[Any] = None if self.use_labels: lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float() lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowercase : List[Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __a ( self : Any ) -> List[Any]: 
"""simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]: """simple docstring""" lowercase : List[Any] = XLMModel(config=_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , lengths=_A , langs=_A ) lowercase : Dict = model(_A , langs=_A ) lowercase : int = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = XLMWithLMHeadModel(_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]: """simple docstring""" lowercase : Dict = XLMForQuestionAnsweringSimple(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Any = model(_A , start_positions=_A , end_positions=_A ) lowercase : Any = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict: """simple docstring""" lowercase : Optional[int] = XLMForQuestionAnswering(_A ) model.to(_A ) model.eval() lowercase : Any = model(_A ) lowercase : Tuple = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , ) lowercase : Optional[int] = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , ) ((lowercase) , ) : Optional[int] = result_with_labels.to_tuple() lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A ) ((lowercase) , ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) 
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int: """simple docstring""" lowercase : List[str] = XLMForSequenceClassification(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Union[str, Any] = model(_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict: """simple docstring""" lowercase : Optional[Any] = self.num_labels lowercase : Tuple = XLMForTokenClassification(_A ) model.to(_A ) model.eval() lowercase : str = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]: """simple docstring""" lowercase : int = self.num_choices lowercase : List[Any] = XLMForMultipleChoice(config=_A ) model.to(_A ) model.eval() lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase : List[Any] = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Union[str, Any] = config_and_inputs lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Any = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _UpperCamelCase : str = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _UpperCamelCase : Tuple = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the 
slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]: """simple docstring""" lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowercase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) lowercase : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) return inputs_dict def __a ( self : Any ) -> List[str]: """simple docstring""" lowercase : List[str] = XLMModelTester(self ) lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 ) def __a ( self : List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def __a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*_A ) def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*_A ) def __a ( self : List[str] ) -> Optional[int]: """simple docstring""" lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*_A ) def __a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*_A ) def __a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*_A ) def __a ( self : Dict ) -> int: """simple docstring""" lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*_A ) def __a ( self : Any ) -> List[Any]: """simple docstring""" lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*_A ) def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any: """simple docstring""" self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(_A ): # adds PAD dummy token lowercase : List[Any] = min_length + idx + 1 lowercase : str = min_length + idx + 1 lowercase : Any = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) ) def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str: """simple docstring""" self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(_A ): # adds PAD dummy token lowercase 
: Union[str, Any] = min_length + idx + 1 lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , ) pass @slow def __a ( self : Optional[int] ) -> Any: """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : Any = XLMModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch class _A ( unittest.TestCase ): @slow def __a ( self : Any ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' ) model.to(_A ) lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president lowercase : List[str] = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowercase : Dict = model.generate(_A , do_sample=_A ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
308
1
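The tester above builds all of its random inputs with the ids_tensor helper from test_modeling_common; a minimal stand-in that mirrors its intent (a sketch, not the library helper):

import torch


def ids_tensor(shape, vocab_size):
    # random token ids in [0, vocab_size), the shape of a batch of sequences
    return torch.randint(0, vocab_size, shape, dtype=torch.long)


input_ids = ids_tensor([13, 7], vocab_size=99)  # matches the tester's defaults
assert input_ids.min() >= 0 and input_ids.max() < 99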
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(input().strip())))
361
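A worked check for the digit-sum routine above: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, which the digit loop reproduces.

assert solution(15) == 26
assert solution(1000) == sum(int(d) for d in str(2**1000))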
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
81
0
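What callers of the shim above observe, sketched under the assumption that default construction is valid inside an installed transformers package:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _ = CLIPFeatureExtractor()  # behaves exactly like CLIPImageProcessor afterwards
    assert any("deprecated" in str(w.message) for w in caught)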
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowercase: Optional[int] = logging.get_logger(__name__) _lowercase: List[str] = torch.device("cpu") def a( ) -> int: """simple docstring""" a = "http://images.cocodataset.org/val2017/000000039769.jpg" a = Image.open(requests.get(A , stream=A ).raw ) return im def a( A : Dict ) -> Optional[Any]: """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] ) def a( A : Optional[Any] , A : Tuple , A : Dict ) -> Union[str, Any]: """simple docstring""" a = dct.pop(A ) a = val def a( A : Optional[int] ) -> List[str]: """simple docstring""" a = [] for k in state_dict.keys(): a = k if ".pwconv" in k: a = k_new.replace(".pwconv" , ".point_wise_conv" ) if ".dwconv" in k: a = k_new.replace(".dwconv" , ".depth_wise_conv" ) if ".Proj." in k: a = k_new.replace(".Proj." , ".proj." ) if "patch_embed" in k_new: a = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" ) if "network" in k_new: a = k_new.split("." ) if ls[2].isdigit(): a = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." 
+ ".".join(ls[3:] ) else: a = k_new.replace("network" , "swiftformer.encoder.network" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def a( A : Any , A : Any , A : List[str] ) -> int: """simple docstring""" a = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size a = 1000 a = "huggingface/label-files" a = "imagenet-1k-id2label.json" a = json.load(open(hf_hub_download(A , A , repo_type="dataset" ) , "r" ) ) a = {int(A ): v for k, v in idalabel.items()} a = idalabel a = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": a = [3, 3, 6, 4] a = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": a = [3, 3, 9, 6] a = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": a = [4, 3, 10, 5] a = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": a = [4, 4, 12, 6] a = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https" ): a = torch.hub.load_state_dict_from_url(A , map_location="cpu" , check_hash=A ) else: a = torch.load(A , map_location="cpu" ) a = checkpoint a = create_rename_keys(A ) for rename_key_src, rename_key_dest in rename_keys: rename_key(A , A , A ) # load HuggingFace model a = SwiftFormerForImageClassification(A ).eval() hf_model.load_state_dict(A ) # prepare test inputs a = prepare_img() a = ViTImageProcessor.from_pretrained("preprocessor_config" ) a = processor(images=A , return_tensors="pt" ) # compare outputs from both models a = get_expected_output(A ) a = hf_model(inputs["pixel_values"] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , A , atol=1e-3 ) Path(A ).mkdir(exist_ok=A ) print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(A ) if __name__ == "__main__": _lowercase: Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") _lowercase: List[Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
227
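The conversion script's state-dict renaming in isolation, with toy keys rather than the full create_rename_keys logic:

state_dict = {"patch_embed.0.weight": 1, "network.0.blocks.0.pwconv.weight": 2}


def rename(k: str) -> str:
    # mirror two of the substitutions the script applies
    k = k.replace(".pwconv", ".point_wise_conv")
    if "patch_embed" in k:
        k = k.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
    return k


renamed = {rename(k): v for k, v in state_dict.items()}
assert "swiftformer.patch_embed.patch_embedding.0.weight" in renamed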
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm _lowercase: Optional[int] = re.compile("[^A-Za-z_0-9]") # parameters used in DuplicationIndex _lowercase: Dict = 10 _lowercase: Optional[Any] = 256 def a( A : List[str] ) -> Optional[MinHash]: """simple docstring""" if len(A ) < MIN_NUM_TOKENS: return None a = MinHash(num_perm=A ) for token in set(A ): min_hash.update(token.encode() ) return min_hash def a( A : str ) -> Set[str]: """simple docstring""" return {t for t in NON_ALPHA.split(A ) if len(t.strip() ) > 0} class _lowercase : """simple docstring""" def __init__(self , *, lowerCamelCase_ = 0.85 , ): """simple docstring""" a = duplication_jaccard_threshold a = NUM_PERM a = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) a = defaultdict(lowerCamelCase_ ) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" a = self._index.query(lowerCamelCase_ ) if code_key in self._index.keys: print(F'''Duplicate key {code_key}''' ) return self._index.insert(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(lowerCamelCase_ ) break else: self._duplicate_clusters[close_duplicates[0]].add(lowerCamelCase_ ) def UpperCamelCase_ (self ): """simple docstring""" a = [] for base, duplicates in self._duplicate_clusters.items(): a = [base] + list(lowerCamelCase_ ) # reformat the cluster to be a list of dict a = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster] duplicate_clusters.append(lowerCamelCase_ ) return duplicate_clusters def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" a = self.get_duplicate_clusters() with open(lowerCamelCase_ , "w" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ ) def a( A : Any ) -> List[Any]: """simple docstring""" a , a = element a = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def a( A : Type[Dataset] ) -> List[Any]: """simple docstring""" with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(A , max_queue_size=1_0000 ) , chunksize=100 , ): if data is not None: yield data def a( A : Type[Dataset] , A : float ) -> Dict: """simple docstring""" a = DuplicationIndex(duplication_jaccard_threshold=A ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(A ) ) , max_queue_size=100 ) ): di.add(A , A ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def a( A : str , A : str ) -> float: """simple docstring""" a = get_tokens(A ) a = get_tokens(A ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) _lowercase: int = None def a( A : str , A : Tuple ) -> int: """simple docstring""" a = [] for elementa in cluster: a = _shared_dataset[elementa["base_index"]]["content"] for elementa in extremes: a = _shared_dataset[elementa["base_index"]]["content"] if jaccard_similarity(A , A ) >= jaccard_threshold: elementa["copies"] += 1 break else: a = 1 extremes.append(A ) return extremes def a( A : str , A : List[str] , A : int ) -> Tuple: """simple docstring""" global _shared_dataset a = dataset a = [] a = partial(_find_cluster_extremes_shared , jaccard_threshold=A ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( A , A , ) , total=len(A ) , ): extremes_list.append(A ) return extremes_list def a( A : Type[Dataset] , A : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]: """simple docstring""" a = make_duplicate_clusters(A , A ) a = {x["base_index"] for cluster in duplicate_clusters for x in cluster} a = {} a = find_extremes(A , A , A ) for extremes in extremes_clusters: for element in extremes: a = element a = duplicate_indices - set(extreme_dict.keys() ) a = dataset.filter(lambda A , A : idx not in remove_indices , with_indices=A ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: a = element["base_index"] in extreme_dict if element["is_extreme"]: a = extreme_dict[element["base_index"]]["copies"] print(f'''Original dataset size: {len(A )}''' ) print(f'''Number of duplicate clusters: {len(A )}''' ) print(f'''Files in duplicate cluster: {len(A )}''' ) print(f'''Unique files in duplicate cluster: {len(A )}''' ) print(f'''Filtered dataset size: {len(A )}''' ) return ds_filter, duplicate_clusters
227
1
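The MinHash comparison the deduplicator builds on, in miniature (requires the datasketch package the script above already imports):

from datasketch import MinHash


def minhash_of(text: str, num_perm: int = 256) -> MinHash:
    m = MinHash(num_perm=num_perm)
    for token in set(text.split()):
        m.update(token.encode())
    return m


a = minhash_of("def add ( a , b ) : return a + b")
b = minhash_of("def add ( x , y ) : return x + y")
print(a.jaccard(b))  # estimated Jaccard similarity of the two token sets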
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = DebertaTokenizer lowercase = True lowercase = DebertaTokenizerFast def _lowercase( self ) -> Dict: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase : List[str] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """[UNK]""", ] UpperCAmelCase : Optional[int] = dict(zip(A , range(len(A ) ) ) ) UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCAmelCase : List[Any] = {"""unk_token""": """[UNK]"""} UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) def _lowercase( self , **A ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , A ) -> Tuple: UpperCAmelCase : Optional[int] = """lower newer""" UpperCAmelCase : Any = """lower newer""" return input_text, output_text def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Tuple = self.get_tokenizer() UpperCAmelCase : Optional[int] = """lower newer""" UpperCAmelCase : Optional[int] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCAmelCase : Optional[int] = tokenizer.tokenize(A ) self.assertListEqual(A , A ) UpperCAmelCase : int = tokens + [tokenizer.unk_token] UpperCAmelCase : List[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def _lowercase( self ) -> str: UpperCAmelCase : Tuple = self.get_tokenizer() UpperCAmelCase : List[str] = tokenizer("""Hello""" , """World""" ) UpperCAmelCase : Tuple = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd["""token_type_ids"""] , A ) @slow def _lowercase( self ) -> Optional[int]: UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" ) UpperCAmelCase : Tuple = tokenizer.encode("""sequence builders""" , add_special_tokens=A ) UpperCAmelCase : Tuple = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A ) UpperCAmelCase : List[Any] = tokenizer.encode( """sequence builders""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Dict = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : str = [self.tokenizer_class] if 
self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: UpperCAmelCase : Any = tokenizer_class.from_pretrained("""microsoft/deberta-base""" ) UpperCAmelCase : Tuple = [ """ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""", """ALBERT incorporates two parameter reduction techniques""", """The first one is a factorized embedding parameterization. By decomposing the large vocabulary""" """ embedding matrix into two small matrices, we separate the size of the hidden layers from the size of""" """ vocabulary embedding.""", ] UpperCAmelCase : str = tokenizer(A , padding=A ) UpperCAmelCase : List[Any] = [tokenizer.decode(A , skip_special_tokens=A ) for seq in encoding["""input_ids"""]] # fmt: off UpperCAmelCase : str = { """input_ids""": [ [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2] ], """token_type_ids""": [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on UpperCAmelCase : Optional[int] = [ """ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""", """ALBERT incorporates two parameter reduction techniques""", """The first one is a factorized embedding parameterization. By decomposing the large vocabulary""" """ embedding matrix into two small matrices, we separate the size of the hidden layers from the size of""" """ vocabulary embedding.""", ] self.assertDictEqual(encoding.data , A ) for expected, decoded in zip(A , A ): self.assertEqual(A , A )
338
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]


if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
338
1
"""simple docstring""" import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP _a = False try: _a = _is_package_available('google.colab') except ModuleNotFoundError: pass @input.register class A_ : '''simple docstring''' def __init__( self , lowercase_ = None , lowercase_ = [] ): """simple docstring""" UpperCAmelCase_ : List[Any] = 0 UpperCAmelCase_ : Optional[Any] = choices UpperCAmelCase_ : List[Any] = prompt if sys.platform == "win32": UpperCAmelCase_ : Optional[int] = "*" else: UpperCAmelCase_ : int = "➔ " def UpperCamelCase__ ( self , lowercase_ , lowercase_ = "" ): """simple docstring""" if sys.platform != "win32": writeColor(self.choices[index] , 32 , lowercase_ ) else: forceWrite(self.choices[index] , lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" if index == self.position: forceWrite(F""" {self.arrow_char} """ ) self.write_choice(lowercase_ ) else: forceWrite(F""" {self.choices[index]}""" ) reset_cursor() def UpperCamelCase__ ( self , lowercase_ , lowercase_ = 1 ): """simple docstring""" UpperCAmelCase_ : List[str] = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(lowercase_ ) move_cursor(lowercase_ , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["up"] ) def UpperCamelCase__ ( self ): """simple docstring""" self.move_direction(Direction.UP ) @input.mark(KEYMAP["down"] ) def UpperCamelCase__ ( self ): """simple docstring""" self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["newline"] ) def UpperCamelCase__ ( self ): """simple docstring""" move_cursor(len(self.choices ) - self.position , "DOWN" ) return self.position @input.mark(KEYMAP["interrupt"] ) def UpperCamelCase__ ( self ): """simple docstring""" move_cursor(len(self.choices ) - self.position , "DOWN" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(lowercase_ )] for number in range(10 )] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = int(chr(self.current_selection ) ) UpperCAmelCase_ : Optional[Any] = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , lowercase_ ) else: return else: return def UpperCamelCase__ ( self , lowercase_ = 0 ): """simple docstring""" if self.prompt: linebreak() forceWrite(self.prompt , "\n" ) if in_colab: forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" ) else: forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" ) UpperCAmelCase_ : Optional[Any] = default_choice for i in range(len(self.choices ) ): self.print_choice(lowercase_ ) forceWrite("\n" ) move_cursor(len(self.choices ) - self.position , "UP" ) with cursor.hide(): while True: if in_colab: try: UpperCAmelCase_ : Tuple = int(builtins.input() ) except ValueError: UpperCAmelCase_ : List[Any] = default_choice else: UpperCAmelCase_ : Optional[int] = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , "UP" ) clear_line() self.write_choice(lowercase_ , "\n" ) return choice
61
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A : Dict = logging.get_logger(__name__) def __lowerCamelCase ( __a :int=None , __a :Optional[Any]=None ) -> int: """simple docstring""" return field(default_factory=lambda: default , metadata=__a ) @dataclass class A : '''simple docstring''' __lowerCamelCase : List[str] = list_field( default=[] , metadata={ '''help''': ( '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version''' ''' of all available models''' ) } , ) __lowerCamelCase : List[int] = list_field( default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} ) __lowerCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use FP16 to accelerate inference.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Benchmark training of model'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Verbose memory tracing'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory''' } , ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Trace memory line by line'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save result to a CSV file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save all print statements in a log file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to print environment information'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use''' ''' multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled''' ''' for debugging / testing and on TPU.''' ) } , ) __lowerCamelCase : str = field( default=F'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , ) __lowerCamelCase : str = field( default=F'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , ) __lowerCamelCase : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain''' ''' model weights.''' ) } , ) def a_ ( self : Dict ) -> Union[str, Any]: """simple docstring""" warnings.warn( f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils' """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , __lowerCAmelCase , ) def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def a_ ( self : Tuple ) -> List[str]: """simple docstring""" if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def a_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
274
0
from math import ceil


def assert_device_map(device_map: dict, num_blocks: int) -> None:
    """Validate that `device_map` assigns every attention block exactly once."""
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers: int, devices: list) -> dict:
    """Return a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
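For illustration, splitting 12 layers over 4 devices with the helpers above (the values follow directly from the arithmetic in get_device_map):

device_map = get_device_map(n_layers=12, devices=[0, 1, 2, 3])
print(device_map)  # {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
assert_device_map(device_map, num_blocks=12)  # passes; raises ValueError on duplicates, gaps, or extras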
260
def is_even(number: int) -> bool:
    """
    Return True if `number` is even, using a bitwise AND with 1.

    >>> is_even(0)
    True
    >>> is_even(3)
    False
    >>> is_even(-2)
    True
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
260
1
from __future__ import annotations

from random import choice


def random_pivot(lst):
    """Choose a random pivot from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of `lst` (1-indexed) in expected linear time."""
    # pick a pivot and partition around it in linear time
    pivot = random_pivot(lst)
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than the pivot)
    # + pivot (kth element)
    # + big (elements larger than the pivot)
    if len(small) == k - 1:
        return pivot
    # pivot is among the elements bigger than the k-th
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is among the elements smaller than the k-th
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
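A small illustrative call for kth_number above; note that the partition drops elements equal to the pivot, so the input is assumed to contain distinct values:

print(kth_number([2, 1, 3, 4, 5], k=3))  # 3, the third-smallest element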
92
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
140
0
import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings _snake_case = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class _snake_case ( _lowercase ): lowerCamelCase__: bool = field(default=_lowercase , metadata={"help": "Whether to use SortishSampler or not."} ) lowerCamelCase__: bool = field( default=_lowercase , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowerCamelCase__: Optional[int] = field( default=_lowercase , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowerCamelCase__: Optional[int] = field( default=_lowercase , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowerCamelCase__: Optional[Union[str, Path, GenerationConfig]] = field( default=_lowercase , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def _lowerCamelCase ( self: int ) -> Dict: __UpperCAmelCase : str = super().to_dict() for k, v in d.items(): if isinstance(__lowerCamelCase , __lowerCamelCase ): __UpperCAmelCase : Tuple = v.to_dict() return d
360
import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class _snake_case ( _lowercase ): lowerCamelCase__: Any = ["image_processor", "tokenizer"] lowerCamelCase__: Optional[Any] = "BlipImageProcessor" lowerCamelCase__: Optional[int] = "AutoTokenizer" def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict: super().__init__(__lowerCamelCase , __lowerCamelCase ) # add QFormer tokenizer __UpperCAmelCase : Dict = qformer_tokenizer def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature: if images is None and text is None: raise ValueError("You have to specify at least images or text." ) __UpperCAmelCase : str = BatchFeature() if text is not None: __UpperCAmelCase : Any = self.tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) encoding.update(__lowerCamelCase ) __UpperCAmelCase : Dict = self.qformer_tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) __UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" ) __UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" ) if images is not None: __UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase ) encoding.update(__lowerCamelCase ) return encoding def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , 
**__lowerCamelCase: Dict ) -> Tuple: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def _lowerCamelCase ( self: List[str] ) -> Tuple: __UpperCAmelCase : str = self.tokenizer.model_input_names __UpperCAmelCase : Dict = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str: if os.path.isfile(__lowerCamelCase ): raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) __UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(__lowerCamelCase ) return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase ) @classmethod def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" ) __UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase ) args.append(__lowerCamelCase ) return cls(*__lowerCamelCase )
342
0
'''simple docstring''' from __future__ import annotations from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ = TypeVar('T') class a_ ( Generic[T] ): def __init__( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" UpperCamelCase = data UpperCamelCase = self UpperCamelCase = 0 class a_ ( Generic[T] ): def __init__( self ) -> Any: """simple docstring""" UpperCamelCase = {} def A__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" UpperCamelCase = DisjointSetTreeNode(__lowerCamelCase ) def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.map[data] if elem_ref != elem_ref.parent: UpperCamelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if nodea.rank > nodea.rank: UpperCamelCase = nodea else: UpperCamelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" self.link(self.find_set(__lowerCamelCase ) , self.find_set(__lowerCamelCase ) ) class a_ ( Generic[T] ): def __init__( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = {} def A__ ( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" if node not in self.connections: UpperCamelCase = {} def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" self.add_node(__lowerCamelCase ) self.add_node(__lowerCamelCase ) UpperCamelCase = weight UpperCamelCase = weight def A__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda _SCREAMING_SNAKE_CASE : x[2] ) # creating the disjoint set UpperCamelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__lowerCamelCase ) # MST generation UpperCamelCase = 0 UpperCamelCase = 0 UpperCamelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: UpperCamelCase = edges[index] index += 1 UpperCamelCase = disjoint_set.find_set(__lowerCamelCase ) UpperCamelCase = disjoint_set.find_set(__lowerCamelCase ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) disjoint_set.union(__lowerCamelCase , __lowerCamelCase ) return graph
321
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = """microsoft/speecht5_tts""" _lowerCamelCase = ( """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """ """text to read (in English) and returns a waveform object containing the sound.""" ) _lowerCamelCase = """text_reader""" _lowerCamelCase = SpeechTaProcessor _lowerCamelCase = SpeechTaForTextToSpeech _lowerCamelCase = SpeechTaHifiGan _lowerCamelCase = ["""text"""] _lowerCamelCase = ["""audio"""] def UpperCamelCase__( self ): '''simple docstring''' if self.post_processor is None: __A : List[str] = '''microsoft/speecht5_hifigan''' super().setup() def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=None ): '''simple docstring''' __A : int = self.pre_processor(text=__lowerCamelCase , return_tensors='''pt''' , truncation=__lowerCamelCase ) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' ) __A : List[Any] = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' ) __A : int = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 ) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' with torch.no_grad(): return self.model.generate_speech(**__lowerCamelCase ) def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' with torch.no_grad(): return self.post_processor(__lowerCamelCase ).cpu().detach()
179
0
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
368
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Compute the Gamma function of `num` by integrating x**(num - 1) * exp(-x) from 0 to infinity."""
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
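As an illustrative sanity check (not part of the original file), the numeric integral should agree with math.gamma:

assert math.isclose(gamma(5), math.gamma(5), rel_tol=1e-6)  # Γ(5) = 4! = 24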
71
0
"""simple docstring""" def _A ( lowercase , lowercase ): """simple docstring""" if b == 0: return 1 if (b % 2) == 0: return actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) ) else: return a * actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) ) def _A ( lowercase , lowercase ): """simple docstring""" if b < 0: return 1 / actual_power(lowercase , lowercase ) return actual_power(lowercase , lowercase ) if __name__ == "__main__": print(power(-2, -3))
81
'''simple docstring'''
import math


def perfect_square(num: int) -> bool:
    """Check whether `num` is a perfect square using math.sqrt."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check whether `n` is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
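Illustrative checks; the two predicates agree on perfect squares:

print(perfect_square(25))                # True
print(perfect_square_binary_search(16))  # True
print(perfect_square_binary_search(10))  # False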
22
0
"""simple docstring""" from typing import List from .keymap import KEYMAP, get_character def A_ ( _lowerCAmelCase : str ): """simple docstring""" def decorator(_lowerCAmelCase : Any ): _a = getattr(_lowerCAmelCase, '''handle_key''', [] ) handle += [key] setattr(_lowerCAmelCase, '''handle_key''', _lowerCAmelCase ) return func return decorator def A_ ( *_lowerCAmelCase : List[str] ): """simple docstring""" def decorator(_lowerCAmelCase : List[Any] ): _a = getattr(_lowerCAmelCase, '''handle_key''', [] ) handle += keys setattr(_lowerCAmelCase, '''handle_key''', _lowerCAmelCase ) return func return decorator class __lowerCamelCase ( a__ ): '''simple docstring''' def __new__( cls , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: _a = super().__new__(cls , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if not hasattr(__UpperCAmelCase , '''key_handler''' ): setattr(__UpperCAmelCase , '''key_handler''' , {} ) setattr(__UpperCAmelCase , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): _a = getattr(__UpperCAmelCase , '''handle_key''' , [] ) for key in handled_keys: _a = value return new_cls @staticmethod def _UpperCAmelCase ( cls ) -> Any: _a = get_character() if char != KEYMAP["undefined"]: _a = ord(__UpperCAmelCase ) _a = cls.key_handler.get(__UpperCAmelCase ) if handler: _a = char return handler(cls ) else: return None def A_ ( cls : Optional[int] ): """simple docstring""" return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
153
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> str: _a = '''ylacombe/bark-small''' _a = tempfile.mkdtemp() _a = '''en_speaker_1''' _a = '''This is a test string''' _a = '''speaker_embeddings_path.json''' _a = '''speaker_embeddings''' def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> Tuple: return AutoTokenizer.from_pretrained(self.checkpoint , **__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Tuple: shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> str: _a = self.get_tokenizer() _a = BarkProcessor(tokenizer=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) _a = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def _UpperCAmelCase ( self ) -> Optional[Any]: _a = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) _a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _a = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def _UpperCAmelCase ( self ) -> str: _a = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) _a = 35 _a = 2 _a = 8 _a = { '''semantic_prompt''': np.ones(__UpperCAmelCase ), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset _a = processor(text=self.input_string , voice_preset=__UpperCAmelCase ) _a = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file _a = os.path.join(self.tmpdirname , '''file.npz''' ) np.savez(__UpperCAmelCase , **__UpperCAmelCase ) _a = processor(text=self.input_string , voice_preset=__UpperCAmelCase ) _a = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub _a = processor(text=self.input_string , voice_preset=self.voice_preset ) def _UpperCAmelCase ( self ) -> Tuple: _a = self.get_tokenizer() _a = BarkProcessor(tokenizer=__UpperCAmelCase ) _a = processor(text=self.input_string ) _a = tokenizer( self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
153
1
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ : Tuple = logging.get_logger(__name__) lowercase_ : List[str] = { 'huggingface/informer-tourism-monthly': ( 'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json' ), # See all Informer models at https://huggingface.co/models?filter=informer } class __lowerCAmelCase ( UpperCAmelCase__ ): snake_case_ : Optional[Any] = "informer" snake_case_ : List[Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self : Tuple , snake_case__ : Optional[int] = None , snake_case__ : Optional[int] = None , snake_case__ : str = "student_t" , snake_case__ : str = "nll" , snake_case__ : int = 1 , snake_case__ : List[int] = None , snake_case__ : Optional[Union[str, bool]] = "mean" , snake_case__ : int = 0 , snake_case__ : int = 0 , snake_case__ : int = 0 , snake_case__ : int = 0 , snake_case__ : Optional[List[int]] = None , snake_case__ : Optional[List[int]] = None , snake_case__ : int = 64 , snake_case__ : int = 32 , snake_case__ : int = 32 , snake_case__ : int = 2 , snake_case__ : int = 2 , snake_case__ : int = 2 , snake_case__ : int = 2 , snake_case__ : bool = True , snake_case__ : str = "gelu" , snake_case__ : float = 0.05 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : int = 100 , snake_case__ : float = 0.02 , snake_case__ : List[Any]=True , snake_case__ : str = "prob" , snake_case__ : int = 5 , snake_case__ : bool = True , **snake_case__ : Dict , ): """simple docstring""" _UpperCAmelCase = prediction_length _UpperCAmelCase = context_length or prediction_length _UpperCAmelCase = distribution_output _UpperCAmelCase = loss _UpperCAmelCase = input_size _UpperCAmelCase = num_time_features _UpperCAmelCase = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] _UpperCAmelCase = scaling _UpperCAmelCase = num_dynamic_real_features _UpperCAmelCase = num_static_real_features _UpperCAmelCase = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(snake_case__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase = cardinality else: _UpperCAmelCase = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(snake_case__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase = embedding_dimension else: _UpperCAmelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] _UpperCAmelCase = num_parallel_samples # Transformer architecture configuration _UpperCAmelCase = input_size * len(self.lags_sequence ) + self._number_of_features _UpperCAmelCase = d_model _UpperCAmelCase = encoder_attention_heads _UpperCAmelCase = decoder_attention_heads _UpperCAmelCase = encoder_ffn_dim _UpperCAmelCase = decoder_ffn_dim _UpperCAmelCase = encoder_layers _UpperCAmelCase = decoder_layers _UpperCAmelCase = dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = activation_dropout _UpperCAmelCase = encoder_layerdrop _UpperCAmelCase = decoder_layerdrop _UpperCAmelCase = activation_function _UpperCAmelCase = init_std _UpperCAmelCase = use_cache # Informer 
_UpperCAmelCase = attention_type _UpperCAmelCase = sampling_factor _UpperCAmelCase = distil super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ ) @property def UpperCamelCase ( self : Dict ): """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
133
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily concatenate consecutive (src, tgt) pairs until either side would exceed max_tokens."""
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    """Pack the train split and copy the val/test splits unchanged."""
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
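A hypothetical invocation of the CLI above (the script name and paths are placeholders):

#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed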
133
1
"""simple docstring""" import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Any )-> Optional[Any]: _snake_case : str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' _snake_case : Dict = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw ).convert('RGB' ) _snake_case : Dict = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ), ] ) _snake_case : Tuple = transform(lowerCAmelCase ).unsqueeze(0 ).to(lowerCAmelCase ) return image def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> Any: if "visual_encoder" in key: _snake_case : Optional[Any] = re.sub('visual_encoder*' , 'vision_model.encoder' , lowerCAmelCase ) if "blocks" in key: _snake_case : Dict = re.sub(R'blocks' , 'layers' , lowerCAmelCase ) if "attn" in key: _snake_case : int = re.sub(R'attn' , 'self_attn' , lowerCAmelCase ) if "norm1" in key: _snake_case : int = re.sub(R'norm1' , 'layer_norm1' , lowerCAmelCase ) if "norm2" in key: _snake_case : Union[str, Any] = re.sub(R'norm2' , 'layer_norm2' , lowerCAmelCase ) if "encoder.norm" in key: _snake_case : int = re.sub(R'encoder.norm' , 'post_layernorm' , lowerCAmelCase ) if "encoder.patch_embed.proj" in key: _snake_case : Dict = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowerCAmelCase ) if "encoder.pos_embed" in key: _snake_case : str = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowerCAmelCase ) if "encoder.cls_token" in key: _snake_case : Dict = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowerCAmelCase ) if "self_attn" in key: _snake_case : Any = re.sub(R'self_attn.proj' , 'self_attn.projection' , lowerCAmelCase ) return key @torch.no_grad() def lowerCamelCase_ ( lowerCAmelCase: List[str] , lowerCAmelCase: Union[str, Any]=None )-> Any: if config_path is not None: _snake_case : str = BlipConfig.from_pretrained(lowerCAmelCase ) else: _snake_case : Union[str, Any] = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} ) _snake_case : Optional[int] = BlipForConditionalGeneration(lowerCAmelCase ).eval() _snake_case : Optional[Any] = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' _snake_case : Optional[int] = blip_decoder(pretrained=lowerCAmelCase , image_size=3_84 , vit='base' ) _snake_case : Dict = pt_model.eval() _snake_case : Optional[Any] = pt_model.state_dict() for key in modified_state_dict.copy(): _snake_case : int = modified_state_dict.pop(lowerCAmelCase ) _snake_case : Tuple = rename_key(lowerCAmelCase ) _snake_case : Any = value hf_model.load_state_dict(lowerCAmelCase ) _snake_case : Union[str, Any] = 3_84 _snake_case : Optional[Any] = load_demo_image(image_size=lowerCAmelCase , device='cpu' ) _snake_case : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' ) _snake_case : str = 
tokenizer(['a picture of'] ).input_ids _snake_case : int = hf_model.generate(lowerCAmelCase , lowerCAmelCase ) assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02] _snake_case : str = hf_model.generate(lowerCAmelCase ) assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(lowerCAmelCase ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' _snake_case : Dict = ( 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' ) _snake_case : Optional[int] = blip_vqa(pretrained=lowerCAmelCase , image_size=lowerCAmelCase , vit='base' ) vqa_model.eval() _snake_case : Optional[Any] = vqa_model.state_dict() for key in modified_state_dict.copy(): _snake_case : List[str] = modified_state_dict.pop(lowerCAmelCase ) _snake_case : str = rename_key(lowerCAmelCase ) _snake_case : Dict = value _snake_case : Optional[Any] = BlipForQuestionAnswering(lowerCAmelCase ) hf_vqa_model.load_state_dict(lowerCAmelCase ) _snake_case : Union[str, Any] = ['How many dogs are in this image?'] _snake_case : Tuple = tokenizer(lowerCAmelCase , return_tensors='pt' ).input_ids _snake_case : int = hf_vqa_model.generate(lowerCAmelCase , lowerCAmelCase ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' ) _snake_case : Dict = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' _snake_case : List[Any] = blip_itm(pretrained=lowerCAmelCase , image_size=lowerCAmelCase , vit='base' ) itm_model.eval() _snake_case : List[Any] = itm_model.state_dict() for key in modified_state_dict.copy(): _snake_case : Union[str, Any] = modified_state_dict.pop(lowerCAmelCase ) _snake_case : Any = rename_key(lowerCAmelCase ) _snake_case : Dict = value _snake_case : Any = BlipForImageTextRetrieval(lowerCAmelCase ) _snake_case : Dict = ['A picture of a woman with a dog sitting in a beach'] _snake_case : List[str] = tokenizer( lowerCAmelCase , return_tensors='pt' , padding='max_length' , truncation=lowerCAmelCase , max_length=35 , ).input_ids hf_itm_model.load_state_dict(lowerCAmelCase ) hf_itm_model.eval() _snake_case : Dict = hf_itm_model(lowerCAmelCase , lowerCAmelCase , use_itm_head=lowerCAmelCase ) _snake_case : Tuple = hf_itm_model(lowerCAmelCase , lowerCAmelCase , use_itm_head=lowerCAmelCase ) assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCAmelCase_ = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
371
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
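A hypothetical invocation of the conversion script above (the script name and paths are placeholders):

#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert.ckpt \
#       --mobilebert_config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin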
260
0
def longest_common_subsequence(x: str, y: str):
    """Return the length of the longest common subsequence of `x` and `y`, and the subsequence itself."""
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # walk the table backwards to reconstruct one longest common subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
51
import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    # set one layer's weight (and optionally bias) after a shape check
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the trax model pickle file."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
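# --- Illustrative sketch (added; not part of the original script) ---
# `set_param` above only swaps tensors in after a shape check; a tiny
# self-contained demonstration on a fresh nn.Linear (the demo name is
# hypothetical). Call _set_param_demo() manually to run it.
def _set_param_demo() -> None:
    layer = nn.Linear(4, 3)  # weight: (3, 4), bias: (3,)
    new_weight = torch.zeros(3, 4)
    new_bias = torch.ones(3)
    set_param(layer, new_weight, new_bias)
    assert torch.equal(layer.weight, new_weight)
    assert torch.equal(layer.bias, new_bias)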
"""simple docstring""" import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument lowercase__ :Union[str, Any] = { '/attention/': '/0/SelfAttention/', '/self_attention/': '/0/SelfAttention/', '/encoder_decoder_attention/': '/1/EncDecAttention/', 'value': 'v', 'query': 'q', 'key': 'k', 'out': 'o', 'pre_self_attention_layer_norm': '0/layer_norm', 'pre_cross_attention_layer_norm': '1/layer_norm', 'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong 'token_embedder': 'shared', 'encoder_norm': 'final_layer_norm', 'decoder_norm': 'final_layer_norm', 'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight', 'router/router_weights/w/': 'router/classifier/', 'roer/roer_weights/w/': 'router/classifier/', 'logits_dense': 'lm_head', } def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in # the original model lowercase = list(s_dict.keys() ) for key in keys: lowercase = R'''.*/layers_(\d+)''' lowercase = key if re.match(_UpperCamelCase , _UpperCamelCase ): lowercase = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , _UpperCamelCase ) lowercase = R'''(encoder|decoder)\/''' if re.match(_UpperCamelCase , _UpperCamelCase ): lowercase = re.match(_UpperCamelCase , _UpperCamelCase ).groups() if groups[0] == "encoder": lowercase = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , _UpperCamelCase ) lowercase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , _UpperCamelCase ) elif groups[0] == "decoder": lowercase = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , _UpperCamelCase ) lowercase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , _UpperCamelCase ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: lowercase = new_key.replace(_UpperCamelCase , _UpperCamelCase ) print(f'{key} -> {new_key}' ) lowercase = s_dict.pop(_UpperCamelCase ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: lowercase = s_dict[ '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight''' ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: lowercase = s_dict[ '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight''' ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: lowercase = s_dict[key].shape[0] lowercase = s_dict[key] for idx in range(_UpperCamelCase ): lowercase = expert_weihts[idx] print(f'{key} -> {key.replace("expert/" , "nested fstring" )}' ) s_dict.pop(_UpperCamelCase ) return s_dict lowercase__ :Optional[int] = { 'NUM_ENCODER_LAYERS': 'num_layers', 'NUM_DECODER_LAYERS': 'num_decoder_layers', 'NUM_HEADS': 'num_heads', 'HEAD_DIM': 'd_kv', 'EMBED_DIM': 'd_model', 'MLP_DIM': 'd_ff', 'NUM_SELECTED_EXPERTS': 'num_selected_experts', 'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers', 'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers', 'dense.MlpBlock.activations': 'feed_forward_proj', } def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' # Convert a google style config to the hugging face fromat import regex as re with open(_UpperCamelCase , '''r''' ) as f: lowercase = f.read() lowercase = re.findall(R'''(.*) = ([0-9.]*)''' , _UpperCamelCase ) lowercase = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": lowercase = float(_UpperCamelCase ) if '''.''' in value else int(_UpperCamelCase ) lowercase = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , _UpperCamelCase )[0] lowercase = str(activation[1] ) lowercase = num_experts lowercase = SwitchTransformersConfig(**_UpperCamelCase ) return config def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__="./" , lowerCAmelCase__=8 ): '''simple docstring''' # Initialise PyTorch model print(f'Loading flax weights from : {flax_checkpoint_path}' ) lowercase = checkpoints.load_tax_checkpoint(_UpperCamelCase ) if gin_file is not None: lowercase = convert_gin_to_config(_UpperCamelCase , _UpperCamelCase ) else: lowercase = SwitchTransformersConfig.from_pretrained(_UpperCamelCase ) lowercase = SwitchTransformersForConditionalGeneration(_UpperCamelCase ) lowercase = flax_params['''target'''] lowercase = flatten_dict(_UpperCamelCase , sep='''/''' ) lowercase = rename_keys(_UpperCamelCase ) lowercase = unflatten_dict(_UpperCamelCase , sep='''/''' ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase ) print(f'Save PyTorch model to {pytorch_dump_path}' ) pt_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": lowercase__ :Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the" " model architecture. If not provided, a `gin_file` has to be provided." ), ) parser.add_argument( "--gin_file", default=None, type=str, required=False, help="Path to the gin config file. If not provided, a `config_file` has to be passed ", ) parser.add_argument( "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model." ) parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts") lowercase__ :int = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
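# --- Illustrative sketch (added; not part of the original script) ---
# How `rename_keys` rewrites two representative t5x-style keys; the key names
# are hypothetical examples and the arrays are dummies. Call
# _rename_keys_demo() manually to run it.
def _rename_keys_demo() -> None:
    import numpy as np  # local import: the converter itself does not need numpy

    dummy = {
        "encoder/layers_0/attention/query/kernel": np.zeros((2, 2)),
        "decoder/layers_1/pre_mlp_layer_norm/scale": np.zeros(2),
    }
    renamed = rename_keys(dummy)
    # layers_{x} -> block/{x}/layer, then the classic attention/norm mappings apply
    assert "encoder/block/0/layer/0/SelfAttention/q/kernel" in renamed
    assert "decoder/block/1/layer/2/layer_norm/scale" in renamed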
import inspect
import os
import re

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}


# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)


def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of the modeling sources in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed


def check_config_attributes_being_used(config_class):
    """Return the sorted list of `__init__` parameters of `config_class` that are unused in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)


def check_config_attributes():
    """Check all configuration classes and raise if any of them has unused attributes."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
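# --- Illustrative sketch (added; not part of the original script) ---
# The multi-line `getattr` pattern used in `check_attribute_being_used`
# tolerates arbitrary whitespace, including newlines, between the tokens.
# Call _multiline_getattr_demo() manually to run it.
def _multiline_getattr_demo() -> None:
    attribute = "hidden_size"
    source = 'value = getattr(\n    self.config,\n    "hidden_size",\n    None,\n)'
    pattern = rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"'
    assert re.search(pattern, source) is not None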